Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Fri, 27 Apr 2007 16:39:27 +0000 (09:39 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Fri, 27 Apr 2007 16:39:27 +0000 (09:39 -0700)
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband: (49 commits)
  IB: Set class_dev->dev in core for nice device symlink
  IB/ehca: Implement modify_port
  IB/umad: Clarify documentation of transaction ID
  IPoIB/cm: spin_lock_irqsave() -> spin_lock_irq() replacements
  IB/mad: Change SMI to use enums rather than magic return codes
  IB/umad: Implement GRH handling for sent/received MADs
  IB/ipoib: Use ib_init_ah_from_path to initialize ah_attr
  IB/sa: Set src_path_bits correctly in ib_init_ah_from_path()
  IB/ucm: Simplify ib_ucm_event()
  RDMA/ucma: Simplify ucma_get_event()
  IB/mthca: Simplify CQ cleaning in mthca_free_qp()
  IB/mthca: Fix mthca_write_mtt() on HCAs with hidden memory
  IB/mthca: Update HCA firmware revisions
  IB/ipath: Fix WC format drift between user and kernel space
  IB/ipath: Check that a UD work request's address handle is valid
  IB/ipath: Remove duplicate stuff from ipath_verbs.h
  IB/ipath: Check reserved memory keys
  IB/ipath: Fix unit selection when all CPU affinity bits set
  IB/ipath: Don't allow QPs 0 and 1 to be opened multiple times
  IB/ipath: Disable IB link earlier in shutdown sequence
  ...

1366 files changed:
.mailmap
CREDITS
Documentation/feature-removal-schedule.txt
Documentation/filesystems/afs.txt
Documentation/filesystems/proc.txt
Documentation/keys.txt
Documentation/networking/bonding.txt
Documentation/networking/dccp.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/rxrpc.txt [new file with mode: 0644]
Documentation/networking/wan-router.txt
Documentation/s390/crypto/crypto-API.txt [deleted file]
Documentation/s390/zfcpdump.txt [new file with mode: 0644]
Documentation/x86_64/boot-options.txt
MAINTAINERS
Makefile
arch/alpha/lib/Makefile
arch/alpha/lib/strcasecmp.c [deleted file]
arch/avr32/Kconfig
arch/avr32/Makefile
arch/avr32/boards/atngw100/Makefile [new file with mode: 0644]
arch/avr32/boards/atngw100/flash.c [new file with mode: 0644]
arch/avr32/boards/atngw100/setup.c [new file with mode: 0644]
arch/avr32/boards/atstk1000/atstk1002.c
arch/avr32/boards/atstk1000/setup.c
arch/avr32/configs/atngw100_defconfig [new file with mode: 0644]
arch/avr32/kernel/cpu.c
arch/avr32/kernel/entry-avr32b.S
arch/avr32/kernel/module.c
arch/avr32/kernel/process.c
arch/avr32/kernel/setup.c
arch/avr32/kernel/time.c
arch/avr32/kernel/traps.c
arch/avr32/kernel/vmlinux.lds.c
arch/avr32/mach-at32ap/Kconfig [new file with mode: 0644]
arch/avr32/mach-at32ap/Makefile
arch/avr32/mach-at32ap/at32ap7000.c
arch/avr32/mach-at32ap/hmatrix.h [new file with mode: 0644]
arch/avr32/mach-at32ap/hsmc.c
arch/avr32/mach-at32ap/time-tc.c [new file with mode: 0644]
arch/avr32/mm/fault.c
arch/avr32/mm/init.c
arch/i386/kernel/alternative.c
arch/i386/kernel/cpu/cpufreq/longhaul.c
arch/i386/kernel/nmi.c
arch/ia64/hp/sim/simeth.c
arch/ia64/sn/kernel/xpnet.c
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/basler/excite/excite_setup.c
arch/mips/cobalt/Makefile
arch/mips/cobalt/console.c
arch/mips/cobalt/irq.c
arch/mips/cobalt/pci.c [new file with mode: 0644]
arch/mips/cobalt/reset.c
arch/mips/cobalt/setup.c
arch/mips/configs/jmr3927_defconfig
arch/mips/configs/pnx8550-v2pci_defconfig [deleted file]
arch/mips/gt64120/wrppmc/pci.c
arch/mips/jmr3927/common/prom.c
arch/mips/jmr3927/common/puts.c
arch/mips/jmr3927/rbhma3100/Makefile
arch/mips/jmr3927/rbhma3100/init.c
arch/mips/jmr3927/rbhma3100/irq.c
arch/mips/jmr3927/rbhma3100/kgdb_io.c
arch/mips/jmr3927/rbhma3100/setup.c
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/i8259.c
arch/mips/kernel/kspd.c
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/rtlx.c
arch/mips/kernel/signal-common.h
arch/mips/kernel/signal.c
arch/mips/kernel/signal32.c
arch/mips/kernel/traps.c
arch/mips/mips-boards/generic/display.c
arch/mips/mips-boards/generic/pci.c
arch/mips/mips-boards/generic/reset.c
arch/mips/mips-boards/malta/malta_int.c
arch/mips/mips-boards/malta/malta_setup.c
arch/mips/mm/cache.c
arch/mips/mm/init.c
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/Makefile
arch/mips/pci/fixup-jmr3927.c
arch/mips/pci/ops-gt64111.c [deleted file]
arch/mips/pci/ops-gt64xxx_pci0.c [moved from arch/mips/pci/ops-gt64120.c with 80% similarity]
arch/mips/pci/ops-tx3927.c
arch/mips/pci/pci-lasat.c
arch/mips/pci/pci-ocelot.c
arch/mips/pci/pci.c
arch/mips/sgi-ip22/ip22-nvram.c
arch/mips/sgi-ip22/ip22-time.c
arch/mips/sibyte/Kconfig
arch/mips/sibyte/common/Makefile [new file with mode: 0644]
arch/mips/sibyte/common/sb_tbprof.c [moved from arch/mips/sibyte/sb1250/bcm1250_tbprof.c with 80% similarity]
arch/mips/sibyte/sb1250/Makefile
arch/mips/sibyte/sb1250/setup.c
arch/mips/sni/pcimt.c
arch/mips/sni/pcit.c
arch/mips/vr41xx/Kconfig
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/lib/Makefile
arch/powerpc/lib/strcase.c [deleted file]
arch/ppc/8260_io/enet.c
arch/ppc/8260_io/fcc_enet.c
arch/ppc/8xx_io/enet.c
arch/ppc/8xx_io/fec.c
arch/ppc/kernel/ppc_ksyms.c
arch/ppc/lib/Makefile
arch/ppc/lib/strcase.c [deleted file]
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/appldata/appldata_base.c
arch/s390/appldata/appldata_net_sum.c
arch/s390/crypto/sha1_s390.c
arch/s390/crypto/sha256_s390.c
arch/s390/defconfig
arch/s390/kernel/Makefile
arch/s390/kernel/compat_linux.c
arch/s390/kernel/compat_signal.c
arch/s390/kernel/dis.c [new file with mode: 0644]
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/kernel/head64.S
arch/s390/kernel/ipl.c
arch/s390/kernel/module.c
arch/s390/kernel/process.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/smp.c
arch/s390/kernel/sys_s390.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/time.c
arch/s390/kernel/traps.c
arch/s390/kernel/vmlinux.lds.S
arch/s390/kernel/vtime.c
arch/s390/lib/Makefile
arch/s390/lib/div64.c
arch/s390/mm/fault.c
arch/sh/lib/Makefile
arch/sh/lib/strcasecmp.c [deleted file]
arch/sparc/kernel/ebus.c
arch/sparc/kernel/of_device.c
arch/sparc/kernel/pcic.c
arch/sparc/kernel/prom.c
arch/sparc/kernel/time.c
arch/sparc64/Kconfig
arch/sparc64/kernel/central.c
arch/sparc64/kernel/chmc.c
arch/sparc64/kernel/ebus.c
arch/sparc64/kernel/irq.c
arch/sparc64/kernel/isa.c
arch/sparc64/kernel/of_device.c
arch/sparc64/kernel/pci.c
arch/sparc64/kernel/pci_common.c
arch/sparc64/kernel/pci_impl.h
arch/sparc64/kernel/pci_iommu.c
arch/sparc64/kernel/pci_psycho.c
arch/sparc64/kernel/pci_sabre.c
arch/sparc64/kernel/pci_schizo.c
arch/sparc64/kernel/pci_sun4v.c
arch/sparc64/kernel/process.c
arch/sparc64/kernel/prom.c
arch/sparc64/kernel/sbus.c
arch/sparc64/kernel/smp.c
arch/sparc64/kernel/sparc64_ksyms.c
arch/sparc64/kernel/time.c
arch/sparc64/kernel/ttable.S
arch/sparc64/mm/init.c
arch/sparc64/solaris/misc.c
arch/um/drivers/daemon_kern.c
arch/um/drivers/mcast_kern.c
arch/um/drivers/net_kern.c
arch/um/drivers/pcap_kern.c
arch/um/drivers/slip_kern.c
arch/um/drivers/slirp_kern.c
arch/um/os-Linux/drivers/ethertap_kern.c
arch/um/os-Linux/drivers/tuntap_kern.c
arch/x86_64/kernel/pci-gart.c
arch/x86_64/mm/pageattr.c
arch/xtensa/lib/Makefile
arch/xtensa/lib/strcasecmp.c [deleted file]
arch/xtensa/platform-iss/network.c
block/cfq-iosched.c
drivers/acpi/thermal.c
drivers/ata/pata_sis.c
drivers/atm/ambassador.c
drivers/atm/atmtcp.c
drivers/atm/eni.c
drivers/atm/eni.h
drivers/atm/fore200e.c
drivers/atm/fore200e.h
drivers/atm/he.c
drivers/atm/idt77252.c
drivers/atm/nicstar.c
drivers/block/aoe/aoe.h
drivers/block/aoe/aoecmd.c
drivers/block/aoe/aoenet.c
drivers/block/paride/pcd.c
drivers/block/paride/pf.c
drivers/block/pktcdvd.c
drivers/bluetooth/bfusb.c
drivers/bluetooth/bluecard_cs.c
drivers/bluetooth/bpa10x.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btuart_cs.c
drivers/bluetooth/dtl1_cs.c
drivers/bluetooth/hci_h4.c
drivers/char/mxser.c
drivers/char/mxser_new.c
drivers/char/pcmcia/synclink_cs.c
drivers/char/random.c
drivers/connector/connector.c
drivers/hwmon/w83627ehf.c
drivers/ide/Kconfig
drivers/ide/pci/delkin_cb.c
drivers/ide/pci/hpt366.c
drivers/ieee1394/eth1394.c
drivers/ieee1394/eth1394.h
drivers/infiniband/hw/amso1100/c2.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/isdn/act2000/module.c
drivers/isdn/gigaset/usb-gigaset.c
drivers/isdn/hardware/avm/b1dma.c
drivers/isdn/hardware/avm/c4.c
drivers/isdn/hisax/elsa_ser.c
drivers/isdn/hisax/isdnl2.c
drivers/isdn/hysdn/hycapi.c
drivers/isdn/hysdn/hysdn_net.c
drivers/isdn/hysdn/hysdn_sched.c
drivers/isdn/i4l/isdn_common.c
drivers/isdn/i4l/isdn_net.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/isdnloop/isdnloop.c
drivers/isdn/pcbit/capi.c
drivers/kvm/mmu.c
drivers/media/dvb/dvb-core/dvb_net.c
drivers/message/fusion/mptlan.c
drivers/mtd/maps/sun_uflash.c
drivers/net/3c501.c
drivers/net/3c505.c
drivers/net/3c507.c
drivers/net/3c509.c
drivers/net/3c515.c
drivers/net/3c523.c
drivers/net/3c527.c
drivers/net/3c59x.c
drivers/net/7990.c
drivers/net/8139cp.c
drivers/net/8139too.c
drivers/net/82596.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/a2065.c
drivers/net/acenic.c
drivers/net/amd8111e.c
drivers/net/appletalk/cops.c
drivers/net/appletalk/ltpc.c
drivers/net/arcnet/arc-rawmode.c
drivers/net/arcnet/arcnet.c
drivers/net/arcnet/capmode.c
drivers/net/arcnet/rfc1051.c
drivers/net/arcnet/rfc1201.c
drivers/net/ariadne.c
drivers/net/arm/am79c961a.c
drivers/net/arm/at91_ether.c
drivers/net/arm/ep93xx_eth.c
drivers/net/arm/ether1.c
drivers/net/arm/ether3.c
drivers/net/at1700.c
drivers/net/atari_bionet.c
drivers/net/atari_pamsnet.c
drivers/net/atarilance.c
drivers/net/atl1/atl1_main.c
drivers/net/atp.c
drivers/net/au1000_eth.c
drivers/net/b44.c
drivers/net/bmac.c
drivers/net/bnx2.c
drivers/net/bnx2.h
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/cassini.c
drivers/net/chelsio/sge.c
drivers/net/cris/eth_v10.c
drivers/net/cs89x0.c
drivers/net/cxgb3/cxgb3_defs.h
drivers/net/cxgb3/cxgb3_offload.c
drivers/net/cxgb3/sge.c
drivers/net/cxgb3/t3_hw.c
drivers/net/de600.c
drivers/net/de620.c
drivers/net/declance.c
drivers/net/defxx.c
drivers/net/depca.c
drivers/net/dgrs.c
drivers/net/dl2k.c
drivers/net/dm9000.c
drivers/net/e100.c
drivers/net/e1000/e1000_main.c
drivers/net/eepro.c
drivers/net/eepro100.c
drivers/net/eexpress.c
drivers/net/ehea/ehea_main.c
drivers/net/epic100.c
drivers/net/eth16i.c
drivers/net/ewrk3.c
drivers/net/fealnx.c
drivers/net/fec.c
drivers/net/fec_8xx/fec_main.c
drivers/net/forcedeth.c
drivers/net/fs_enet/fs_enet-main.c
drivers/net/gianfar.c
drivers/net/hamachi.c
drivers/net/hamradio/baycom_ser_fdx.c
drivers/net/hamradio/bpqether.c
drivers/net/hamradio/dmascc.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/hamradio/yam.c
drivers/net/hp100.c
drivers/net/ibm_emac/ibm_emac_core.c
drivers/net/ibmlana.c
drivers/net/ibmveth.c
drivers/net/ioc3-eth.c
drivers/net/irda/ali-ircc.c
drivers/net/irda/au1k_ir.c
drivers/net/irda/donauboe.c
drivers/net/irda/irda-usb.c
drivers/net/irda/mcs7780.c
drivers/net/irda/nsc-ircc.c
drivers/net/irda/pxaficp_ir.c
drivers/net/irda/sa1100_ir.c
drivers/net/irda/smsc-ircc2.c
drivers/net/irda/stir4200.c
drivers/net/irda/via-ircc.c
drivers/net/irda/vlsi_ir.c
drivers/net/irda/w83977af_ir.c
drivers/net/iseries_veth.c
drivers/net/ixgb/ixgb_main.c
drivers/net/ixp2000/ixpdev.c
drivers/net/lance.c
drivers/net/lasi_82596.c
drivers/net/lib8390.c
drivers/net/loopback.c
drivers/net/lp486e.c
drivers/net/mac89x0.c
drivers/net/macb.c
drivers/net/mace.c
drivers/net/macmace.c
drivers/net/meth.c
drivers/net/mipsnet.c
drivers/net/mv643xx_eth.c
drivers/net/myri10ge/myri10ge.c
drivers/net/myri_sbus.c
drivers/net/natsemi.c
drivers/net/netx-eth.c
drivers/net/netxen/netxen_nic_hw.c
drivers/net/netxen/netxen_nic_init.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/ni5010.c
drivers/net/ni52.c
drivers/net/ni65.c
drivers/net/ns83820.c
drivers/net/pasemi_mac.c
drivers/net/pci-skeleton.c
drivers/net/pcmcia/3c574_cs.c
drivers/net/pcmcia/3c589_cs.c
drivers/net/pcmcia/axnet_cs.c
drivers/net/pcmcia/fmvj18x_cs.c
drivers/net/pcmcia/nmclan_cs.c
drivers/net/pcmcia/smc91c92_cs.c
drivers/net/pcmcia/xirc2ps_cs.c
drivers/net/pcnet32.c
drivers/net/plip.c
drivers/net/ppp_async.c
drivers/net/ppp_generic.c
drivers/net/ppp_synctty.c
drivers/net/pppoe.c
drivers/net/pppox.c
drivers/net/qla3xxx.c
drivers/net/r8169.c
drivers/net/rionet.c
drivers/net/rrunner.c
drivers/net/s2io.c
drivers/net/saa9730.c
drivers/net/sb1000.c
drivers/net/sb1250-mac.c
drivers/net/sc92031.c
drivers/net/seeq8005.c
drivers/net/sgiseeq.c
drivers/net/sis190.c
drivers/net/sis900.c
drivers/net/sk98lin/skge.c
drivers/net/skfp/skfddi.c
drivers/net/skge.c
drivers/net/sky2.c
drivers/net/sky2.h
drivers/net/slip.c
drivers/net/smc911x.c
drivers/net/smc9194.c
drivers/net/smc91x.c
drivers/net/sonic.c
drivers/net/spider_net.c
drivers/net/starfire.c
drivers/net/sun3_82586.c
drivers/net/sun3lance.c
drivers/net/sunbmac.c
drivers/net/sundance.c
drivers/net/sungem.c
drivers/net/sungem.h
drivers/net/sunhme.c
drivers/net/sunlance.c
drivers/net/sunqe.c
drivers/net/tc35815.c
drivers/net/tg3.c
drivers/net/tlan.c
drivers/net/tokenring/3c359.c
drivers/net/tokenring/ibmtr.c
drivers/net/tokenring/lanstreamer.c
drivers/net/tokenring/olympic.c
drivers/net/tokenring/smctr.c
drivers/net/tokenring/tms380tr.c
drivers/net/tsi108_eth.c
drivers/net/tulip/de2104x.c
drivers/net/tulip/de4x5.c
drivers/net/tulip/dmfe.c
drivers/net/tulip/interrupt.c
drivers/net/tulip/tulip_core.c
drivers/net/tulip/uli526x.c
drivers/net/tulip/winbond-840.c
drivers/net/tulip/xircom_cb.c
drivers/net/tulip/xircom_tulip_cb.c
drivers/net/tun.c
drivers/net/typhoon.c
drivers/net/via-rhine.c
drivers/net/via-velocity.c
drivers/net/wan/cosa.c
drivers/net/wan/cycx_x25.c
drivers/net/wan/dlci.c
drivers/net/wan/dscc4.c
drivers/net/wan/farsync.c
drivers/net/wan/hdlc_cisco.c
drivers/net/wan/hdlc_fr.c
drivers/net/wan/hostess_sv11.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wan/pc300_drv.c
drivers/net/wan/pc300_tty.c
drivers/net/wan/sbni.c
drivers/net/wan/sealevel.c
drivers/net/wan/syncppp.c
drivers/net/wan/z85230.c
drivers/net/wireless/Kconfig
drivers/net/wireless/airo.c
drivers/net/wireless/arlan-main.c
drivers/net/wireless/atmel.c
drivers/net/wireless/bcm43xx/Kconfig
drivers/net/wireless/bcm43xx/bcm43xx_dma.c
drivers/net/wireless/hostap/Kconfig
drivers/net/wireless/hostap/hostap_80211_rx.c
drivers/net/wireless/hostap/hostap_80211_tx.c
drivers/net/wireless/hostap/hostap_ap.c
drivers/net/wireless/hostap/hostap_hw.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/ipw2100.c
drivers/net/wireless/ipw2200.c
drivers/net/wireless/netwave_cs.c
drivers/net/wireless/orinoco.c
drivers/net/wireless/prism54/islpci_eth.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/strip.c
drivers/net/wireless/wavelan.c
drivers/net/wireless/wavelan_cs.c
drivers/net/wireless/zd1201.c
drivers/net/wireless/zd1211rw/Kconfig
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/net/yellowfin.c
drivers/net/znet.c
drivers/parisc/led.c
drivers/parport/parport_sunbpp.c
drivers/pci/probe.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_devmap.c
drivers/s390/char/Makefile
drivers/s390/char/con3215.c
drivers/s390/char/con3270.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp.h
drivers/s390/char/sclp_chp.c [new file with mode: 0644]
drivers/s390/char/sclp_config.c [new file with mode: 0644]
drivers/s390/char/sclp_cpi.c
drivers/s390/char/sclp_quiesce.c
drivers/s390/char/sclp_rw.c
drivers/s390/char/sclp_sdias.c [new file with mode: 0644]
drivers/s390/char/sclp_tty.c
drivers/s390/char/sclp_vt220.c
drivers/s390/char/vmlogrdr.c
drivers/s390/char/zcore.c [new file with mode: 0644]
drivers/s390/cio/Makefile
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/chp.c [new file with mode: 0644]
drivers/s390/cio/chp.h [new file with mode: 0644]
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc.h
drivers/s390/cio/cio.c
drivers/s390/cio/cio.h
drivers/s390/cio/cmf.c
drivers/s390/cio/css.c
drivers/s390/cio/css.h
drivers/s390/cio/device.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/idset.c [new file with mode: 0644]
drivers/s390/cio/idset.h [new file with mode: 0644]
drivers/s390/cio/ioasm.h
drivers/s390/net/claw.c
drivers/s390/net/ctcmain.c
drivers/s390/net/lcs.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_eddp.c
drivers/s390/net/qeth_main.c
drivers/s390/net/qeth_tso.h
drivers/s390/s390mach.c
drivers/s390/sysinfo.c
drivers/sbus/char/envctrl.c
drivers/sbus/char/flash.c
drivers/sbus/char/openprom.c
drivers/sbus/char/vfc_dev.c
drivers/sbus/sbus.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/esp.c [deleted file]
drivers/scsi/esp.h [deleted file]
drivers/scsi/esp_scsi.c [new file with mode: 0644]
drivers/scsi/esp_scsi.h [new file with mode: 0644]
drivers/scsi/qlogicpti.c
drivers/scsi/scsi_netlink.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sun_esp.c [new file with mode: 0644]
drivers/serial/8250.c
drivers/serial/icom.c
drivers/serial/icom.h
drivers/serial/sunsu.c
drivers/usb/atm/usbatm.c
drivers/usb/gadget/ether.c
drivers/usb/net/asix.c
drivers/usb/net/catc.c
drivers/usb/net/gl620a.c
drivers/usb/net/kaweth.c
drivers/usb/net/net1080.c
drivers/usb/net/pegasus.c
drivers/usb/net/pegasus.h
drivers/usb/net/rndis_host.c
drivers/usb/net/rtl8150.c
drivers/usb/net/usbnet.c
drivers/video/Kconfig
drivers/video/aty/atyfb_base.c
drivers/video/aty/radeon_base.c
drivers/video/aty/radeon_monitor.c
drivers/video/aty/radeonfb.h
drivers/video/cg3.c
drivers/video/igafb.c
fs/9p/vfs_inode.c
fs/Kconfig
fs/afs/Makefile
fs/afs/afs.h [new file with mode: 0644]
fs/afs/afs_cm.h [new file with mode: 0644]
fs/afs/afs_fs.h [new file with mode: 0644]
fs/afs/afs_vl.h [moved from fs/afs/vlclient.h with 74% similarity]
fs/afs/cache.c [new file with mode: 0644]
fs/afs/cache.h
fs/afs/callback.c
fs/afs/cell.c
fs/afs/cell.h [deleted file]
fs/afs/cmservice.c
fs/afs/cmservice.h [deleted file]
fs/afs/dir.c
fs/afs/errors.h [deleted file]
fs/afs/file.c
fs/afs/fsclient.c
fs/afs/fsclient.h [deleted file]
fs/afs/inode.c
fs/afs/internal.h
fs/afs/kafsasyncd.c [deleted file]
fs/afs/kafsasyncd.h [deleted file]
fs/afs/kafstimod.c [deleted file]
fs/afs/kafstimod.h [deleted file]
fs/afs/main.c
fs/afs/misc.c
fs/afs/mntpt.c
fs/afs/mount.h [deleted file]
fs/afs/proc.c
fs/afs/rxrpc.c [new file with mode: 0644]
fs/afs/security.c [new file with mode: 0644]
fs/afs/server.c
fs/afs/server.h [deleted file]
fs/afs/super.c
fs/afs/super.h [deleted file]
fs/afs/transport.h [deleted file]
fs/afs/types.h [deleted file]
fs/afs/use-rtnetlink.c [new file with mode: 0644]
fs/afs/vlclient.c
fs/afs/vlocation.c
fs/afs/vnode.c
fs/afs/vnode.h [deleted file]
fs/afs/volume.c
fs/afs/volume.h [deleted file]
fs/compat_ioctl.c
fs/ecryptfs/netlink.c
fs/nfs/write.c
fs/reiserfs/xattr.c
include/asm-alpha/socket.h
include/asm-alpha/sockios.h
include/asm-alpha/string.h
include/asm-arm/div64.h
include/asm-arm/socket.h
include/asm-arm/sockios.h
include/asm-arm26/socket.h
include/asm-arm26/sockios.h
include/asm-avr32/arch-at32ap/io.h [new file with mode: 0644]
include/asm-avr32/arch-at32ap/smc.h
include/asm-avr32/arch-at32ap/time.h [new file with mode: 0644]
include/asm-avr32/atomic.h
include/asm-avr32/bug.h
include/asm-avr32/io.h
include/asm-avr32/processor.h
include/asm-avr32/setup.h
include/asm-avr32/socket.h
include/asm-avr32/sockios.h
include/asm-avr32/sysreg.h
include/asm-avr32/system.h
include/asm-avr32/thread_info.h
include/asm-avr32/uaccess.h
include/asm-cris/socket.h
include/asm-cris/sockios.h
include/asm-frv/socket.h
include/asm-frv/sockios.h
include/asm-generic/div64.h
include/asm-generic/pgtable.h
include/asm-h8300/socket.h
include/asm-h8300/sockios.h
include/asm-i386/div64.h
include/asm-i386/socket.h
include/asm-i386/sockios.h
include/asm-ia64/socket.h
include/asm-ia64/sockios.h
include/asm-m32r/socket.h
include/asm-m32r/sockios.h
include/asm-m68k/div64.h
include/asm-m68k/socket.h
include/asm-m68k/sockios.h
include/asm-mips/bug.h
include/asm-mips/cacheflush.h
include/asm-mips/checksum.h
include/asm-mips/div64.h
include/asm-mips/fpu.h
include/asm-mips/jmr3927/irq.h [deleted file]
include/asm-mips/jmr3927/jmr3927.h
include/asm-mips/jmr3927/tx3927.h
include/asm-mips/jmr3927/txx927.h
include/asm-mips/paccess.h
include/asm-mips/sgi/hpc3.h
include/asm-mips/sgi/ip22.h
include/asm-mips/sgi/mc.h
include/asm-mips/sibyte/bcm1480_int.h
include/asm-mips/sibyte/bcm1480_mc.h
include/asm-mips/sibyte/bcm1480_regs.h
include/asm-mips/sibyte/bcm1480_scd.h
include/asm-mips/sibyte/board.h
include/asm-mips/sibyte/carmel.h
include/asm-mips/sibyte/sb1250_int.h
include/asm-mips/sibyte/sb1250_mac.h
include/asm-mips/sibyte/sb1250_mc.h
include/asm-mips/sibyte/sb1250_regs.h
include/asm-mips/sibyte/sb1250_scd.h
include/asm-mips/sibyte/swarm.h
include/asm-mips/socket.h
include/asm-mips/sockios.h
include/asm-mips/thread_info.h
include/asm-parisc/socket.h
include/asm-parisc/sockios.h
include/asm-powerpc/socket.h
include/asm-powerpc/sockios.h
include/asm-powerpc/string.h
include/asm-s390/bug.h
include/asm-s390/ccwgroup.h
include/asm-s390/chpid.h [new file with mode: 0644]
include/asm-s390/cio.h
include/asm-s390/ipl.h
include/asm-s390/lowcore.h
include/asm-s390/pgtable.h
include/asm-s390/processor.h
include/asm-s390/sclp.h
include/asm-s390/setup.h
include/asm-s390/smp.h
include/asm-s390/socket.h
include/asm-s390/sockios.h
include/asm-sh/socket.h
include/asm-sh/sockios.h
include/asm-sh/string.h
include/asm-sh64/sockios.h
include/asm-sparc/prom.h
include/asm-sparc/socket.h
include/asm-sparc/sockios.h
include/asm-sparc64/cpudata.h
include/asm-sparc64/device.h
include/asm-sparc64/ebus.h
include/asm-sparc64/floppy.h
include/asm-sparc64/io.h
include/asm-sparc64/iommu.h
include/asm-sparc64/isa.h
include/asm-sparc64/parport.h
include/asm-sparc64/pbm.h
include/asm-sparc64/pci.h
include/asm-sparc64/pgtable.h
include/asm-sparc64/prom.h
include/asm-sparc64/smp.h
include/asm-sparc64/socket.h
include/asm-sparc64/sockios.h
include/asm-sparc64/sparsemem.h
include/asm-sparc64/timer.h
include/asm-sparc64/ttable.h
include/asm-um/div64.h
include/asm-v850/socket.h
include/asm-v850/sockios.h
include/asm-x86_64/socket.h
include/asm-x86_64/sockios.h
include/asm-xtensa/div64.h
include/asm-xtensa/socket.h
include/asm-xtensa/sockios.h
include/keys/rxrpc-type.h [new file with mode: 0644]
include/linux/Kbuild
include/linux/atalk.h
include/linux/dccp.h
include/linux/fib_rules.h
include/linux/hdlc.h
include/linux/icmp.h
include/linux/icmpv6.h
include/linux/if_addr.h
include/linux/if_arp.h
include/linux/if_bridge.h
include/linux/if_ether.h
include/linux/if_link.h
include/linux/if_packet.h
include/linux/if_pppox.h
include/linux/if_tr.h
include/linux/if_vlan.h
include/linux/if_wanpipe_common.h [deleted file]
include/linux/igmp.h
include/linux/in.h
include/linux/in6.h
include/linux/ip.h
include/linux/ipv6.h
include/linux/jhash.h
include/linux/key.h
include/linux/ktime.h
include/linux/net.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/nf_conntrack_tcp.h
include/linux/netfilter/nfnetlink.h
include/linux/netfilter/nfnetlink_conntrack.h
include/linux/netfilter_bridge.h
include/linux/netfilter_bridge/ebt_802_3.h
include/linux/netfilter_bridge/ebt_arp.h
include/linux/netfilter_ipv4/Kbuild
include/linux/netfilter_ipv4/ip_conntrack.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_amanda.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_core.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_ftp.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_h323.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_helper.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_icmp.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_irc.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_pptp.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_protocol.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_sctp.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_sip.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_tcp.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_tftp.h [deleted file]
include/linux/netfilter_ipv4/ip_conntrack_tuple.h [deleted file]
include/linux/netfilter_ipv4/ip_nat.h [deleted file]
include/linux/netfilter_ipv4/ip_nat_core.h [deleted file]
include/linux/netfilter_ipv4/ip_nat_helper.h [deleted file]
include/linux/netfilter_ipv4/ip_nat_pptp.h [deleted file]
include/linux/netfilter_ipv4/ip_nat_protocol.h [deleted file]
include/linux/netfilter_ipv4/ip_nat_rule.h [deleted file]
include/linux/netfilter_ipv4/ipt_SAME.h
include/linux/netlink.h
include/linux/nfs_page.h
include/linux/nl80211.h [new file with mode: 0644]
include/linux/page-flags.h
include/linux/rtnetlink.h
include/linux/rxrpc.h [new file with mode: 0644]
include/linux/sctp.h
include/linux/sdla_fr.h [deleted file]
include/linux/skbuff.h
include/linux/socket.h
include/linux/string.h
include/linux/sysctl.h
include/linux/taskstats.h
include/linux/tcp.h
include/linux/udp.h
include/linux/workqueue.h
include/linux/xfrm.h
include/net/addrconf.h
include/net/af_rxrpc.h [new file with mode: 0644]
include/net/ax25.h
include/net/bluetooth/hci.h
include/net/cfg80211.h [new file with mode: 0644]
include/net/cipso_ipv4.h
include/net/compat.h
include/net/dn_fib.h
include/net/dn_route.h
include/net/esp.h
include/net/fib_rules.h
include/net/inet6_hashtables.h
include/net/inet_ecn.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/ip_fib.h
include/net/ipv6.h
include/net/ipx.h
include/net/iw_handler.h
include/net/llc_pdu.h
include/net/neighbour.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_compat.h [deleted file]
include/net/netfilter/nf_conntrack_core.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_conntrack_l3proto.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netfilter/nf_nat_rule.h
include/net/netlink.h
include/net/pkt_cls.h
include/net/pkt_sched.h
include/net/red.h
include/net/rtnetlink.h [new file with mode: 0644]
include/net/sch_generic.h
include/net/sctp/constants.h
include/net/sctp/structs.h
include/net/sctp/ulpevent.h
include/net/sctp/ulpqueue.h
include/net/sctp/user.h
include/net/sock.h
include/net/tcp.h
include/net/tcp_ecn.h
include/net/udp.h
include/net/udplite.h
include/net/wext.h [new file with mode: 0644]
include/net/wireless.h [new file with mode: 0644]
include/net/x25device.h
include/net/xfrm.h
include/rxrpc/call.h [deleted file]
include/rxrpc/connection.h [deleted file]
include/rxrpc/krxiod.h [deleted file]
include/rxrpc/krxsecd.h [deleted file]
include/rxrpc/krxtimod.h [deleted file]
include/rxrpc/message.h [deleted file]
include/rxrpc/packet.h
include/rxrpc/peer.h [deleted file]
include/rxrpc/rxrpc.h [deleted file]
include/rxrpc/transport.h [deleted file]
kernel/audit.c
kernel/hrtimer.c
kernel/sysctl.c
kernel/taskstats.c
kernel/time.c
kernel/timer.c
lib/Kconfig.debug
lib/Makefile
lib/div64.c
lib/kobject_uevent.c
lib/string.c
mm/migrate.c
mm/oom_kill.c
mm/page-writeback.c
mm/rmap.c
net/802/fddi.c
net/802/hippi.c
net/802/psnap.c
net/802/tr.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/Kconfig
net/Makefile
net/appletalk/aarp.c
net/appletalk/ddp.c
net/atm/br2684.c
net/atm/clip.c
net/atm/ioctl.c
net/atm/lec.c
net/atm/mpc.c
net/atm/signaling.c
net/ax25/af_ax25.c
net/ax25/ax25_ds_subr.c
net/ax25/ax25_in.c
net/ax25/ax25_ip.c
net/ax25/ax25_out.c
net/ax25/ax25_subr.c
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/core.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap.c
net/bluetooth/rfcomm/core.c
net/bluetooth/sco.c
net/bridge/br.c
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_input.c
net/bridge/br_ioctl.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_notify.c
net/bridge/br_private.h
net/bridge/br_stp.c
net/bridge/br_stp_bpdu.c
net/bridge/br_stp_if.c
net/bridge/br_sysfs_br.c
net/bridge/br_sysfs_if.c
net/bridge/netfilter/ebt_arp.c
net/bridge/netfilter/ebt_log.c
net/bridge/netfilter/ebt_ulog.c
net/compat.c
net/core/Makefile
net/core/datagram.c
net/core/dev.c
net/core/dev_mcast.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/filter.c
net/core/gen_stats.c
net/core/link_watch.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sysctl_net_core.c
net/core/user_dma.c
net/core/utils.c
net/dccp/ackvec.c
net/dccp/ccids/ccid3.c
net/dccp/ccids/ccid3.h
net/dccp/ccids/lib/loss_interval.c
net/dccp/dccp.h
net/dccp/input.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/dccp/options.c
net/dccp/output.c
net/dccp/probe.c
net/decnet/af_decnet.c
net/decnet/dn_dev.c
net/decnet/dn_fib.c
net/decnet/dn_neigh.c
net/decnet/dn_nsp_in.c
net/decnet/dn_nsp_out.c
net/decnet/dn_route.c
net/decnet/dn_rules.c
net/decnet/dn_table.c
net/decnet/netfilter/dn_rtmsg.c
net/econet/af_econet.c
net/ethernet/eth.c
net/ieee80211/Kconfig
net/ieee80211/ieee80211_crypt_wep.c
net/ieee80211/ieee80211_rx.c
net/ieee80211/ieee80211_tx.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/ah4.c
net/ipv4/arp.c
net/ipv4/cipso_ipv4.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/fib_hash.c
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_diag.c
net/ipv4/inetpeer.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ipcomp.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/ipvs/ip_vs_app.c
net/ipv4/ipvs/ip_vs_core.c
net/ipv4/ipvs/ip_vs_dh.c
net/ipv4/ipvs/ip_vs_ftp.c
net/ipv4/ipvs/ip_vs_lblc.c
net/ipv4/ipvs/ip_vs_lblcr.c
net/ipv4/ipvs/ip_vs_proto_ah.c
net/ipv4/ipvs/ip_vs_proto_tcp.c
net/ipv4/ipvs/ip_vs_proto_udp.c
net/ipv4/ipvs/ip_vs_sh.c
net/ipv4/ipvs/ip_vs_xmit.c
net/ipv4/multipath_drr.c
net/ipv4/netfilter.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/arpt_mangle.c
net/ipv4/netfilter/ip_conntrack_amanda.c [deleted file]
net/ipv4/netfilter/ip_conntrack_core.c [deleted file]
net/ipv4/netfilter/ip_conntrack_ftp.c [deleted file]
net/ipv4/netfilter/ip_conntrack_helper_h323.c [deleted file]
net/ipv4/netfilter/ip_conntrack_helper_pptp.c [deleted file]
net/ipv4/netfilter/ip_conntrack_irc.c [deleted file]
net/ipv4/netfilter/ip_conntrack_netbios_ns.c [deleted file]
net/ipv4/netfilter/ip_conntrack_netlink.c [deleted file]
net/ipv4/netfilter/ip_conntrack_proto_generic.c [deleted file]
net/ipv4/netfilter/ip_conntrack_proto_gre.c [deleted file]
net/ipv4/netfilter/ip_conntrack_proto_icmp.c [deleted file]
net/ipv4/netfilter/ip_conntrack_proto_sctp.c [deleted file]
net/ipv4/netfilter/ip_conntrack_proto_tcp.c [deleted file]
net/ipv4/netfilter/ip_conntrack_proto_udp.c [deleted file]
net/ipv4/netfilter/ip_conntrack_sip.c [deleted file]
net/ipv4/netfilter/ip_conntrack_standalone.c [deleted file]
net/ipv4/netfilter/ip_conntrack_tftp.c [deleted file]
net/ipv4/netfilter/ip_nat_amanda.c [deleted file]
net/ipv4/netfilter/ip_nat_core.c [deleted file]
net/ipv4/netfilter/ip_nat_ftp.c [deleted file]
net/ipv4/netfilter/ip_nat_helper.c [deleted file]
net/ipv4/netfilter/ip_nat_helper_h323.c [deleted file]
net/ipv4/netfilter/ip_nat_helper_pptp.c [deleted file]
net/ipv4/netfilter/ip_nat_irc.c [deleted file]
net/ipv4/netfilter/ip_nat_proto_gre.c [deleted file]
net/ipv4/netfilter/ip_nat_proto_icmp.c [deleted file]
net/ipv4/netfilter/ip_nat_proto_tcp.c [deleted file]
net/ipv4/netfilter/ip_nat_proto_udp.c [deleted file]
net/ipv4/netfilter/ip_nat_proto_unknown.c [deleted file]
net/ipv4/netfilter/ip_nat_rule.c [deleted file]
net/ipv4/netfilter/ip_nat_sip.c [deleted file]
net/ipv4/netfilter/ip_nat_snmp_basic.c [deleted file]
net/ipv4/netfilter/ip_nat_standalone.c [deleted file]
net/ipv4/netfilter/ip_nat_tftp.c [deleted file]
net/ipv4/netfilter/ip_queue.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_ECN.c
net/ipv4/netfilter/ipt_LOG.c
net/ipv4/netfilter/ipt_MASQUERADE.c
net/ipv4/netfilter/ipt_NETMAP.c
net/ipv4/netfilter/ipt_REDIRECT.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/ipt_SAME.c
net/ipv4/netfilter/ipt_TOS.c
net/ipv4/netfilter/ipt_TTL.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/ipt_addrtype.c
net/ipv4/netfilter/ipt_ecn.c
net/ipv4/netfilter/ipt_iprange.c
net/ipv4/netfilter/ipt_recent.c
net/ipv4/netfilter/ipt_tos.c
net/ipv4/netfilter/ipt_ttl.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
net/ipv4/netfilter/nf_nat_core.c
net/ipv4/netfilter/nf_nat_h323.c
net/ipv4/netfilter/nf_nat_helper.c
net/ipv4/netfilter/nf_nat_pptp.c
net/ipv4/netfilter/nf_nat_rule.c
net/ipv4/netfilter/nf_nat_sip.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/netfilter/nf_nat_standalone.c
net/ipv4/proc.c
net/ipv4/protocol.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_hybla.c
net/ipv4/tcp_illinois.c [new file with mode: 0644]
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_lp.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_probe.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_vegas.h [new file with mode: 0644]
net/ipv4/tcp_veno.c
net/ipv4/tcp_westwood.c
net/ipv4/tcp_yeah.c [new file with mode: 0644]
net/ipv4/tcp_yeah.h [new file with mode: 0644]
net/ipv4/udp.c
net/ipv4/udplite.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_beet.c
net/ipv4/xfrm4_mode_transport.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv4/xfrm4_output.c
net/ipv4/xfrm4_policy.c
net/ipv4/xfrm4_tunnel.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ah6.c
net/ipv6/datagram.c
net/ipv6/esp6.c
net/ipv6/exthdrs.c
net/ipv6/fib6_rules.c
net/ipv6/icmp.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ipcomp6.c
net/ipv6/ipv6_sockglue.c
net/ipv6/ipv6_syms.c [deleted file]
net/ipv6/mcast.c
net/ipv6/mip6.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/netfilter/ip6_queue.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_HL.c
net/ipv6/netfilter/ip6t_LOG.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/ip6t_eui64.c
net/ipv6/netfilter/ip6t_hl.c
net/ipv6/netfilter/ip6t_ipv6header.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/proc.c
net/ipv6/protocol.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/udplite.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_mode_beet.c
net/ipv6/xfrm6_mode_ro.c
net/ipv6/xfrm6_mode_transport.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_policy.c
net/ipv6/xfrm6_tunnel.c
net/ipx/af_ipx.c
net/ipx/ipx_route.c
net/irda/af_irda.c
net/irda/ircomm/ircomm_param.c
net/irda/irda_device.c
net/irda/irlan/irlan_common.c
net/irda/irlan/irlan_eth.c
net/irda/irlap_event.c
net/irda/irlap_frame.c
net/irda/irqueue.c
net/irda/irttp.c
net/irda/parameters.c
net/irda/qos.c
net/irda/wrapper.c
net/iucv/af_iucv.c
net/iucv/iucv.c
net/key/af_key.c
net/llc/llc_input.c
net/llc/llc_output.c
net/llc/llc_sap.c
net/netfilter/Kconfig
net/netfilter/core.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_netbios_ns.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_generic.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/x_tables.c
net/netfilter/xt_CONNMARK.c
net/netfilter/xt_CONNSECMARK.c
net/netfilter/xt_DSCP.c
net/netfilter/xt_NOTRACK.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_connbytes.c
net/netfilter/xt_connmark.c
net/netfilter/xt_conntrack.c
net/netfilter/xt_dscp.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_helper.c
net/netfilter/xt_length.c
net/netfilter/xt_limit.c
net/netfilter/xt_mac.c
net/netfilter/xt_pkttype.c
net/netfilter/xt_realm.c
net/netfilter/xt_state.c
net/netlink/af_netlink.c
net/netlink/attr.c
net/netlink/genetlink.c
net/netrom/af_netrom.c
net/netrom/nr_dev.c
net/netrom/nr_in.c
net/netrom/nr_loopback.c
net/netrom/nr_out.c
net/netrom/nr_subr.c
net/packet/af_packet.c
net/rose/af_rose.c
net/rose/rose_loopback.c
net/rose/rose_route.c
net/rxrpc/Kconfig [new file with mode: 0644]
net/rxrpc/Makefile
net/rxrpc/af_rxrpc.c [new file with mode: 0644]
net/rxrpc/ar-accept.c [new file with mode: 0644]
net/rxrpc/ar-ack.c [new file with mode: 0644]
net/rxrpc/ar-call.c [new file with mode: 0644]
net/rxrpc/ar-connection.c [new file with mode: 0644]
net/rxrpc/ar-connevent.c [new file with mode: 0644]
net/rxrpc/ar-error.c [new file with mode: 0644]
net/rxrpc/ar-input.c [new file with mode: 0644]
net/rxrpc/ar-internal.h [new file with mode: 0644]
net/rxrpc/ar-key.c [new file with mode: 0644]
net/rxrpc/ar-local.c [new file with mode: 0644]
net/rxrpc/ar-output.c [new file with mode: 0644]
net/rxrpc/ar-peer.c [new file with mode: 0644]
net/rxrpc/ar-proc.c [new file with mode: 0644]
net/rxrpc/ar-recvmsg.c [new file with mode: 0644]
net/rxrpc/ar-security.c [new file with mode: 0644]
net/rxrpc/ar-skbuff.c [new file with mode: 0644]
net/rxrpc/ar-transport.c [new file with mode: 0644]
net/rxrpc/call.c [deleted file]
net/rxrpc/connection.c [deleted file]
net/rxrpc/internal.h [deleted file]
net/rxrpc/krxiod.c [deleted file]
net/rxrpc/krxsecd.c [deleted file]
net/rxrpc/krxtimod.c [deleted file]
net/rxrpc/main.c [deleted file]
net/rxrpc/peer.c [deleted file]
net/rxrpc/proc.c [deleted file]
net/rxrpc/rxkad.c [new file with mode: 0644]
net/rxrpc/rxrpc_syms.c [deleted file]
net/rxrpc/sysctl.c [deleted file]
net/rxrpc/transport.c [deleted file]
net/sched/Kconfig
net/sched/act_api.c
net/sched/act_gact.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_simple.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_rsvp.c
net/sched/cls_rsvp.h
net/sched/cls_rsvp6.c
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/em_u32.c
net/sched/ematch.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_dsmark.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_ingress.c
net/sched/sch_netem.c
net/sched/sch_prio.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/sched/sch_teql.c
net/sctp/associola.c
net/sctp/debug.c
net/sctp/input.c
net/sctp/inqueue.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/sctp/socket.c
net/sctp/transport.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c
net/socket.c
net/sunrpc/cache.c
net/sunrpc/clnt.c
net/sunrpc/socklib.c
net/sunrpc/svcsock.c
net/sunrpc/xprt.c
net/tipc/config.c
net/tipc/eth_media.c
net/tipc/link.c
net/tipc/msg.h
net/tipc/netlink.c
net/tipc/port.c
net/tipc/socket.c
net/unix/af_unix.c
net/wanrouter/wanmain.c
net/wireless/Kconfig [new file with mode: 0644]
net/wireless/Makefile [new file with mode: 0644]
net/wireless/core.c [new file with mode: 0644]
net/wireless/core.h [new file with mode: 0644]
net/wireless/sysfs.c [new file with mode: 0644]
net/wireless/sysfs.h [new file with mode: 0644]
net/wireless/wext.c [moved from net/core/wireless.c with 56% similarity]
net/x25/af_x25.c
net/x25/x25_dev.c
net/x25/x25_in.c
net/x25/x25_out.c
net/xfrm/xfrm_algo.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
security/keys/keyring.c
security/selinux/hooks.c
security/selinux/netlink.c
sound/sparc/amd7930.c
sound/sparc/cs4231.c

index bf62dbe..ebf9bf8 100644
--- a/.mailmap
+++ b/.mailmap
@@ -67,6 +67,8 @@ Koushik <raghavendra.koushik@neterion.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
 Matthieu CASTET <castet.matthieu@free.fr>
+Michael Buesch <mb@bu3sch.de>
+Michael Buesch <mbuesch@freenet.de>
 Michel Dänzer <michel@tungstengraphics.com>
 Mitesh shah <mshah@teja.com>
 Morten Welinder <terra@gnome.org>
diff --git a/CREDITS b/CREDITS
index 6bd8ab8..dede114 100644
--- a/CREDITS
+++ b/CREDITS
@@ -317,6 +317,12 @@ S: 2322 37th Ave SW
 S: Seattle, Washington 98126-2010
 S: USA
 
+N: Johannes Berg
+E: johannes@sipsolutions.net
+W: http://johannes.sipsolutions.net/
+P: 1024D/9AB78CA5 AD02 0176 4E29 C137 1DF6 08D2 FC44 CF86 9AB7 8CA5
+D: powerpc & 802.11 hacker
+
 N: Stephen R. van den Berg (AKA BuGless)
 E: berg@pool.informatik.rwth-aachen.de
 D: General kernel, gcc, and libc hacker
@@ -2286,14 +2292,14 @@ S: D-90453 Nuernberg
 S: Germany
 
 N: Arnaldo Carvalho de Melo
-E: acme@mandriva.com
 E: acme@ghostprotocols.net
+E: arnaldo.melo@gmail.com
+E: acme@redhat.com
 W: http://oops.ghostprotocols.net:81/blog/
 P: 1024D/9224DF01 D5DF E3BB E3C8 BCBB F8AD  841A B6AB 4681 9224 DF01
 D: IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
-S: Mandriva
-S: R. Tocantins, 89 - Cristo Rei
-S: 80050-430 - Curitiba - Paraná
+S: R. Brasílio Itiberê, 4270/1010 - Água Verde
+S: 80240-060 - Curitiba - Paraná
 S: Brazil
 
 N: Karsten Merker
index 19b4c96..6da6636 100644
@@ -211,15 +211,6 @@ Who:   Adrian Bunk <bunk@stusta.de>
 
 ---------------------------
 
-What:  IPv4 only connection tracking/NAT/helpers
-When:  2.6.22
-Why:   The new layer 3 independant connection tracking replaces the old
-       IPv4 only version. After some stabilization of the new code the
-       old one will be removed.
-Who:   Patrick McHardy <kaber@trash.net>
-
----------------------------
-
 What:  ACPI hooks (X86_SPEEDSTEP_CENTRINO_ACPI) in speedstep-centrino driver
 When:  December 2006
 Why:   Speedstep-centrino driver with ACPI hooks and acpi-cpufreq driver are
@@ -294,18 +285,6 @@ Who:       Richard Purdie <rpurdie@rpsys.net>
 
 ---------------------------
 
-What:  Wireless extensions over netlink (CONFIG_NET_WIRELESS_RTNETLINK)
-When:  with the merge of wireless-dev, 2.6.22 or later
-Why:   The option/code is
-        * not enabled on most kernels
-        * not required by any userspace tools (except an experimental one,
-          and even there only for some parts, others use ioctl)
-        * pointless since wext is no longer evolving and the ioctl
-          interface needs to be kept
-Who:   Johannes Berg <johannes@sipsolutions.net>
-
----------------------------
-
 What:  i8xx_tco watchdog driver
 When:  in 2.6.22
 Why:   the i8xx_tco watchdog driver has been replaced by the iTCO_wdt
@@ -313,3 +292,22 @@ Why:       the i8xx_tco watchdog driver has been replaced by the iTCO_wdt
 Who:   Wim Van Sebroeck <wim@iguana.be>
 
 ---------------------------
+
+What:  Multipath cached routing support in ipv4
+When:  in 2.6.23
+Why:   Code was merged, then submitter immediately disappeared leaving
+       us with no maintainer and lots of bugs.  The code should not have
+       been merged in the first place, and many aspects of its
+       implementation are blocking more critical core networking
+       development.  It's marked EXPERIMENTAL and no distribution
+       enables it because it causes obscure crashes due to unfixable bugs
+       (interfaces don't return errors so memory allocation can't be
+       handled, calling contexts of these interfaces make handling
+       errors impossible too because they get called after we've
+       totally committed to creating a route object, for example).
+       This problem has existed for years and no forward progress
+       has ever been made, and nobody steps up to try and salvage
+       this code, so we're going to finally just get rid of it.
+Who:   David S. Miller <davem@davemloft.net>
+
+---------------------------
index 2f4237d..12ad6c7 100644
@@ -1,31 +1,82 @@
+                            ====================
                             kAFS: AFS FILESYSTEM
                             ====================
 
-ABOUT
-=====
+Contents:
+
+ - Overview.
+ - Usage.
+ - Mountpoints.
+ - Proc filesystem.
+ - The cell database.
+ - Security.
+ - Examples.
+
+
+========
+OVERVIEW
+========
 
-This filesystem provides a fairly simple AFS filesystem driver. It is under
-development and only provides very basic facilities. It does not yet support
-the following AFS features:
+This filesystem provides a fairly simple secure AFS filesystem driver. It is
+under development and does not yet provide the full feature set.  The features
+it does support include:
 
-       (*) Write support.
-       (*) Communications security.
-       (*) Local caching.
-       (*) pioctl() system call.
-       (*) Automatic mounting of embedded mountpoints.
+ (*) Security (currently only AFS kaserver and KerberosIV tickets).
 
+ (*) File reading.
 
+ (*) Automounting.
+
+It does not yet support the following AFS features:
+
+ (*) Write support.
+
+ (*) Local caching.
+
+ (*) pioctl() system call.
+
+
+===========
+COMPILATION
+===========
+
+The filesystem should be enabled by turning on the kernel configuration
+options:
+
+       CONFIG_AF_RXRPC         - The RxRPC protocol transport
+       CONFIG_RXKAD            - The RxRPC Kerberos security handler
+       CONFIG_AFS              - The AFS filesystem
+
+Additionally, the following can be turned on to aid debugging:
+
+       CONFIG_AF_RXRPC_DEBUG   - Permit AF_RXRPC debugging to be enabled
+       CONFIG_AFS_DEBUG        - Permit AFS debugging to be enabled
+
+They permit the debugging messages to be turned on dynamically by manipulating
+the masks in the following files:
+
+       /sys/module/af_rxrpc/parameters/debug
+       /sys/module/afs/parameters/debug
+
+
+=====
 USAGE
 =====
 
 When inserting the driver modules the root cell must be specified along with a
 list of volume location server IP addresses:
 
-       insmod rxrpc.o
+       insmod af_rxrpc.o
+       insmod rxkad.o
        insmod kafs.o rootcell=cambridge.redhat.com:172.16.18.73:172.16.18.91
 
-The first module is a driver for the RxRPC remote operation protocol, and the
-second is the actual filesystem driver for the AFS filesystem.
+The first module is the AF_RXRPC network protocol driver.  This provides the
+RxRPC remote operation protocol and may also be accessed from userspace.  See:
+
+       Documentation/networking/rxrpc.txt
+
+The second module is the Kerberos RxRPC security driver, and the third module
+is the actual filesystem driver for the AFS filesystem.
 
 Once the module has been loaded, more modules can be added by the following
 procedure:
@@ -33,7 +84,7 @@ procedure:
        echo add grand.central.org 18.7.14.88:128.2.191.224 >/proc/fs/afs/cells
 
 Where the parameters to the "add" command are the name of a cell and a list of
-volume location servers within that cell.
+volume location servers within that cell, with the latter separated by colons.
 
 Filesystems can be mounted anywhere by commands similar to the following:
 
@@ -42,11 +93,6 @@ Filesystems can be mounted anywhere by commands similar to the following:
        mount -t afs "#root.afs." /afs
        mount -t afs "#root.cell." /afs/cambridge
 
-  NB: When using this on Linux 2.4, the mount command has to be different,
-      since the filesystem doesn't have access to the device name argument:
-
-       mount -t afs none /afs -ovol="#root.afs."
-
 Where the initial character is either a hash or a percent symbol depending on
 whether you definitely want a R/W volume (hash) or whether you'd prefer a R/O
 volume, but are willing to use a R/W volume instead (percent).
@@ -60,55 +106,66 @@ named volume will be looked up in the cell specified during insmod.
 Additional cells can be added through /proc (see later section).
 
 
+===========
 MOUNTPOINTS
 ===========
 
-AFS has a concept of mountpoints. These are specially formatted symbolic links
-(of the same form as the "device name" passed to mount). kAFS presents these
-to the user as directories that have special properties:
+AFS has a concept of mountpoints. In AFS terms, these are specially formatted
+symbolic links (of the same form as the "device name" passed to mount).  kAFS
+presents these to the user as directories that have a follow-link capability
+(ie: symbolic link semantics).  If anyone attempts to access them, they will
+automatically cause the target volume to be mounted (if possible) on that site.
 
-  (*) They cannot be listed. Running a program like "ls" on them will incur an
-      EREMOTE error (Object is remote).
+Automatically mounted filesystems will be automatically unmounted approximately
+twenty minutes after they were last used.  Alternatively they can be unmounted
+directly with the umount() system call.
 
-  (*) Other objects can't be looked up inside of them. This also incurs an
-      EREMOTE error.
+Manually unmounting an AFS volume will cause any idle submounts upon it to be
+culled first.  If all are culled, then the requested volume will also be
+unmounted, otherwise error EBUSY will be returned.
 
-  (*) They can be queried with the readlink() system call, which will return
-      the name of the mountpoint to which they point. The "readlink" program
-      will also work.
+This can be used by the administrator to attempt to unmount the whole AFS tree
+mounted on /afs in one go by doing:
 
-  (*) They can be mounted on (which symbolic links can't).
+       umount /afs
 
 
+===============
 PROC FILESYSTEM
 ===============
 
-The rxrpc module creates a number of files in various places in the /proc
-filesystem:
-
-  (*) Firstly, some information files are made available in a directory called
-      "/proc/net/rxrpc/". These list the extant transport endpoint, peer,
-      connection and call records.
-
-  (*) Secondly, some control files are made available in a directory called
-      "/proc/sys/rxrpc/". Currently, all these files can be used for is to
-      turn on various levels of tracing.
-
 The AFS modules creates a "/proc/fs/afs/" directory and populates it:
 
-  (*) A "cells" file that lists cells currently known to the afs module.
+  (*) A "cells" file that lists cells currently known to the afs module and
+      their usage counts:
+
+       [root@andromeda ~]# cat /proc/fs/afs/cells
+       USE NAME
+         3 cambridge.redhat.com
 
   (*) A directory per cell that contains files that list volume location
       servers, volumes, and active servers known within that cell.
 
+       [root@andromeda ~]# cat /proc/fs/afs/cambridge.redhat.com/servers
+       USE ADDR            STATE
+         4 172.16.18.91        0
+       [root@andromeda ~]# cat /proc/fs/afs/cambridge.redhat.com/vlservers
+       ADDRESS
+       172.16.18.91
+       [root@andromeda ~]# cat /proc/fs/afs/cambridge.redhat.com/volumes
+       USE STT VLID[0]  VLID[1]  VLID[2]  NAME
+         1 Val 20000000 20000001 20000002 root.afs
 
+
+=================
 THE CELL DATABASE
 =================
 
-The filesystem maintains an internal database of all the cells it knows and
-the IP addresses of the volume location servers for those cells. The cell to
-which the computer belongs is added to the database when insmod is performed
-by the "rootcell=" argument.
+The filesystem maintains an internal database of all the cells it knows and the
+IP addresses of the volume location servers for those cells.  The cell to which
+the system belongs is added to the database when insmod is performed by the
+"rootcell=" argument or, if compiled in, using a "kafs.rootcell=" argument on
+the kernel command line.
 
 Further cells can be added by commands similar to the following:
 
@@ -118,20 +175,65 @@ Further cells can be added by commands similar to the following:
 No other cell database operations are available at this time.
 
 
+========
+SECURITY
+========
+
+Secure operations are initiated by acquiring a key using the klog program.  A
+very primitive klog program is available at:
+
+       http://people.redhat.com/~dhowells/rxrpc/klog.c
+
+This should be compiled by:
+
+       make klog LDLIBS="-lcrypto -lcrypt -lkrb4 -lkeyutils"
+
+And then run as:
+
+       ./klog
+
+Assuming it's successful, this adds a key of type RxRPC, named for the service
+and cell, eg: "afs@<cellname>".  This can be viewed with the keyctl program or
+by cat'ing /proc/keys:
+
+       [root@andromeda ~]# keyctl show
+       Session Keyring
+              -3 --alswrv      0     0  keyring: _ses.3268
+               2 --alswrv      0     0   \_ keyring: _uid.0
+       111416553 --als--v      0     0   \_ rxrpc: afs@CAMBRIDGE.REDHAT.COM
+
+Currently the username, realm, password and proposed ticket lifetime are
+compiled in to the program.
+
+It is not required to acquire a key before using AFS facilities, but if one is
+not acquired then all operations will be governed by the anonymous user parts
+of the ACLs.
+
+If a key is acquired, then all AFS operations, including mounts and automounts,
+made by a possessor of that key will be secured with that key.
+
+If a file is opened with a particular key and then the file descriptor is
+passed to a process that doesn't have that key (perhaps over an AF_UNIX
+socket), then the operations on the file will be made with key that was used to
+open the file.
+
+
+========
 EXAMPLES
 ========
 
-Here's what I use to test this. Some of the names and IP addresses are local
-to my internal DNS. My "root.afs" partition has a mount point within it for
+Here's what I use to test this.  Some of the names and IP addresses are local
+to my internal DNS.  My "root.afs" partition has a mount point within it for
 some public volumes volumes.
 
-insmod -S /tmp/rxrpc.o 
-insmod -S /tmp/kafs.o rootcell=cambridge.redhat.com:172.16.18.73:172.16.18.91
+insmod /tmp/rxrpc.o
+insmod /tmp/rxkad.o
+insmod /tmp/kafs.o rootcell=cambridge.redhat.com:172.16.18.91
 
 mount -t afs \%root.afs. /afs
 mount -t afs \%cambridge.redhat.com:root.cell. /afs/cambridge.redhat.com/
 
-echo add grand.central.org 18.7.14.88:128.2.191.224 > /proc/fs/afs/cells 
+echo add grand.central.org 18.7.14.88:128.2.191.224 > /proc/fs/afs/cells
 mount -t afs "#grand.central.org:root.cell." /afs/grand.central.org/
 mount -t afs "#grand.central.org:root.archive." /afs/grand.central.org/archive
 mount -t afs "#grand.central.org:root.contrib." /afs/grand.central.org/contrib
@@ -141,15 +243,7 @@ mount -t afs "#grand.central.org:root.service." /afs/grand.central.org/service
 mount -t afs "#grand.central.org:root.software." /afs/grand.central.org/software
 mount -t afs "#grand.central.org:root.user." /afs/grand.central.org/user
 
-umount /afs/grand.central.org/user
-umount /afs/grand.central.org/software
-umount /afs/grand.central.org/service
-umount /afs/grand.central.org/project
-umount /afs/grand.central.org/doc
-umount /afs/grand.central.org/contrib
-umount /afs/grand.central.org/archive
-umount /afs/grand.central.org
-umount /afs/cambridge.redhat.com
 umount /afs
 rmmod kafs
+rmmod rxkad
 rmmod rxrpc
index 5484ab5..7aaf09b 100644
@@ -1421,6 +1421,15 @@ fewer messages that will be written. Message_burst controls when messages will
 be dropped.  The  default  settings  limit  warning messages to one every five
 seconds.
 
+warnings
+--------
+
+This controls console messages from the networking stack that can occur because
+of problems on the network, such as a duplicate address or bad checksums. Normally,
+this should be enabled, but if the problem persists the messages can be
+disabled.
+
+
 netdev_max_backlog
 ------------------
 
index 60c665d..81d9aa0 100644
@@ -859,6 +859,18 @@ payload contents" for more information.
        void unregister_key_type(struct key_type *type);
 
 
+Under some circumstances, it may be desirable to deal with a
+bundle of keys.  The facility provides access to the keyring type for managing
+such a bundle:
+
+       struct key_type key_type_keyring;
+
+This can be used with a function such as request_key() to find a specific
+keyring in a process's keyrings.  A keyring thus found can then be searched
+with keyring_search().  Note that it is not possible to use request_key() to
+search a specific keyring, so using keyrings in this way is of limited utility.
+
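+As a purely illustrative sketch (the keyring name "myring", the "user" key
+"mykey" and the error handling are assumptions, not part of the API
+description), such a lookup might look like:
+
+        struct key *keyring;
+        key_ref_t kref;
+
+        /* find a keyring called "myring" in the process's keyrings */
+        keyring = request_key(&key_type_keyring, "myring", NULL);
+        if (!IS_ERR(keyring)) {
+                /* search that keyring for a "user" type key called "mykey" */
+                kref = keyring_search(make_key_ref(keyring, 1UL),
+                                      &key_type_user, "mykey");
+                if (!IS_ERR(kref))
+                        key_ref_put(kref);
+                key_put(keyring);
+        }
+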
+
 ===================================
 NOTES ON ACCESSING PAYLOAD CONTENTS
 ===================================
index de809e5..1da5666 100644 (file)
@@ -920,40 +920,9 @@ options, you may wish to use the "max_bonds" module parameter,
 documented above.
 
        To create multiple bonding devices with differing options, it
-is necessary to load the bonding driver multiple times.  Note that
-current versions of the sysconfig network initialization scripts
-handle this automatically; if your distro uses these scripts, no
-special action is needed.  See the section Configuring Bonding
-Devices, above, if you're not sure about your network initialization
-scripts.
-
-       To load multiple instances of the module, it is necessary to
-specify a different name for each instance (the module loading system
-requires that every loaded module, even multiple instances of the same
-module, have a unique name).  This is accomplished by supplying
-multiple sets of bonding options in /etc/modprobe.conf, for example:
-       
-alias bond0 bonding
-options bond0 -o bond0 mode=balance-rr miimon=100
-
-alias bond1 bonding
-options bond1 -o bond1 mode=balance-alb miimon=50
-
-       will load the bonding module two times.  The first instance is
-named "bond0" and creates the bond0 device in balance-rr mode with an
-miimon of 100.  The second instance is named "bond1" and creates the
-bond1 device in balance-alb mode with an miimon of 50.
-
-       In some circumstances (typically with older distributions),
-the above does not work, and the second bonding instance never sees
-its options.  In that case, the second options line can be substituted
-as follows:
-
-install bond1 /sbin/modprobe --ignore-install bonding -o bond1 \
-       mode=balance-alb miimon=50
+is necessary to use bonding parameters exported by sysfs, documented
+in the section below.
 
-       This may be repeated any number of times, specifying a new and
-unique name in place of bond1 for each subsequent instance.
 
 3.4 Configuring Bonding Manually via Sysfs
 ------------------------------------------
index 387482e..4504cc5 100644 (file)
@@ -57,6 +57,16 @@ DCCP_SOCKOPT_SEND_CSCOV is for the receiver and has a different meaning: it
        coverage value are also acceptable. The higher the number, the more
        restrictive this setting (see [RFC 4340, sec. 9.2.1]).
 
+The following two options apply to CCID 3 exclusively and are getsockopt()-only.
+In either case, a TFRC info struct (defined in <linux/tfrc.h>) is returned.
+DCCP_SOCKOPT_CCID_RX_INFO
+       Returns a `struct tfrc_rx_info' in optval; the buffer for optval and
+       optlen must be set to at least sizeof(struct tfrc_rx_info).
+DCCP_SOCKOPT_CCID_TX_INFO
+       Returns a `struct tfrc_tx_info' in optval; the buffer for optval and
+       optlen must be set to at least sizeof(struct tfrc_tx_info).
+
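+For illustration, a hedged sketch of reading the receiver statistics on an
+established CCID 3 socket (the descriptor "fd" and the error handling are
+assumptions):
+
+        /* struct tfrc_rx_info comes from <linux/tfrc.h> */
+        struct tfrc_rx_info rx;
+        socklen_t len = sizeof(rx);
+
+        if (getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID_RX_INFO, &rx, &len) < 0)
+                perror("getsockopt(DCCP_SOCKOPT_CCID_RX_INFO)");
+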
+
 Sysctl variables
 ================
 Several DCCP default parameters can be managed by the following sysctls
index d3aae1f..af6a63a 100644 (file)
@@ -179,11 +179,31 @@ tcp_fin_timeout - INTEGER
        because they eat maximum 1.5K of memory, but they tend
        to live longer. Cf. tcp_max_orphans.
 
-tcp_frto - BOOLEAN
+tcp_frto - INTEGER
        Enables F-RTO, an enhanced recovery algorithm for TCP retransmission
        timeouts.  It is particularly beneficial in wireless environments
        where packet loss is typically due to random radio interference
-       rather than intermediate router congestion.
+       rather than intermediate router congestion. If set to 1, the basic
+       version is enabled. 2 enables SACK enhanced F-RTO, which is
+       EXPERIMENTAL. The basic version can also be used when SACK is
+       enabled for a flow through the tcp_sack sysctl.
+
+tcp_frto_response - INTEGER
+       When F-RTO has detected that a TCP retransmission timeout was
+       spurious (i.e, the timeout would have been avoided had TCP set a
+       longer retransmission timeout), TCP has several options what to do
+       next. Possible values are:
+               0 Rate halving based; a smooth and conservative response,
+                 results in halved cwnd and ssthresh after one RTT
+               1 Very conservative response; not recommended because even
+                 though being valid, it interacts poorly with the rest of
+                 Linux TCP, halves cwnd and ssthresh immediately
+               2 Aggressive response; undoes congestion control measures
+                 that are now known to be unnecessary (ignoring the
+                 possibility of a lost retransmission that would require
+                 TCP to be more cautious), cwnd and ssthresh are restored
+                 to the values prior to the timeout
+       Default: 0 (rate halving based)
 
 tcp_keepalive_time - INTEGER
        How often TCP sends out keepalive messages when keepalive is enabled.
@@ -851,6 +871,15 @@ accept_redirects - BOOLEAN
        Functional default: enabled if local forwarding is disabled.
                            disabled if local forwarding is enabled.
 
+accept_source_route - INTEGER
+       Accept source routing (routing extension header).
+
+       > 0: Accept routing header.
+       = 0: Accept only routing header type 2.
+       < 0: Do not accept routing header.
+
+       Default: 0
+
 autoconf - BOOLEAN
        Autoconfigure addresses using Prefix Information in Router 
        Advertisements.
@@ -986,7 +1015,12 @@ bridge-nf-call-ip6tables - BOOLEAN
        Default: 1
 
 bridge-nf-filter-vlan-tagged - BOOLEAN
-       1 : pass bridged vlan-tagged ARP/IP traffic to arptables/iptables.
+       1 : pass bridged vlan-tagged ARP/IP/IPv6 traffic to {arp,ip,ip6}tables.
+       0 : disable this.
+       Default: 1
+
+bridge-nf-filter-pppoe-tagged - BOOLEAN
+       1 : pass bridged pppoe-tagged IP/IPv6 traffic to {ip,ip6}tables.
        0 : disable this.
        Default: 1
 
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
new file mode 100644 (file)
index 0000000..cae231b
--- /dev/null
@@ -0,0 +1,859 @@
+                           ======================
+                           RxRPC NETWORK PROTOCOL
+                           ======================
+
+The RxRPC protocol driver provides a reliable two-phase transport on top of UDP
+that can be used to perform RxRPC remote operations.  This is done over sockets
+of AF_RXRPC family, using sendmsg() and recvmsg() with control data to send and
+receive data, aborts and errors.
+
+Contents of this document:
+
+ (*) Overview.
+
+ (*) RxRPC protocol summary.
+
+ (*) AF_RXRPC driver model.
+
+ (*) Control messages.
+
+ (*) Socket options.
+
+ (*) Security.
+
+ (*) Example client usage.
+
+ (*) Example server usage.
+
+ (*) AF_RXRPC kernel interface.
+
+
+========
+OVERVIEW
+========
+
+RxRPC is a two-layer protocol.  There is a session layer which provides
+reliable virtual connections using UDP over IPv4 (or IPv6) as the transport
+layer, but implements a real network protocol; and there's the presentation
+layer which renders structured data to binary blobs and back again using XDR
+(as does SunRPC):
+
+               +-------------+
+               | Application |
+               +-------------+
+               |     XDR     |         Presentation
+               +-------------+
+               |    RxRPC    |         Session
+               +-------------+
+               |     UDP     |         Transport
+               +-------------+
+
+
+AF_RXRPC provides:
+
+ (1) Part of an RxRPC facility for both kernel and userspace applications by
+     making the session part of it a Linux network protocol (AF_RXRPC).
+
+ (2) A two-phase protocol.  The client transmits a blob (the request) and then
+     receives a blob (the reply), and the server receives the request and then
+     transmits the reply.
+
+ (3) Retention of the reusable bits of the transport system set up for one call
+     to speed up subsequent calls.
+
+ (4) A secure protocol, using the Linux kernel's key retention facility to
+     manage security on the client end.  The server end must of necessity be
+     more active in security negotiations.
+
+AF_RXRPC does not provide XDR marshalling/presentation facilities.  That is
+left to the application.  AF_RXRPC only deals in blobs.  Even the operation ID
+is just the first four bytes of the request blob, and as such is beyond the
+kernel's interest.
+
+
+Sockets of AF_RXRPC family are:
+
+ (1) created as type SOCK_DGRAM;
+
+ (2) provided with a protocol of the type of underlying transport they're going
+     to use - currently only PF_INET is supported.
+
+
+The Andrew File System (AFS) is an example of an application that uses this and
+that has both kernel (filesystem) and userspace (utility) components.
+
+
+======================
+RXRPC PROTOCOL SUMMARY
+======================
+
+An overview of the RxRPC protocol:
+
+ (*) RxRPC sits on top of another networking protocol (UDP is the only option
+     currently), and uses this to provide network transport.  UDP ports, for
+     example, provide transport endpoints.
+
+ (*) RxRPC supports multiple virtual "connections" from any given transport
+     endpoint, thus allowing the endpoints to be shared, even to the same
+     remote endpoint.
+
+ (*) Each connection goes to a particular "service".  A connection may not go
+     to multiple services.  A service may be considered the RxRPC equivalent of
+     a port number.  AF_RXRPC permits multiple services to share an endpoint.
+
+ (*) Client-originating packets are marked, thus a transport endpoint can be
+     shared between client and server connections (connections have a
+     direction).
+
+ (*) Up to a billion connections may be supported concurrently between one
+     local transport endpoint and one service on one remote endpoint.  An RxRPC
+     connection is described by seven numbers:
+
+       Local address   }
+       Local port      } Transport (UDP) address
+       Remote address  }
+       Remote port     }
+       Direction
+       Connection ID
+       Service ID
+
+ (*) Each RxRPC operation is a "call".  A connection may make up to four
+     billion calls, but only up to four calls may be in progress on a
+     connection at any one time.
+
+ (*) Calls are two-phase and asymmetric: the client sends its request data,
+     which the service receives; then the service transmits the reply data
+     which the client receives.
+
+ (*) The data blobs are of indefinite size; the end of a phase is marked with a
+     flag in the packet.  The number of packets of data making up one blob may
+     not exceed 4 billion, however, as this would cause the sequence number to
+     wrap.
+
+ (*) The first four bytes of the request data are the service operation ID.
+
+ (*) Security is negotiated on a per-connection basis.  The connection is
+     initiated by the first data packet on it arriving.  If security is
+     requested, the server then issues a "challenge" and then the client
+     replies with a "response".  If the response is successful, the security is
+     set for the lifetime of that connection, and all subsequent calls made
+     upon it use that same security.  In the event that the server lets a
+     connection lapse before the client, the security will be renegotiated if
+     the client uses the connection again.
+
+ (*) Calls use ACK packets to handle reliability.  Data packets are also
+     explicitly sequenced per call.
+
+ (*) There are two types of positive acknowledgement: hard-ACKs and soft-ACKs.
+     A hard-ACK indicates to the far side that all the data up to a point
+     has been received and processed; a soft-ACK indicates that the data has
+     been received but may yet be discarded and re-requested.  The sender may
+     not discard any transmittable packets until they've been hard-ACK'd.
+
+ (*) Reception of a reply data packet implicitly hard-ACK's all the data
+     packets that make up the request.
+
+ (*) A call is complete when the request has been sent, the reply has been
+     received and the final hard-ACK on the last packet of the reply has
+     reached the server.
+
+ (*) A call may be aborted by either end at any time up to its completion.
+
+
+=====================
+AF_RXRPC DRIVER MODEL
+=====================
+
+About the AF_RXRPC driver:
+
+ (*) The AF_RXRPC protocol transparently uses internal sockets of the transport
+     protocol to represent transport endpoints.
+
+ (*) AF_RXRPC sockets map onto RxRPC connection bundles.  Actual RxRPC
+     connections are handled transparently.  One client socket may be used to
+     make multiple simultaneous calls to the same service.  One server socket
+     may handle calls from many clients.
+
+ (*) Additional parallel client connections will be initiated to support extra
+     concurrent calls, up to a tunable limit.
+
+ (*) Each connection is retained for a certain amount of time [tunable] after
+     the last call currently using it has completed in case a new call is made
+     that could reuse it.
+
+ (*) Each internal UDP socket is retained for a certain amount of time
+     [tunable] after the last connection using it was discarded, in case a new
+     connection is made that could use it.
+
+ (*) A client-side connection is only shared between calls if they have
+     the same key struct describing their security (and assuming the calls
+     would otherwise share the connection).  Non-secured calls would also be
+     able to share connections with each other.
+
+ (*) A server-side connection is shared if the client says it is.
+
+ (*) ACK'ing is handled by the protocol driver automatically, including ping
+     replying.
+
+ (*) SO_KEEPALIVE automatically pings the other side to keep the connection
+     alive [TODO].
+
+ (*) If an ICMP error is received, all calls affected by that error will be
+     aborted with an appropriate network error passed through recvmsg().
+
+
+Interaction with the user of the RxRPC socket:
+
+ (*) A socket is made into a server socket by binding an address with a
+     non-zero service ID.
+
+ (*) In the client, sending a request is achieved with one or more sendmsgs,
+     followed by the reply being received with one or more recvmsgs.
+
+ (*) The first sendmsg for a request to be sent from a client contains a tag to
+     be used in all other sendmsgs or recvmsgs associated with that call.  The
+     tag is carried in the control data.
+
+ (*) connect() is used to supply a default destination address for a client
+     socket.  This may be overridden by supplying an alternate address to the
+     first sendmsg() of a call (struct msghdr::msg_name).
+
+ (*) If connect() is called on an unbound client, a random local port will
+     be bound before the operation takes place.
+
+ (*) A server socket may also be used to make client calls.  To do this, the
+     first sendmsg() of the call must specify the target address.  The server's
+     transport endpoint is used to send the packets.
+
+ (*) Once the application has received the last message associated with a call,
+     the tag is guaranteed not to be seen again, and so it can be used to pin
+     client resources.  A new call can then be initiated with the same tag
+     without fear of interference.
+
+ (*) In the server, a request is received with one or more recvmsgs, then the
+     reply is transmitted with one or more sendmsgs, and then the final ACK
+     is received with a last recvmsg.
+
+ (*) When sending data for a call, sendmsg is given MSG_MORE if there's more
+     data to come on that call.
+
+ (*) When receiving data for a call, recvmsg flags MSG_MORE if there's more
+     data to come for that call.
+
+ (*) When receiving data or messages for a call, MSG_EOR is flagged by recvmsg
+     to indicate the terminal message for that call.
+
+ (*) A call may be aborted by adding an abort control message to the control
+     data.  Issuing an abort terminates the kernel's use of that call's tag.
+     Any messages waiting in the receive queue for that call will be discarded.
+
+ (*) Aborts, busy notifications and challenge packets are delivered by recvmsg,
+     and control data messages will be set to indicate the context.  Receiving
+     an abort or a busy message terminates the kernel's use of that call's tag.
+
+ (*) The control data part of the msghdr struct is used for a number of things:
+
+     (*) The tag of the intended or affected call.
+
+     (*) Sending or receiving errors, aborts and busy notifications.
+
+     (*) Notifications of incoming calls.
+
+     (*) Sending debug requests and receiving debug replies [TODO].
+
+ (*) When the kernel has received and set up an incoming call, it sends a
+     message to the server application to let it know there's a new call
+     awaiting its acceptance [recvmsg reports a special control message].  The
+     server application then uses sendmsg to assign a tag to the new call.
+     Once that is done, the first part of the request data will be delivered by
+     recvmsg.
+
+ (*) The server application has to provide the server socket with a keyring of
+     secret keys corresponding to the security types it permits.  When a secure
+     connection is being set up, the kernel looks up the appropriate secret key
+     in the keyring and then sends a challenge packet to the client and
+     receives a response packet.  The kernel then checks the authorisation of
+     the packet and either aborts the connection or sets up the security.
+
+ (*) The name of the key a client will use to secure its communications is
+     nominated by a socket option.
+
+
+Notes on recvmsg (a short receive-loop sketch follows these notes):
+
+ (1) If there's a sequence of data messages belonging to a particular call on
+     the receive queue, then recvmsg will keep working through them until:
+
+     (a) it meets the end of that call's received data,
+
+     (b) it meets a non-data message,
+
+     (c) it meets a message belonging to a different call, or
+
+     (d) it fills the user buffer.
+
+     If recvmsg is called in blocking mode, it will keep sleeping, awaiting the
+     reception of further data, until one of the above four conditions is met.
+
+ (2) MSG_PEEK operates similarly, but will return immediately if it has put any
+     data in the buffer rather than sleeping until it can fill the buffer.
+
+ (3) If a data message is only partially consumed in filling a user buffer,
+     then the remainder of that message will be left on the front of the queue
+     for the next taker.  MSG_TRUNC will never be flagged.
+
+ (4) If there is more data to be had on a call (it hasn't copied the last byte
+     of the last data message in that phase yet), then MSG_MORE will be
+     flagged.
+
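+As a hedged illustration of these rules, a minimal userspace receive loop might
+look like the following (the socket "client", the buffer sizes and the omitted
+control-data parsing are assumptions):
+
+        char buf[4096], ctrl[256];
+        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
+        struct msghdr msg;
+        ssize_t n;
+
+        do {
+                memset(&msg, 0, sizeof(msg));
+                msg.msg_iov = &iov;
+                msg.msg_iovlen = 1;
+                msg.msg_control = ctrl;
+                msg.msg_controllen = sizeof(ctrl);
+
+                n = recvmsg(client, &msg, 0);
+                if (n < 0)
+                        break;
+
+                /* consume n bytes of data here; the RXRPC_USER_CALL_ID
+                 * control message identifies the call the data belongs to */
+        } while (!(msg.msg_flags & MSG_EOR));
+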
+
+================
+CONTROL MESSAGES
+================
+
+AF_RXRPC makes use of control messages in sendmsg() and recvmsg() to multiplex
+calls, to invoke certain actions and to report certain conditions.  These are:
+
+       MESSAGE ID              SRT DATA        MEANING
+       ======================= === =========== ===============================
+       RXRPC_USER_CALL_ID      sr- User ID     App's call specifier
+       RXRPC_ABORT             srt Abort code  Abort code to issue/received
+       RXRPC_ACK               -rt n/a         Final ACK received
+       RXRPC_NET_ERROR         -rt error num   Network error on call
+       RXRPC_BUSY              -rt n/a         Call rejected (server busy)
+       RXRPC_LOCAL_ERROR       -rt error num   Local error encountered
+       RXRPC_NEW_CALL          -r- n/a         New call received
+       RXRPC_ACCEPT            s-- n/a         Accept new call
+
+       (SRT = usable in Sendmsg / delivered by Recvmsg / Terminal message)
+
+ (*) RXRPC_USER_CALL_ID
+
+     This is used to indicate the application's call ID.  It's an unsigned long
+     that the app specifies in the client by attaching it to the first data
+     message or in the server by passing it in association with an RXRPC_ACCEPT
+     message.  recvmsg() passes it in conjunction with all messages except
+     those of the RXRPC_NEW_CALL message.
+
+ (*) RXRPC_ABORT
+
+     This can be used by an application to abort a call by passing it to
+     sendmsg, or it can be delivered by recvmsg to indicate a remote abort was
+     received.  Either way, it must be associated with an RXRPC_USER_CALL_ID to
+     specify the call affected.  If an abort is being sent, then error EBADSLT
+     will be returned if there is no call with that user ID.
+
+ (*) RXRPC_ACK
+
+     This is delivered to a server application to indicate that the final ACK
+     of a call was received from the client.  It will be associated with an
+     RXRPC_USER_CALL_ID to indicate the call that's now complete.
+
+ (*) RXRPC_NET_ERROR
+
+     This is delivered to an application to indicate that an ICMP error message
+     was encountered in the process of trying to talk to the peer.  An
+     errno-class integer value will be included in the control message data
+     indicating the problem, and an RXRPC_USER_CALL_ID will indicate the call
+     affected.
+
+ (*) RXRPC_BUSY
+
+     This is delivered to a client application to indicate that a call was
+     rejected by the server due to the server being busy.  It will be
+     associated with an RXRPC_USER_CALL_ID to indicate the rejected call.
+
+ (*) RXRPC_LOCAL_ERROR
+
+     This is delivered to an application to indicate that a local error was
+     encountered and that a call has been aborted because of it.  An
+     errno-class integer value will be included in the control message data
+     indicating the problem, and an RXRPC_USER_CALL_ID will indicate the call
+     affected.
+
+ (*) RXRPC_NEW_CALL
+
+     This is delivered to indicate to a server application that a new call has
+     arrived and is awaiting acceptance.  No user ID is associated with this,
+     as a user ID must subsequently be assigned by doing an RXRPC_ACCEPT.
+
+ (*) RXRPC_ACCEPT
+
+     This is used by a server application to attempt to accept a call and
+     assign it a user ID.  It should be associated with an RXRPC_USER_CALL_ID
+     to indicate the user ID to be assigned.  If there is no call to be
+     accepted (it may have timed out, been aborted, etc.), then sendmsg will
+     return error ENODATA.  If the user ID is already in use by another call,
+     then error EBADSLT will be returned.
+
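+As an illustration, here is a hedged sketch of attaching an RXRPC_USER_CALL_ID
+control message to the first sendmsg() of a call (the control message level is
+assumed to be SOL_RXRPC, and the socket "client" is a placeholder):
+
+        unsigned long call_id = 1;      /* tag chosen by the application */
+        char request[256];              /* marshalled request, op ID first */
+        size_t request_len = sizeof(request);
+        char ctrl[CMSG_SPACE(sizeof(call_id))];
+        struct iovec iov = { .iov_base = request, .iov_len = request_len };
+        struct msghdr msg;
+        struct cmsghdr *cmsg;
+
+        memset(&msg, 0, sizeof(msg));
+        msg.msg_iov = &iov;
+        msg.msg_iovlen = 1;
+        msg.msg_control = ctrl;
+        msg.msg_controllen = sizeof(ctrl);
+
+        cmsg = CMSG_FIRSTHDR(&msg);
+        cmsg->cmsg_level = SOL_RXRPC;
+        cmsg->cmsg_type = RXRPC_USER_CALL_ID;
+        cmsg->cmsg_len = CMSG_LEN(sizeof(call_id));
+        memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
+
+        sendmsg(client, &msg, MSG_MORE);  /* more request data follows */
+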
+
+==============
+SOCKET OPTIONS
+==============
+
+AF_RXRPC sockets support a few socket options at the SOL_RXRPC level:
+
+ (*) RXRPC_SECURITY_KEY
+
+     This is used to specify the description of the key to be used.  The key is
+     extracted from the calling process's keyrings with request_key() and
+     should be of "rxrpc" type.
+
+     The optval pointer points to the description string, and optlen indicates
+     how long the string is, without the NUL terminator.
+
+ (*) RXRPC_SECURITY_KEYRING
+
+     Similar to above but specifies a keyring of server secret keys to use (key
+     type "keyring").  See the "Security" section.
+
+ (*) RXRPC_EXCLUSIVE_CONNECTION
+
+     This is used to request that new connections should be used for each call
+     made subsequently on this socket.  optval should be NULL and optlen 0.
+
+ (*) RXRPC_MIN_SECURITY_LEVEL
+
+     This is used to specify the minimum security level required for calls on
+     this socket.  optval must point to an int containing one of the following
+     values:
+
+     (a) RXRPC_SECURITY_PLAIN
+
+        Encrypted checksum only.
+
+     (b) RXRPC_SECURITY_AUTH
+
+        Encrypted checksum plus packet padded and first eight bytes of packet
+        encrypted - which includes the actual packet length.
+
+     (c) RXRPC_SECURITY_ENCRYPTED
+
+        Encrypted checksum plus entire packet padded and encrypted, including
+        actual packet length.
+
+
+========
+SECURITY
+========
+
+Currently, only the kerberos 4 equivalent protocol has been implemented
+(security index 2 - rxkad).  This requires the rxkad module to be loaded and,
+on the client, tickets of the appropriate type to be obtained from the AFS
+kaserver or the kerberos server and installed as "rxrpc" type keys.  This is
+normally done using the klog program.  An example simple klog program can be
+found at:
+
+       http://people.redhat.com/~dhowells/rxrpc/klog.c
+
+The payload provided to add_key() on the client should be of the following
+form:
+
+       struct rxrpc_key_sec2_v1 {
+               uint16_t        security_index; /* 2 */
+               uint16_t        ticket_length;  /* length of ticket[] */
+               uint32_t        expiry;         /* time at which expires */
+               uint8_t         kvno;           /* key version number */
+               uint8_t         __pad[3];
+               uint8_t         session_key[8]; /* DES session key */
+               uint8_t         ticket[0];      /* the encrypted ticket */
+       };
+
+The ticket blob is simply appended to the end of the above structure.
+
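+For illustration only, a hedged userspace sketch of building such a payload and
+installing it with the add_key() wrapper from libkeyutils (the key description,
+the ticket fields and buffers, and the target keyring are placeholders):
+
+        size_t plen = sizeof(struct rxrpc_key_sec2_v1) + ticket_len;
+        struct rxrpc_key_sec2_v1 *payload = calloc(1, plen);
+
+        payload->security_index = 2;                    /* rxkad */
+        payload->ticket_length = ticket_len;
+        payload->expiry = ticket_expiry;                /* from the ticket */
+        payload->kvno = ticket_kvno;
+        memcpy(payload->session_key, session_key, 8);
+        memcpy(payload->ticket, ticket, ticket_len);    /* appended blob */
+
+        add_key("rxrpc", "afs@EXAMPLE.COM", payload, plen,
+                KEY_SPEC_SESSION_KEYRING);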
+
+For the server, keys of type "rxrpc_s" must be made available to the server.
+They have a description of "<serviceID>:<securityIndex>" (eg: "52:2" for an
+rxkad key for the AFS VL service).  When such a key is created, it should be
+given the server's secret key as the instantiation data (see the example
+below).
+
+       add_key("rxrpc_s", "52:2", secret_key, 8, keyring);
+
+A keyring is passed to the server socket by naming it in a sockopt.  The server
+socket then looks the server secret keys up in this keyring when secure
+incoming connections are made.  This can be seen in an example program that can
+be found at:
+
+       http://people.redhat.com/~dhowells/rxrpc/listen.c
+
+
+====================
+EXAMPLE CLIENT USAGE
+====================
+
+A client would issue an operation by:
+
+ (1) An RxRPC socket is set up by:
+
+       client = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
+
+     Where the third parameter indicates the protocol family of the transport
+     socket used - usually IPv4 but it can also be IPv6 [TODO].
+
+ (2) A local address can optionally be bound:
+
+       struct sockaddr_rxrpc srx = {
+               .srx_family     = AF_RXRPC,
+               .srx_service    = 0,  /* we're a client */
+               .transport_type = SOCK_DGRAM,   /* type of transport socket */
+               .transport.sin_family   = AF_INET,
+               .transport.sin_port     = htons(7000), /* AFS callback */
+               .transport.sin_address  = 0,  /* all local interfaces */
+       };
+       bind(client, &srx, sizeof(srx));
+
+     This specifies the local UDP port to be used.  If not given, a random
+     non-privileged port will be used.  A UDP port may be shared between
+     several unrelated RxRPC sockets.  Security is handled on a basis of
+     per-RxRPC virtual connection.
+
+ (3) The security is set:
+
+       const char *key = "AFS:cambridge.redhat.com";
+       setsockopt(client, SOL_RXRPC, RXRPC_SECURITY_KEY, key, strlen(key));
+
+     This issues a request_key() to get the key representing the security
+     context.  The minimum security level can be set:
+
+       unsigned int sec = RXRPC_SECURITY_ENCRYPTED;
+       setsockopt(client, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
+                  &sec, sizeof(sec));
+
+ (4) The server to be contacted can then be specified (alternatively this can
+     be done through sendmsg):
+
+       struct sockaddr_rxrpc srx = {
+               .srx_family     = AF_RXRPC,
+               .srx_service    = VL_SERVICE_ID,
+               .transport_type = SOCK_DGRAM,   /* type of transport socket */
+               .transport.sin_family   = AF_INET,
+               .transport.sin_port     = htons(7005), /* AFS volume manager */
+               .transport.sin_address  = ...,
+       };
+       connect(client, &srx, sizeof(srx));
+
+ (5) The request data should then be posted to the server socket using a series
+     of sendmsg() calls, each with the following control message attached:
+
+       RXRPC_USER_CALL_ID      - specifies the user ID for this call
+
+     MSG_MORE should be set in msghdr::msg_flags on all but the last part of
+     the request.  Multiple requests may be made simultaneously.
+
+     If a call is intended to go to a destination other than the default
+     specified through connect(), then msghdr::msg_name should be set on the
+     first request message of that call.
+
+ (6) The reply data will then be posted to the server socket for recvmsg() to
+     pick up.  MSG_MORE will be flagged by recvmsg() if there's more reply data
+     for a particular call to be read.  MSG_EOR will be set on the terminal
+     read for a call.
+
+     All data will be delivered with the following control message attached:
+
+       RXRPC_USER_CALL_ID      - specifies the user ID for this call
+
+     If an abort or error occurred, this will be returned in the control data
+     buffer instead, and MSG_EOR will be flagged to indicate the end of that
+     call.
+
+
+====================
+EXAMPLE SERVER USAGE
+====================
+
+A server would be set up to accept operations in the following manner:
+
+ (1) An RxRPC socket is created by:
+
+       server = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
+
+     Where the third parameter indicates the address type of the transport
+     socket used - usually IPv4.
+
+ (2) Security is set up if desired by giving the socket a keyring with server
+     secret keys in it:
+
+       keyring = add_key("keyring", "AFSkeys", NULL, 0,
+                         KEY_SPEC_PROCESS_KEYRING);
+
+       const char secret_key[8] = {
+               0xa7, 0x83, 0x8a, 0xcb, 0xc7, 0x83, 0xec, 0x94 };
+       add_key("rxrpc_s", "52:2", secret_key, 8, keyring);
+
+       setsockopt(server, SOL_RXRPC, RXRPC_SECURITY_KEYRING, "AFSkeys", 7);
+
+     The keyring can be manipulated after it has been given to the socket. This
+     permits the server to add more keys, replace keys, etc. whilst it is live.
+
+ (3) A local address must then be bound:
+
+       struct sockaddr_rxrpc srx = {
+               .srx_family     = AF_RXRPC,
+               .srx_service    = VL_SERVICE_ID, /* RxRPC service ID */
+               .transport_type = SOCK_DGRAM,   /* type of transport socket */
+               .transport.sin_family   = AF_INET,
+               .transport.sin_port     = htons(7000), /* AFS callback */
+               .transport.sin_address  = 0,  /* all local interfaces */
+       };
+       bind(server, &srx, sizeof(srx));
+
+ (4) The server is then set to listen out for incoming calls:
+
+       listen(server, 100);
+
+ (5) The kernel notifies the server of pending incoming connections by sending
+     it a message for each.  This is received with recvmsg() on the server
+     socket.  It has no data, and has a single dataless control message
+     attached:
+
+       RXRPC_NEW_CALL
+
+     The address that can be passed back by recvmsg() at this point should be
+     ignored since the call for which the message was posted may have gone by
+     the time it is accepted - in which case the first call still on the queue
+     will be accepted.
+
+ (6) The server then accepts the new call by issuing a sendmsg() with two
+     pieces of control data and no actual data:
+
+       RXRPC_ACCEPT            - indicate connection acceptance
+       RXRPC_USER_CALL_ID      - specify user ID for this call
+
+ (7) The first request data packet will then be posted to the server socket for
+     recvmsg() to pick up.  At that point, the RxRPC address for the call can
+     be read from the address fields in the msghdr struct.
+
+     Subsequent request data will be posted to the server socket for recvmsg()
+     to collect as it arrives.  All but the last piece of the request data will
+     be delivered with MSG_MORE flagged.
+
+     All data will be delivered with the following control message attached:
+
+       RXRPC_USER_CALL_ID      - specifies the user ID for this call
+
+ (8) The reply data should then be posted to the server socket using a series
+     of sendmsg() calls, each with the following control messages attached:
+
+       RXRPC_USER_CALL_ID      - specifies the user ID for this call
+
+     MSG_MORE should be set in msghdr::msg_flags on all but the last message
+     for a particular call.
+
+ (9) The final ACK from the client will be posted for retrieval by recvmsg()
+     when it is received.  It will take the form of a dataless message with two
+     control messages attached:
+
+       RXRPC_USER_CALL_ID      - specifies the user ID for this call
+       RXRPC_ACK               - indicates final ACK (no data)
+
+     MSG_EOR will be flagged to indicate that this is the final message for
+     this call.
+
+(10) Up to the point the final packet of reply data is sent, the call can be
+     aborted by calling sendmsg() with a dataless message with the following
+     control messages attached:
+
+       RXRPC_USER_CALL_ID      - specifies the user ID for this call
+       RXRPC_ABORT             - indicates abort code (4 byte data)
+
+     Any packets waiting in the socket's receive queue will be discarded if
+     this is issued.
+
+Note that all the communications for a particular service take place through
+the one server socket, using control messages on sendmsg() and recvmsg() to
+determine the call affected.
+
+
+=========================
+AF_RXRPC KERNEL INTERFACE
+=========================
+
+The AF_RXRPC module also provides an interface for use by in-kernel utilities
+such as the AFS filesystem.  This permits such a utility to:
+
+ (1) Use different keys directly on individual client calls on one socket
+     rather than having to open a whole slew of sockets, one for each key it
+     might want to use.
+
+ (2) Avoid having RxRPC call request_key() at the point of issue of a call or
+     opening of a socket.  Instead the utility is responsible for requesting a
+     key at the appropriate point.  AFS, for instance, would do this during VFS
+     operations such as open() or unlink().  The key is then handed through
+     when the call is initiated.
+
+ (3) Request the use of something other than GFP_KERNEL to allocate memory.
+
+ (4) Avoid the overhead of using the recvmsg() call.  RxRPC messages can be
+     intercepted before they get put into the socket Rx queue and the socket
+     buffers manipulated directly.
+
+To use the RxRPC facility, a kernel utility must still open an AF_RXRPC socket,
+bind an address as appropriate and listen if it's to be a server socket, but
+then it passes this to the kernel interface functions.
+
+The kernel interface functions are as follows; a brief usage sketch is given
+after the list:
+
+ (*) Begin a new client call.
+
+       struct rxrpc_call *
+       rxrpc_kernel_begin_call(struct socket *sock,
+                               struct sockaddr_rxrpc *srx,
+                               struct key *key,
+                               unsigned long user_call_ID,
+                               gfp_t gfp);
+
+     This allocates the infrastructure to make a new RxRPC call and assigns
+     call and connection numbers.  The call will be made on the UDP port that
+     the socket is bound to.  The call will go to the destination address of a
+     connected client socket unless an alternative is supplied (srx is
+     non-NULL).
+
+     If a key is supplied then this will be used to secure the call instead of
+     the key bound to the socket with the RXRPC_SECURITY_KEY sockopt.  Calls
+     secured in this way will still share connections if at all possible.
+
+     The user_call_ID is equivalent to that supplied to sendmsg() in the
+     control data buffer.  It is entirely feasible to use this to point to a
+     kernel data structure.
+
+     If this function is successful, an opaque reference to the RxRPC call is
+     returned.  The caller now holds a reference on this and it must be
+     properly ended.
+
+ (*) End a client call.
+
+       void rxrpc_kernel_end_call(struct rxrpc_call *call);
+
+     This is used to end a previously begun call.  The user_call_ID is expunged
+     from AF_RXRPC's knowledge and will not be seen again in association with
+     the specified call.
+
+ (*) Send data through a call.
+
+       int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
+                                  size_t len);
+
+     This is used to supply either the request part of a client call or the
+     reply part of a server call.  msg.msg_iovlen and msg.msg_iov specify the
+     data buffers to be used.  msg_iov may not be NULL and must point
+     exclusively to in-kernel virtual addresses.  msg.msg_flags may be given
+     MSG_MORE if there will be subsequent data sends for this call.
+
+     The msg must not specify a destination address, control data or any flags
+     other than MSG_MORE.  len is the total amount of data to transmit.
+
+ (*) Abort a call.
+
+       void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code);
+
+     This is used to abort a call if it's still in an abortable state.  The
+     abort code specified will be placed in the ABORT message sent.
+
+ (*) Intercept received RxRPC messages.
+
+       typedef void (*rxrpc_interceptor_t)(struct sock *sk,
+                                           unsigned long user_call_ID,
+                                           struct sk_buff *skb);
+
+       void
+       rxrpc_kernel_intercept_rx_messages(struct socket *sock,
+                                          rxrpc_interceptor_t interceptor);
+
+     This installs an interceptor function on the specified AF_RXRPC socket.
+     All messages that would otherwise wind up in the socket's Rx queue are
+     then diverted to this function.  Note that care must be taken to process
+     the messages in the right order to maintain DATA message sequentiality.
+
+     The interceptor function itself is provided with the address of the socket
+     that is handling the incoming message, the ID assigned by the kernel
+     utility to the call and the socket buffer containing the message.
+
+     The skb->mark field indicates the type of message:
+
+       MARK                            MEANING
+       =============================== =======================================
+       RXRPC_SKB_MARK_DATA             Data message
+       RXRPC_SKB_MARK_FINAL_ACK        Final ACK received for an incoming call
+       RXRPC_SKB_MARK_BUSY             Client call rejected as server busy
+       RXRPC_SKB_MARK_REMOTE_ABORT     Call aborted by peer
+       RXRPC_SKB_MARK_NET_ERROR        Network error detected
+       RXRPC_SKB_MARK_LOCAL_ERROR      Local error encountered
+       RXRPC_SKB_MARK_NEW_CALL         New incoming call awaiting acceptance
+
+     The remote abort message can be probed with rxrpc_kernel_get_abort_code().
+     The two error messages can be probed with rxrpc_kernel_get_error_number().
+     A new call can be accepted with rxrpc_kernel_accept_call().
+
+     Data messages can have their contents extracted with the usual bunch of
+     socket buffer manipulation functions.  A data message can be determined to
+     be the last one in a sequence with rxrpc_kernel_is_data_last().  When a
+     data message has been used up, rxrpc_kernel_data_delivered() should be
+     called on it.
+
+     Non-data messages should be handed to rxrpc_kernel_free_skb() to dispose
+     of.  It is possible to get extra refs on all types of message for later
+     freeing, but this may pin the state of a call until the message is finally
+     freed.
+
+ (*) Accept an incoming call.
+
+       struct rxrpc_call *
+       rxrpc_kernel_accept_call(struct socket *sock,
+                                unsigned long user_call_ID);
+
+     This is used to accept an incoming call and to assign it a call ID.  This
+     function is similar to rxrpc_kernel_begin_call() and calls accepted must
+     be ended in the same way.
+
+     If this function is successful, an opaque reference to the RxRPC call is
+     returned.  The caller now holds a reference on this and it must be
+     properly ended.
+
+ (*) Reject an incoming call.
+
+       int rxrpc_kernel_reject_call(struct socket *sock);
+
+     This is used to reject the first incoming call on the socket's queue with
+     a BUSY message.  -ENODATA is returned if there were no incoming calls.
+     Other errors may be returned if the call had been aborted (-ECONNABORTED)
+     or had timed out (-ETIME).
+
+ (*) Record the delivery of a data message and free it.
+
+       void rxrpc_kernel_data_delivered(struct sk_buff *skb);
+
+     This is used to record a data message as having been delivered and to
+     update the ACK state for the call.  The socket buffer will be freed.
+
+ (*) Free a message.
+
+       void rxrpc_kernel_free_skb(struct sk_buff *skb);
+
+     This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC
+     socket.
+
+ (*) Determine if a data message is the last one on a call.
+
+       bool rxrpc_kernel_is_data_last(struct sk_buff *skb);
+
+     This is used to determine if a socket buffer holds the last data message
+     to be received for a call (true will be returned if it does, false
+     if not).
+
+     The data message will be part of the reply on a client call and the
+     request on an incoming call.  In the latter case there will be more
+     messages, but in the former case there will not.
+
+ (*) Get the abort code from an abort message.
+
+       u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb);
+
+     This is used to extract the abort code from a remote abort message.
+
+ (*) Get the error number from a local or network error message.
+
+       int rxrpc_kernel_get_error_number(struct sk_buff *skb);
+
+     This is used to extract the error number from a message indicating that
+     either a local error or a network error occurred.
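+
+To illustrate how these pieces fit together, here is a hedged sketch of an
+in-kernel client issuing a single request on an already bound and connected
+AF_RXRPC socket (the socket "sock", address "srx", key, cookie pointer and
+request buffer are assumptions, errors are assumed to be reported as ERR_PTR
+values, and the reply would be collected via the interceptor installed on the
+socket):
+
+        struct rxrpc_call *call;
+        struct msghdr msg;
+        struct iovec iov = { .iov_base = request, .iov_len = request_len };
+        int ret;
+
+        call = rxrpc_kernel_begin_call(sock, &srx, key,
+                                       (unsigned long) cookie, GFP_KERNEL);
+        if (IS_ERR(call))
+                return PTR_ERR(call);
+
+        memset(&msg, 0, sizeof(msg));
+        msg.msg_iov = &iov;
+        msg.msg_iovlen = 1;
+        /* no MSG_MORE: this is the entire request */
+
+        ret = rxrpc_kernel_send_data(call, &msg, request_len);
+        if (ret < 0)
+                rxrpc_kernel_abort_call(call, 1 /* protocol abort code */);
+
+        /* ... reply and final state arrive through the interceptor ... */
+
+        rxrpc_kernel_end_call(call);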
index 653978d..07dd6d9 100644 (file)
@@ -250,7 +250,6 @@ PRODUCT COMPONENTS AND RELATED FILES
        sdladrv.h       SDLA support module API definitions
        sdlasfm.h       SDLA firmware module definitions
        if_wanpipe.h    WANPIPE Socket definitions
-       if_wanpipe_common.h     WANPIPE Socket/Driver common definitions.
        sdlapci.h       WANPIPE PCI definitions
        
 
diff --git a/Documentation/s390/crypto/crypto-API.txt b/Documentation/s390/crypto/crypto-API.txt
deleted file mode 100644 (file)
index 71ae6ca..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-crypto-API support for z990 Message Security Assist (MSA) instructions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-AUTHOR:        Thomas Spatzier (tspat@de.ibm.com)
-
-
-1. Introduction crypto-API
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-See Documentation/crypto/api-intro.txt for an introduction/description of the
-kernel crypto API.
-According to api-intro.txt support for z990 crypto instructions has been added
-in the algorithm api layer of the crypto API. Several files containing z990
-optimized implementations of crypto algorithms are placed in the
-arch/s390/crypto directory.
-
-
-2. Probing for availability of MSA
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-It should be possible to use Kernels with the z990 crypto implementations both
-on machines with MSA available and on those without MSA (pre z990 or z990
-without MSA). Therefore a simple probing mechanism has been implemented:
-In the init function of each crypto module the availability of MSA and of the
-respective crypto algorithm in particular will be tested. If the algorithm is
-available the module will load and register its algorithm with the crypto API.
-
-If the respective crypto algorithm is not available, the init function will
-return -ENOSYS. In that case a fallback to the standard software implementation
-of the crypto algorithm must be taken ( -> the standard crypto modules are
-also built when compiling the kernel).
-
-
-3. Ensuring z990 crypto module preference
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If z990 crypto instructions are available the optimized modules should be
-preferred instead of standard modules.
-
-3.1. compiled-in modules
-~~~~~~~~~~~~~~~~~~~~~~~~
-For compiled-in modules it has to be ensured that the z990 modules are linked
-before the standard crypto modules. Then, on system startup the init functions
-of z990 crypto modules will be called first and query for availability of z990
-crypto instructions. If instruction is available, the z990 module will register
-its crypto algorithm implementation -> the load of the standard module will fail
-since the algorithm is already registered.
-If z990 crypto instruction is not available the load of the z990 module will
-fail -> the standard module will load and register its algorithm.
-
-3.2. dynamic modules
-~~~~~~~~~~~~~~~~~~~~
-A system administrator has to take care of giving preference to z990 crypto
-modules. If MSA is available appropriate lines have to be added to
-/etc/modprobe.conf.
-
-Example:       z990 crypto instruction for SHA1 algorithm is available
-
-               add the following line to /etc/modprobe.conf (assuming the
-               z990 crypto modules for SHA1 is called sha1_z990):
-
-               alias sha1 sha1_z990
-
-               -> when the sha1 algorithm is requested through the crypto API
-               (which has a module autoloader) the z990 module will be loaded.
-
-TBD:   a userspace module probing mechanism
-       something like 'probe sha1 sha1_z990 sha1' in modprobe.conf
-       -> try module sha1_z990, if it fails to load standard module sha1
-       the 'probe' statement is currently not supported in modprobe.conf
-
-
-4. Currently implemented z990 crypto algorithms
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The following crypto algorithms with z990 MSA support are currently implemented.
-The name of each algorithm under which it is registered in crypto API and the
-name of the respective module is given in square brackets.
-
-- SHA1 Digest Algorithm [sha1 -> sha1_z990]
-- DES Encrypt/Decrypt Algorithm (64bit key) [des -> des_z990]
-- Triple DES Encrypt/Decrypt Algorithm (128bit key) [des3_ede128 -> des_z990]
-- Triple DES Encrypt/Decrypt Algorithm (192bit key) [des3_ede -> des_z990]
-
-In order to load, for example, the sha1_z990 module when the sha1 algorithm is
-requested (see 3.2.) add 'alias sha1 sha1_z990' to /etc/modprobe.conf.
-
diff --git a/Documentation/s390/zfcpdump.txt b/Documentation/s390/zfcpdump.txt
new file mode 100644 (file)
index 0000000..cf45d27
--- /dev/null
@@ -0,0 +1,87 @@
+s390 SCSI dump tool (zfcpdump)
+
+System z machines (z900 or higher) provide hardware support for creating system
+dumps on SCSI disks. The dump process is initiated by booting a dump tool, which
+has to create a dump of the current (probably crashed) Linux image. In order
+not to overwrite memory of the crashed Linux with data of the dump tool, the
+hardware saves some memory plus the register sets of the boot cpu before the
+dump tool is loaded. There exists an SCLP hardware interface to obtain the saved
+memory afterwards. Currently 32 MB are saved.
+
+This zfcpdump implementation consists of a Linux dump kernel together with
+a userspace dump tool, which are loaded together into the saved memory region
+below 32 MB. zfcpdump is installed on a SCSI disk using zipl (as contained in
+the s390-tools package) to make the device bootable. The operator of a Linux
+system can then trigger a SCSI dump by booting the SCSI disk on which zfcpdump
+resides.
+
+The kernel part of zfcpdump is implemented as a debugfs file under "zcore/mem",
+which exports memory and registers of the crashed Linux in an s390
+standalone dump format. It can be used in the same way as e.g. /dev/mem. The
+dump format defines a 4K header followed by plain uncompressed memory. The
+register sets are stored in the prefix pages of the respective cpus. To build a
+dump enabled kernel with the zcore driver, the kernel config option
+CONFIG_ZFCPDUMP has to be set. When reading from "zcore/mem", the part of
+memory that has been saved by the hardware is read by the driver via the SCLP
+hardware interface. The rest is simply copied from the real memory that was not
+overwritten.
+
+The userspace application of zfcpdump can reside e.g. in an initramfs or an
+initrd. It reads from zcore/mem and writes the system dump to a file on a
+SCSI disk.
+
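+A hedged sketch of the core of such a dumper (the debugfs mount point
+"/sys/kernel/debug" and the output path are assumptions; the real tool also
+mounts the target filesystem and handles errors):
+
+        char buf[4096];
+        ssize_t n;
+        int in = open("/sys/kernel/debug/zcore/mem", O_RDONLY);
+        int out = open("/mnt/dump.s390", O_WRONLY | O_CREAT | O_TRUNC, 0600);
+
+        /* copy the standalone dump (header plus memory image) to disk */
+        while ((n = read(in, buf, sizeof(buf))) > 0)
+                write(out, buf, n);
+
+        close(out);
+        close(in);
+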
+To build a zfcpdump kernel use the following settings in your kernel
+configuration:
+ * CONFIG_ZFCPDUMP=y
+ * Enable ZFCP driver
+ * Enable SCSI driver
+ * Enable ext2 and ext3 filesystems
+ * Disable as many features as possible to keep the kernel small.
+   E.g. network support is not needed at all.
+
+To use the zfcpdump userspace application in an initramfs you have to do the
+following:
+
+ * Copy the zfcpdump executable somewhere into your Linux tree.
+   E.g. to "arch/s390/boot/zfcpdump". If you do not want to include
+   shared libraries, compile the tool with the "-static" gcc option.
+ * If you want to include e2fsck, add it to your source tree, too. The zfcpdump
+   application attempts to start /sbin/e2fsck from the ramdisk.
+ * Use an initramfs config file like the following:
+
+   dir /dev 755 0 0
+   nod /dev/console 644 0 0 c 5 1
+   nod /dev/null 644 0 0 c 1 3
+   nod /dev/sda1 644 0 0 b 8 1
+   nod /dev/sda2 644 0 0 b 8 2
+   nod /dev/sda3 644 0 0 b 8 3
+   nod /dev/sda4 644 0 0 b 8 4
+   nod /dev/sda5 644 0 0 b 8 5
+   nod /dev/sda6 644 0 0 b 8 6
+   nod /dev/sda7 644 0 0 b 8 7
+   nod /dev/sda8 644 0 0 b 8 8
+   nod /dev/sda9 644 0 0 b 8 9
+   nod /dev/sda10 644 0 0 b 8 10
+   nod /dev/sda11 644 0 0 b 8 11
+   nod /dev/sda12 644 0 0 b 8 12
+   nod /dev/sda13 644 0 0 b 8 13
+   nod /dev/sda14 644 0 0 b 8 14
+   nod /dev/sda15 644 0 0 b 8 15
+   file /init arch/s390/boot/zfcpdump 755 0 0
+   file /sbin/e2fsck arch/s390/boot/e2fsck 755 0 0
+   dir /proc 755 0 0
+   dir /sys 755 0 0
+   dir /mnt 755 0 0
+   dir /sbin 755 0 0
+
+ * Issue "make image" to build the zfcpdump image with initramfs.
+
+In a Linux distribution the zfcpdump enabled kernel image must be copied to
+/usr/share/zfcpdump/zfcpdump.image, where the s390 zipl tool is looking for the
+dump kernel when preparing a SCSI dump disk.
+
+If you use a ramdisk copy it to "/usr/share/zfcpdump/zfcpdump.rd".
+
+For more information on how to use zfcpdump refer to the s390 'Using the Dump
+Tools' book, which is available from
+http://www.ibm.com/developerworks/linux/linux390.
index 625a21d..85f51e5 100644 (file)
@@ -293,7 +293,3 @@ Debugging
                        stuck (default)
 
 Miscellaneous
-
-  noreplacement  Don't replace instructions with more appropriate ones
-                for the CPU. This may be useful on asymmetric MP systems
-                where some CPUs have less capabilities than others.
index ef84419..f56c7e1 100644 (file)
@@ -384,7 +384,7 @@ S:  Supported
 
 APPLETALK NETWORK LAYER
 P:     Arnaldo Carvalho de Melo
-M:     acme@conectiva.com.br
+M:     acme@ghostprotocols.net
 S:     Maintained
 
 ARC FRAMEBUFFER DRIVER
@@ -656,6 +656,7 @@ S:  Supported
 ATMEL WIRELESS DRIVER
 P:     Simon Kelley
 M:     simon@thekelleys.org.uk
+L:     linux-wireless@vger.kernel.org
 W:     http://www.thekelleys.org.uk/atmel
 W:     http://atmelwlandriver.sourceforge.net/
 S:     Maintained
@@ -711,6 +712,7 @@ P:  Larry Finger
 M:     Larry.Finger@lwfinger.net
 P:     Stefano Brivio
 M:     st3@riseup.net
+L:     linux-wireless@vger.kernel.org
 W:     http://bcm43xx.berlios.de/
 S:     Maintained
 
@@ -892,6 +894,12 @@ M: maxextreme@gmail.com
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
+CFG80211 and NL80211
+P:     Johannes Berg
+M:     johannes@sipsolutions.net
+L:     linux-wireless@vger.kernel.org
+S:     Maintained
+
 COMMON INTERNET FILE SYSTEM (CIFS)
 P:     Steve French
 M:     sfrench@samba.org
@@ -1034,9 +1042,8 @@ S:        Maintained
 
 CYCLADES 2X SYNC CARD DRIVER
 P:     Arnaldo Carvalho de Melo
-M:     acme@conectiva.com.br
-W:     http://advogato.org/person/acme
-L:     cycsyn-devel@bazar.conectiva.com.br
+M:     acme@ghostprotocols.net
+W:     http://oops.ghostprotocols.net:81/blog
 S:     Maintained
 
 CYCLADES ASYNC MUX DRIVER
@@ -1077,7 +1084,7 @@ S:        Maintained
 
 DCCP PROTOCOL
 P:     Arnaldo Carvalho de Melo
-M:     acme@mandriva.com
+M:     acme@ghostprotocols.net
 L:     dccp@vger.kernel.org
 W:     http://linux-net.osdl.org/index.php/DCCP
 S:     Maintained
@@ -1318,7 +1325,7 @@ S:        Maintained
 ETHERNET BRIDGE
 P:     Stephen Hemminger
 M:     shemminger@linux-foundation.org
-L:     bridge@lists.osdl.org
+L:     bridge@lists.linux-foundation.org
 W:     http://bridge.sourceforge.net/
 S:     Maintained
 
@@ -1355,6 +1362,11 @@ M:       kevin.curtis@farsite.co.uk
 W:     http://www.farsite.co.uk/
 S:     Supported
 
+FAULT INJECTION SUPPORT
+P:     Akinobu Mita
+M:     akinobu.mita@gmail.com
+S:     Supported
+
 FRAMEBUFFER LAYER
 P:     Antonino Daplas
 M:     adaplas@gmail.com
@@ -1553,6 +1565,7 @@ S:        Supported
 HOST AP DRIVER
 P:     Jouni Malinen
 M:     jkmaline@cc.hut.fi
+L:     linux-wireless@vger.kernel.org
 L:     hostap@shmoo.com
 W:     http://hostap.epitest.fi/
 S:     Maintained
@@ -1690,7 +1703,7 @@ S:        Maintained
 
 IEEE 1394 SUBSYSTEM
 P:     Ben Collins
-M:     bcollins@debian.org
+M:     ben.collins@ubuntu.com
 P:     Stefan Richter
 M:     stefanr@s5r6.in-berlin.de
 L:     linux1394-devel@lists.sourceforge.net
@@ -1698,25 +1711,11 @@ W:      http://www.linux1394.org/
 T:     git kernel.org:/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
 S:     Maintained
 
-IEEE 1394 IPV4 DRIVER (eth1394)
-P:     Stefan Richter
-M:     stefanr@s5r6.in-berlin.de
-L:     linux1394-devel@lists.sourceforge.net
-S:     Odd Fixes
-
-IEEE 1394 PCILYNX DRIVER
-P:     Jody McIntyre
-M:     scjody@modernduck.com
-P:     Stefan Richter
-M:     stefanr@s5r6.in-berlin.de
-L:     linux1394-devel@lists.sourceforge.net
-S:     Odd Fixes
-
-IEEE 1394 RAW I/O DRIVER
-P:     Ben Collins
-M:     bcollins@debian.org
+IEEE 1394 RAW I/O DRIVER (raw1394)
 P:     Dan Dennedy
 M:     dan@dennedy.org
+P:     Stefan Richter
+M:     stefanr@s5r6.in-berlin.de
 L:     linux1394-devel@lists.sourceforge.net
 S:     Maintained
 
@@ -1839,6 +1838,7 @@ P:        Yi Zhu
 M:     yi.zhu@intel.com
 P:     James Ketrenos
 M:     jketreno@linux.intel.com
+L:     linux-wireless@vger.kernel.org
 L:     ipw2100-devel@lists.sourceforge.net
 L:     http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel
 W:     http://ipw2100.sourceforge.net
@@ -1849,6 +1849,7 @@ P:        Yi Zhu
 M:     yi.zhu@intel.com
 P:     James Ketrenos
 M:     jketreno@linux.intel.com
+L:     linux-wireless@vger.kernel.org
 L:     ipw2100-devel@lists.sourceforge.net
 L:     http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel
 W:     http://ipw2200.sourceforge.net
@@ -1880,7 +1881,7 @@ S:        Supported
 
 IPX NETWORK LAYER
 P:     Arnaldo Carvalho de Melo
-M:     acme@conectiva.com.br
+M:     acme@ghostprotocols.net
 L:     netdev@vger.kernel.org
 S:     Maintained
 
@@ -1951,7 +1952,7 @@ P:        Vivek Goyal
 M:     vgoyal@in.ibm.com
 P:     Haren Myneni
 M:     hbabu@us.ibm.com
-L:     fastboot@lists.osdl.org
+L:     fastboot@lists.linux-foundation.org
 L:     linux-kernel@vger.kernel.org
 W:     http://lse.sourceforge.net/kdump/
 S:     Maintained
@@ -1978,7 +1979,7 @@ S:        Maintained
 
 KERNEL JANITORS
 P:     Several
-L:     kernel-janitors@lists.osdl.org
+L:     kernel-janitors@lists.linux-foundation.org
 W:     http://www.kerneljanitors.org/
 S:     Maintained
 
@@ -2001,7 +2002,7 @@ P:        Eric Biederman
 M:     ebiederm@xmission.com
 W:     http://www.xmission.com/~ebiederm/files/kexec/
 L:     linux-kernel@vger.kernel.org
-L:     fastboot@lists.osdl.org
+L:     fastboot@lists.linux-foundation.org
 S:     Maintained
 
 KPROBES
@@ -2117,7 +2118,7 @@ S:        Supported
 
 LLC (802.2)
 P:     Arnaldo Carvalho de Melo
-M:     acme@conectiva.com.br
+M:     acme@ghostprotocols.net
 S:     Maintained
 
 LINUX FOR 64BIT POWERPC
@@ -2339,7 +2340,7 @@ S:        Maintained
 NETEM NETWORK EMULATOR
 P:     Stephen Hemminger
 M:     shemminger@linux-foundation.org
-L:     netem@lists.osdl.org
+L:     netem@lists.linux-foundation.org
 S:     Maintained
 
 NETFILTER/IPTABLES/IPCHAINS
@@ -2541,6 +2542,7 @@ P:        Pavel Roskin
 M:     proski@gnu.org
 P:     David Gibson
 M:     hermes@gibson.dropbear.id.au
+L:     linux-wireless@vger.kernel.org
 L:     orinoco-users@lists.sourceforge.net
 L:     orinoco-devel@lists.sourceforge.net
 W:     http://www.nongnu.org/orinoco/
@@ -2720,7 +2722,7 @@ S:        Supported
 PRISM54 WIRELESS DRIVER
 P:     Prism54 Development Team
 M:     developers@islsm.org
-L:     netdev@vger.kernel.org
+L:     linux-wireless@vger.kernel.org
 W:     http://prism54.org
 S:     Maintained
 
@@ -2791,7 +2793,7 @@ S:        Maintained
 RAYLINK/WEBGEAR 802.11 WIRELESS LAN DRIVER
 P:     Corey Thomas
 M:     corey@world.std.com
-L:     linux-kernel@vger.kernel.org
+L:     linux-wireless@vger.kernel.org
 S:     Maintained
 
 RANDOM NUMBER DRIVER
@@ -3054,7 +3056,7 @@ M:        josejx@gentoo.org
 P:     Daniel Drake
 M:     dsd@gentoo.org
 W:     http://softmac.sipsolutions.net/
-L:     netdev@vger.kernel.org
+L:     linux-wireless@vger.kernel.org
 S:     Maintained
 
 SOFTWARE RAID (Multiple Disks) SUPPORT
@@ -3068,7 +3070,7 @@ S:        Supported
 SOFTWARE SUSPEND:
 P:     Pavel Machek
 M:     pavel@suse.cz
-L:     linux-pm@lists.osdl.org
+L:     linux-pm@lists.linux-foundation.org
 S:     Maintained
 
 SONIC NETWORK DRIVER
@@ -3759,6 +3761,7 @@ S:        Maintained
 WAVELAN NETWORK DRIVER & WIRELESS EXTENSIONS
 P:     Jean Tourrilhes
 M:     jt@hpl.hp.com
+L:     linux-wireless@vger.kernel.org
 W:     http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/
 S:     Maintained
 
@@ -3775,8 +3778,9 @@ S:        Maintained
 
 WL3501 WIRELESS PCMCIA CARD DRIVER
 P:     Arnaldo Carvalho de Melo
-M:     acme@conectiva.com.br
-W:     http://advogato.org/person/acme
+M:     acme@ghostprotocols.net
+L:     linux-wireless@vger.kernel.org
+W:     http://oops.ghostprotocols.net:81/blog
 S:     Maintained
 
 X.25 NETWORK LAYER
@@ -3839,6 +3843,7 @@ M:        dsd@gentoo.org
 P:     Ulrich Kunitz
 M:     kune@deine-taler.de
 W:     http://zd1211.ath.cx/wiki/DriverRewrite
+L:     linux-wireless@vger.kernel.org
 L:     zd1211-devs@lists.sourceforge.net (subscribers-only)
 S:     Maintained
 
index 234bae6..d970cb1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 21
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Nocturnal Monster Puppy
 
 # *DOCUMENTATION*
index 21cf624..ea098f3 100644 (file)
@@ -36,7 +36,6 @@ lib-y =       __divqu.o __remqu.o __divlu.o __remlu.o \
        $(ev6-y)csum_ipv6_magic.o \
        $(ev6-y)clear_page.o \
        $(ev6-y)copy_page.o \
-       strcasecmp.o \
        fpreg.o \
        callback_srm.o srm_puts.o srm_printk.o
 
diff --git a/arch/alpha/lib/strcasecmp.c b/arch/alpha/lib/strcasecmp.c
deleted file mode 100644 (file)
index 4e57a21..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- *  linux/arch/alpha/lib/strcasecmp.c
- */
-
-#include <linux/string.h>
-
-
-/* We handle nothing here except the C locale.  Since this is used in
-   only one place, on strings known to contain only 7 bit ASCII, this
-   is ok.  */
-
-int strcasecmp(const char *a, const char *b)
-{
-       int ca, cb;
-
-       do {
-               ca = *a++ & 0xff;
-               cb = *b++ & 0xff;
-               if (ca >= 'A' && ca <= 'Z')
-                       ca += 'a' - 'A';
-               if (cb >= 'A' && cb <= 'Z')
-                       cb += 'a' - 'A';
-       } while (ca == cb && ca != '\0');
-
-       return ca - cb;
-}
index ce4013a..3ec7658 100644 (file)
@@ -57,9 +57,6 @@ config ARCH_HAS_ILOG2_U64
        bool
        default n
 
-config GENERIC_BUST_SPINLOCK
-       bool
-
 config GENERIC_HWEIGHT
        bool
        default y
@@ -68,6 +65,11 @@ config GENERIC_CALIBRATE_DELAY
        bool
        default y
 
+config GENERIC_BUG
+       bool
+       default y
+       depends on BUG
+
 source "init/Kconfig"
 
 menu "System Type and features"
@@ -106,6 +108,9 @@ choice
 config BOARD_ATSTK1000
        bool "ATSTK1000 evaluation board"
        select BOARD_ATSTK1002 if CPU_AT32AP7000
+
+config BOARD_ATNGW100
+       bool "ATNGW100 Network Gateway"
 endchoice
 
 choice
@@ -116,6 +121,8 @@ config      LOADER_U_BOOT
        bool "U-Boot (or similar) bootloader"
 endchoice
 
+source "arch/avr32/mach-at32ap/Kconfig"
+
 config LOAD_ADDRESS
        hex
        default 0x10000000 if LOADER_U_BOOT=y && CPU_AT32AP7000=y
index 7b842e9..6115fc1 100644 (file)
@@ -27,6 +27,7 @@ head-$(CONFIG_LOADER_U_BOOT)          += arch/avr32/boot/u-boot/head.o
 head-y                                 += arch/avr32/kernel/head.o
 core-$(CONFIG_PLATFORM_AT32AP)         += arch/avr32/mach-at32ap/
 core-$(CONFIG_BOARD_ATSTK1000)         += arch/avr32/boards/atstk1000/
+core-$(CONFIG_BOARD_ATNGW100)          += arch/avr32/boards/atngw100/
 core-$(CONFIG_LOADER_U_BOOT)           += arch/avr32/boot/u-boot/
 core-y                                 += arch/avr32/kernel/
 core-y                                 += arch/avr32/mm/
diff --git a/arch/avr32/boards/atngw100/Makefile b/arch/avr32/boards/atngw100/Makefile
new file mode 100644 (file)
index 0000000..c740aa1
--- /dev/null
@@ -0,0 +1 @@
+obj-y                          += setup.o flash.o
diff --git a/arch/avr32/boards/atngw100/flash.c b/arch/avr32/boards/atngw100/flash.c
new file mode 100644 (file)
index 0000000..f9b32a8
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * ATNGW100 board-specific flash initialization
+ *
+ * Copyright (C) 2005-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+
+#include <asm/arch/smc.h>
+
+static struct smc_config flash_config __initdata = {
+       .ncs_read_setup         = 0,
+       .nrd_setup              = 40,
+       .ncs_write_setup        = 0,
+       .nwe_setup              = 10,
+
+       .ncs_read_pulse         = 80,
+       .nrd_pulse              = 40,
+       .ncs_write_pulse        = 65,
+       .nwe_pulse              = 55,
+
+       .read_cycle             = 120,
+       .write_cycle            = 120,
+
+       .bus_width              = 2,
+       .nrd_controlled         = 1,
+       .nwe_controlled         = 1,
+       .byte_write             = 1,
+};
+
+static struct mtd_partition flash_parts[] = {
+       {
+               .name           = "u-boot",
+               .offset         = 0x00000000,
+               .size           = 0x00020000,           /* 128 KiB */
+               .mask_flags     = MTD_WRITEABLE,
+       },
+       {
+               .name           = "root",
+               .offset         = 0x00020000,
+               .size           = 0x007d0000,
+       },
+       {
+               .name           = "env",
+               .offset         = 0x007f0000,
+               .size           = 0x00010000,
+               .mask_flags     = MTD_WRITEABLE,
+       },
+};
+
+static struct physmap_flash_data flash_data = {
+       .width          = 2,
+       .nr_parts       = ARRAY_SIZE(flash_parts),
+       .parts          = flash_parts,
+};
+
+static struct resource flash_resource = {
+       .start          = 0x00000000,
+       .end            = 0x007fffff,
+       .flags          = IORESOURCE_MEM,
+};
+
+static struct platform_device flash_device = {
+       .name           = "physmap-flash",
+       .id             = 0,
+       .resource       = &flash_resource,
+       .num_resources  = 1,
+       .dev            = {
+               .platform_data = &flash_data,
+       },
+};
+
+/* This needs to be called after the SMC has been initialized */
+static int __init atngw100_flash_init(void)
+{
+       int ret;
+
+       ret = smc_set_configuration(0, &flash_config);
+       if (ret < 0) {
+               printk(KERN_ERR "atngw100: failed to set NOR flash timing\n");
+               return ret;
+       }
+
+       platform_device_register(&flash_device);
+
+       return 0;
+}
+device_initcall(atngw100_flash_init);
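
For reference, the partition table above tiles the 8 MiB window declared in flash_resource exactly; listing MTD_WRITEABLE in mask_flags strips that flag from the partition, i.e. makes it read-only. The layout, worked out from the offsets and sizes above:

/*
 * Layout implied by flash_parts[] (8 MiB total, matching the
 * flash_resource range 0x000000..0x7fffff):
 *
 *   u-boot   0x000000..0x01ffff    128 KiB   read-only (MTD_WRITEABLE masked)
 *   root     0x020000..0x7effff   8000 KiB   writable
 *   env      0x7f0000..0x7fffff     64 KiB   read-only (MTD_WRITEABLE masked)
 */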
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
new file mode 100644 (file)
index 0000000..9bc37d4
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Board-specific setup code for the ATNGW100 Network Gateway
+ *
+ * Copyright (C) 2005-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/spi/spi.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+
+#include <asm/arch/at32ap7000.h>
+#include <asm/arch/board.h>
+#include <asm/arch/init.h>
+
+/* Initialized by bootloader-specific startup code. */
+struct tag *bootloader_tags __initdata;
+
+struct eth_addr {
+       u8 addr[6];
+};
+static struct eth_addr __initdata hw_addr[2];
+static struct eth_platform_data __initdata eth_data[2];
+
+static struct spi_board_info spi0_board_info[] __initdata = {
+       {
+               .modalias       = "mtd_dataflash",
+               .max_speed_hz   = 10000000,
+               .chip_select    = 0,
+       },
+};
+
+/*
+ * The next two functions should go away as the boot loader is
+ * supposed to initialize the macb address registers with a valid
+ * ethernet address. But we need to keep them around for a while until
+ * we can be reasonably sure the boot loader does this.
+ *
+ * The phy_id is ignored as the driver will probe for it.
+ */
+static int __init parse_tag_ethernet(struct tag *tag)
+{
+       int i;
+
+       i = tag->u.ethernet.mac_index;
+       if (i < ARRAY_SIZE(hw_addr))
+               memcpy(hw_addr[i].addr, tag->u.ethernet.hw_address,
+                      sizeof(hw_addr[i].addr));
+
+       return 0;
+}
+__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
+
+static void __init set_hw_addr(struct platform_device *pdev)
+{
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       const u8 *addr;
+       void __iomem *regs;
+       struct clk *pclk;
+
+       if (!res)
+               return;
+       if (pdev->id >= ARRAY_SIZE(hw_addr))
+               return;
+
+       addr = hw_addr[pdev->id].addr;
+       if (!is_valid_ether_addr(addr))
+               return;
+
+       /*
+        * Since this is board-specific code, we'll cheat and use the
+        * physical address directly as we happen to know that it's
+        * the same as the virtual address.
+        */
+       regs = (void __iomem __force *)res->start;
+       pclk = clk_get(&pdev->dev, "pclk");
+       if (!pclk)
+               return;
+
+       clk_enable(pclk);
+       __raw_writel((addr[3] << 24) | (addr[2] << 16)
+                    | (addr[1] << 8) | addr[0], regs + 0x98);
+       __raw_writel((addr[5] << 8) | addr[4], regs + 0x9c);
+       clk_disable(pclk);
+       clk_put(pclk);
+}
+
+struct platform_device *at32_usart_map[1];
+unsigned int at32_nr_usarts = 1;
+
+void __init setup_board(void)
+{
+       at32_map_usart(1, 0);   /* USART 1: /dev/ttyS0, DB9 */
+       at32_setup_serial_console(0);
+}
+
+static int __init atngw100_init(void)
+{
+       /*
+        * ATNGW100 uses 16-bit SDRAM interface, so we don't need to
+        * reserve any pins for it.
+        */
+
+       at32_add_system_devices();
+
+       at32_add_device_usart(0);
+
+       set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
+       set_hw_addr(at32_add_device_eth(1, &eth_data[1]));
+
+       at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
+
+       return 0;
+}
+postcore_initcall(atngw100_init);
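
The byte order in set_hw_addr() above is easy to get backwards, so here is a small, self-contained worked example of the same packing. The MAC address is invented purely for illustration; the shifts and the 0x98/0x9c register offsets come straight from the code above (presumably the MACB specific-address bottom/top registers, which is an assumption, not something the diff states).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical MAC address 00:04:25:aa:bb:cc, addr[0] first. */
        const uint8_t addr[6] = { 0x00, 0x04, 0x25, 0xaa, 0xbb, 0xcc };

        /* Same packing as set_hw_addr(): the first octet lands in the
         * least significant byte of the "bottom" register (offset 0x98). */
        uint32_t bottom = ((uint32_t)addr[3] << 24) | ((uint32_t)addr[2] << 16)
                        | ((uint32_t)addr[1] << 8) | addr[0];
        uint32_t top = ((uint32_t)addr[5] << 8) | addr[4];

        /* Prints: bottom=0xaa250400 top=0x0000ccbb */
        printf("bottom=0x%08x top=0x%08x\n", (unsigned)bottom, (unsigned)top);
        return 0;
}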
index 5974768..abe6ca2 100644 (file)
@@ -33,7 +33,7 @@ struct eth_addr {
 static struct eth_addr __initdata hw_addr[2];
 
 static struct eth_platform_data __initdata eth_data[2];
-extern struct lcdc_platform_data atstk1000_fb0_data;
+static struct lcdc_platform_data atstk1000_fb0_data;
 
 static struct spi_board_info spi0_board_info[] __initdata = {
        {
@@ -148,6 +148,8 @@ static int __init atstk1002_init(void)
        set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
 
        at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
+       atstk1000_fb0_data.fbmem_start = fbmem_start;
+       atstk1000_fb0_data.fbmem_size = fbmem_size;
        at32_add_device_lcdc(0, &atstk1000_fb0_data);
 
        return 0;
index 272c011..2bc4b88 100644 (file)
 
 /* Initialized by bootloader-specific startup code. */
 struct tag *bootloader_tags __initdata;
-
-struct lcdc_platform_data __initdata atstk1000_fb0_data;
-
-void __init board_setup_fbmem(unsigned long fbmem_start,
-                             unsigned long fbmem_size)
-{
-       if (!fbmem_size)
-               return;
-
-       if (!fbmem_start) {
-               void *fbmem;
-
-               fbmem = alloc_bootmem_low_pages(fbmem_size);
-               fbmem_start = __pa(fbmem);
-       } else {
-               pg_data_t *pgdat;
-
-               for_each_online_pgdat(pgdat) {
-                       if (fbmem_start >= pgdat->bdata->node_boot_start
-                           && fbmem_start <= pgdat->bdata->node_low_pfn)
-                               reserve_bootmem_node(pgdat, fbmem_start,
-                                                    fbmem_size);
-               }
-       }
-
-       printk("%luKiB framebuffer memory at address 0x%08lx\n",
-              fbmem_size >> 10, fbmem_start);
-       atstk1000_fb0_data.fbmem_start = fbmem_start;
-       atstk1000_fb0_data.fbmem_size = fbmem_size;
-}
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
new file mode 100644 (file)
index 0000000..c254ffc
--- /dev/null
@@ -0,0 +1,1085 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.21-rc6
+# Thu Apr 12 16:35:07 2007
+#
+CONFIG_AVR32=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_TIME=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_BASE_FULL is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=1
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+
+#
+# System Type and features
+#
+CONFIG_SUBARCH_AVR32B=y
+CONFIG_MMU=y
+CONFIG_PERFORMANCE_COUNTERS=y
+CONFIG_PLATFORM_AT32AP=y
+CONFIG_CPU_AT32AP7000=y
+# CONFIG_BOARD_ATSTK1000 is not set
+CONFIG_BOARD_ATNGW100=y
+CONFIG_LOADER_U_BOOT=y
+
+#
+# Atmel AVR32 AP options
+#
+# CONFIG_AP7000_32_BIT_SMC is not set
+CONFIG_AP7000_16_BIT_SMC=y
+# CONFIG_AP7000_8_BIT_SMC is not set
+CONFIG_LOAD_ADDRESS=0x10000000
+CONFIG_ENTRY_ADDRESS=0x90000000
+CONFIG_PHYS_OFFSET=0x10000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
+# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+# CONFIG_OWNERSHIP_TRACE is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_CMDLINE=""
+
+#
+# Bus options
+#
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_MULTIPLE_TABLES is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+# CONFIG_IP_PIMSM_V2 is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+# CONFIG_IPV6_MIP6 is not set
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK is not set
+CONFIG_NF_CONNTRACK_ENABLED=m
+CONFIG_NF_CONNTRACK_SUPPORT=y
+# CONFIG_IP_NF_CONNTRACK_SUPPORT is not set
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CT_ACCT=y
+CONFIG_NF_CONNTRACK_MARK=y
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+CONFIG_NF_CT_PROTO_GRE=m
+# CONFIG_NF_CT_PROTO_SCTP is not set
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PROTO_GRE=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration (EXPERIMENTAL)
+#
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_RAW=m
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+CONFIG_VLAN_8021Q=m
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x80000000
+CONFIG_MTD_PHYSMAP_LEN=0x0
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+# CONFIG_PNPACPI is not set
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_NETLINK is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_MACB=y
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+# CONFIG_SPI_BITBANG is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# Auxiliary Display support
+#
+
+#
+# Virtualization
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+CONFIG_CONFIGFS_FS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=y
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=y
+
+#
+# Distributed Lock Manager
+#
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_FORCED_INLINING is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_KPROBES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
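
As with any in-tree defconfig, this one would normally be applied with "make ARCH=avr32 atngw100_defconfig" before building; the cross-compiler prefix (e.g. CROSS_COMPILE=avr32-linux-) is not specified by the patch and is only an assumption here.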
index 2e72fd2..2714cf6 100644 (file)
@@ -209,16 +209,17 @@ static const char *mmu_types[] = {
 void __init setup_processor(void)
 {
        unsigned long config0, config1;
+       unsigned long features;
        unsigned cpu_id, cpu_rev, arch_id, arch_rev, mmu_type;
        unsigned tmp;
 
-       config0 = sysreg_read(CONFIG0); /* 0x0000013e; */
-       config1 = sysreg_read(CONFIG1); /* 0x01f689a2; */
-       cpu_id = config0 >> 24;
-       cpu_rev = (config0 >> 16) & 0xff;
-       arch_id = (config0 >> 13) & 0x07;
-       arch_rev = (config0 >> 10) & 0x07;
-       mmu_type = (config0 >> 7) & 0x03;
+       config0 = sysreg_read(CONFIG0);
+       config1 = sysreg_read(CONFIG1);
+       cpu_id = SYSREG_BFEXT(PROCESSORID, config0);
+       cpu_rev = SYSREG_BFEXT(PROCESSORREVISION, config0);
+       arch_id = SYSREG_BFEXT(AT, config0);
+       arch_rev = SYSREG_BFEXT(AR, config0);
+       mmu_type = SYSREG_BFEXT(MMUT, config0);
 
        boot_cpu_data.arch_type = arch_id;
        boot_cpu_data.cpu_type = cpu_id;
@@ -226,16 +227,16 @@ void __init setup_processor(void)
        boot_cpu_data.cpu_revision = cpu_rev;
        boot_cpu_data.tlb_config = mmu_type;
 
-       tmp = (config1 >> 13) & 0x07;
+       tmp = SYSREG_BFEXT(ILSZ, config1);
        if (tmp) {
-               boot_cpu_data.icache.ways = 1 << ((config1 >> 10) & 0x07);
-               boot_cpu_data.icache.sets = 1 << ((config1 >> 16) & 0x0f);
+               boot_cpu_data.icache.ways = 1 << SYSREG_BFEXT(IASS, config1);
+               boot_cpu_data.icache.sets = 1 << SYSREG_BFEXT(ISET, config1);
                boot_cpu_data.icache.linesz = 1 << (tmp + 1);
        }
-       tmp = (config1 >> 3) & 0x07;
+       tmp = SYSREG_BFEXT(DLSZ, config1);
        if (tmp) {
-               boot_cpu_data.dcache.ways = 1 << (config1 & 0x07);
-               boot_cpu_data.dcache.sets = 1 << ((config1 >> 6) & 0x0f);
+               boot_cpu_data.dcache.ways = 1 << SYSREG_BFEXT(DASS, config1);
+               boot_cpu_data.dcache.sets = 1 << SYSREG_BFEXT(DSET, config1);
                boot_cpu_data.dcache.linesz = 1 << (tmp + 1);
        }
 
@@ -250,16 +251,39 @@ void __init setup_processor(void)
                cpu_names[cpu_id], cpu_id, cpu_rev,
                arch_names[arch_id], arch_rev);
        printk ("CPU: MMU configuration: %s\n", mmu_types[mmu_type]);
+
        printk ("CPU: features:");
-       if (config0 & (1 << 6))
-               printk(" fpu");
-       if (config0 & (1 << 5))
-               printk(" java");
-       if (config0 & (1 << 4))
-               printk(" perfctr");
-       if (config0 & (1 << 3))
+       features = 0;
+       if (config0 & SYSREG_BIT(CONFIG0_R)) {
+               features |= AVR32_FEATURE_RMW;
+               printk(" rmw");
+       }
+       if (config0 & SYSREG_BIT(CONFIG0_D)) {
+               features |= AVR32_FEATURE_DSP;
+               printk(" dsp");
+       }
+       if (config0 & SYSREG_BIT(CONFIG0_S)) {
+               features |= AVR32_FEATURE_SIMD;
+               printk(" simd");
+       }
+       if (config0 & SYSREG_BIT(CONFIG0_O)) {
+               features |= AVR32_FEATURE_OCD;
                printk(" ocd");
+       }
+       if (config0 & SYSREG_BIT(CONFIG0_P)) {
+               features |= AVR32_FEATURE_PCTR;
+               printk(" perfctr");
+       }
+       if (config0 & SYSREG_BIT(CONFIG0_J)) {
+               features |= AVR32_FEATURE_JAVA;
+               printk(" java");
+       }
+       if (config0 & SYSREG_BIT(CONFIG0_F)) {
+               features |= AVR32_FEATURE_FPU;
+               printk(" fpu");
+       }
        printk("\n");
+       boot_cpu_data.features = features;
 }
 
 #ifdef CONFIG_PROC_FS
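
The hunk above replaces open-coded shifts and masks with named SYSREG_BFEXT() field extractions. The macro itself lives in the AVR32 sysreg header, which this diff does not show. A minimal sketch of the kind of helper being relied on, assuming each field FOO carries FOO_OFFSET and FOO_SIZE constants (the example values below are inferred from the removed open-coded version, not from the header):

/*
 * Illustrative sketch only -- not the real <asm/sysreg.h> definition.
 * Extracts a bitfield named "field" from a system register value,
 * given field##_OFFSET (bit position) and field##_SIZE (width in bits).
 */
#define SYSREG_BFEXT(field, value) \
        (((value) >> field##_OFFSET) & ((1UL << field##_SIZE) - 1))

/*
 * With PROCESSORID_OFFSET == 24 and PROCESSORID_SIZE == 8 (inferred from
 * the removed "cpu_id = config0 >> 24"), SYSREG_BFEXT(PROCESSORID, config0)
 * yields the same top-byte value as before.
 */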
index eeb6679..42657f1 100644 (file)
@@ -100,55 +100,49 @@ dtlb_miss_write:
 
        .global tlb_miss_common
 tlb_miss_common:
-       mfsr    r0, SYSREG_PTBR
-       mfsr    r1, SYSREG_TLBEAR
+       mfsr    r0, SYSREG_TLBEAR
+       mfsr    r1, SYSREG_PTBR
 
        /* Is it the vmalloc space? */
-       bld     r1, 31
+       bld     r0, 31
        brcs    handle_vmalloc_miss
 
        /* First level lookup */
 pgtbl_lookup:
-       lsr     r2, r1, PGDIR_SHIFT
-       ld.w    r0, r0[r2 << 2]
-       bld     r0, _PAGE_BIT_PRESENT
+       lsr     r2, r0, PGDIR_SHIFT
+       ld.w    r3, r1[r2 << 2]
+       bfextu  r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
+       bld     r3, _PAGE_BIT_PRESENT
        brcc    page_table_not_present
 
-       /* TODO: Check access rights on page table if necessary */
-
        /* Translate to virtual address in P1. */
-       andl    r0, 0xf000
-       sbr     r0, 31
+       andl    r3, 0xf000
+       sbr     r3, 31
 
        /* Second level lookup */
-       lsl     r1, (32 - PGDIR_SHIFT)
-       lsr     r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
-       add     r2, r0, r1 << 2
-       ld.w    r1, r2[0]
-       bld     r1, _PAGE_BIT_PRESENT
+       ld.w    r2, r3[r1 << 2]
+       mfsr    r0, SYSREG_TLBARLO
+       bld     r2, _PAGE_BIT_PRESENT
        brcc    page_not_present
 
        /* Mark the page as accessed */
-       sbr     r1, _PAGE_BIT_ACCESSED
-       st.w    r2[0], r1
+       sbr     r2, _PAGE_BIT_ACCESSED
+       st.w    r3[r1 << 2], r2
 
        /* Drop software flags */
-       andl    r1, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
-       mtsr    SYSREG_TLBELO, r1
+       andl    r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
+       mtsr    SYSREG_TLBELO, r2
 
        /* Figure out which entry we want to replace */
-       mfsr    r0, SYSREG_TLBARLO
+       mfsr    r1, SYSREG_MMUCR
        clz     r2, r0
        brcc    1f
-       mov     r1, -1                  /* All entries have been accessed, */
-       mtsr    SYSREG_TLBARLO, r1      /* so reset TLBAR */
-       mov     r2, 0                   /* and start at 0 */
-1:     mfsr    r1, SYSREG_MMUCR
-       lsl     r2, 14
-       andl    r1, 0x3fff, COH
-       or      r1, r2
-       mtsr    SYSREG_MMUCR, r1
+       mov     r3, -1                  /* All entries have been accessed, */
+       mov     r2, 0                   /* so start at 0 */
+       mtsr    SYSREG_TLBARLO, r3      /* and reset TLBAR */
 
+1:     bfins   r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
+       mtsr    SYSREG_MMUCR, r1
        tlbw
 
        tlbmiss_restore
@@ -156,8 +150,8 @@ pgtbl_lookup:
 
 handle_vmalloc_miss:
        /* Simply do the lookup in init's page table */
-       mov     r0, lo(swapper_pg_dir)
-       orh     r0, hi(swapper_pg_dir)
+       mov     r1, lo(swapper_pg_dir)
+       orh     r1, hi(swapper_pg_dir)
        rjmp    pgtbl_lookup
 
 
@@ -340,12 +334,34 @@ do_bus_error_read:
 do_nmi_ll:
        sub     sp, 4
        stmts   --sp, r0-lr
-       /* FIXME: Make sure RAR_NMI and RSR_NMI are pushed instead of *_EX */
-       rcall   save_full_context_ex
+       mfsr    r9, SYSREG_RSR_NMI
+       mfsr    r8, SYSREG_RAR_NMI
+       bfextu  r0, r9, MODE_SHIFT, 3
+       brne    2f
+
+1:     pushm   r8, r9  /* PC and SR */
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        rcall   do_nmi
-       rjmp    bad_return
+       popm    r8-r9
+       mtsr    SYSREG_RAR_NMI, r8
+       tst     r0, r0
+       mtsr    SYSREG_RSR_NMI, r9
+       brne    3f
+
+       ldmts   sp++, r0-lr
+       sub     sp, -4          /* skip r12_orig */
+       rete
+
+2:     sub     r10, sp, -(FRAME_SIZE_FULL - REG_LR)
+       stdsp   sp[4], r10      /* replace saved SP */
+       rjmp    1b
+
+3:     popm    lr
+       sub     sp, -4          /* skip sp */
+       popm    r0-r12
+       sub     sp, -4          /* skip r12_orig */
+       rete
 
 handle_address_fault:
        sub     sp, 4
@@ -630,9 +646,12 @@ irq_level\level:
        rcall   do_IRQ
 
        lddsp   r4, sp[REG_SR]
-       andh    r4, (MODE_MASK >> 16), COH
+       bfextu  r4, r4, SYSREG_M0_OFFSET, 3
+       cp.w    r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
+       breq    2f
+       cp.w    r4, MODE_USER >> SYSREG_M0_OFFSET
 #ifdef CONFIG_PREEMPT
-       brne    2f
+       brne    3f
 #else
        brne    1f
 #endif
@@ -649,9 +668,18 @@ irq_level\level:
        sub     sp, -4          /* ignore r12_orig */
        rete
 
+2:     get_thread_info r0
+       ld.w    r1, r0[TI_flags]
+       bld     r1, TIF_CPU_GOING_TO_SLEEP
 #ifdef CONFIG_PREEMPT
-2:
-       get_thread_info r0
+       brcc    3f
+#else
+       brcc    1b
+#endif
+       sub     r1, pc, . - cpu_idle_skip_sleep
+       stdsp   sp[REG_PC], r1
+#ifdef CONFIG_PREEMPT
+3:     get_thread_info r0
        ld.w    r2, r0[TI_preempt_count]
        cp.w    r2, 0
        brne    1b
@@ -662,12 +690,32 @@ irq_level\level:
        bld     r4, SYSREG_GM_OFFSET
        brcs    1b
        rcall   preempt_schedule_irq
-       rjmp    1b
 #endif
+       rjmp    1b
        .endm
 
        .section .irq.text,"ax",@progbits
 
+.global cpu_idle_sleep
+cpu_idle_sleep:
+       mask_interrupts
+       get_thread_info r8
+       ld.w    r9, r8[TI_flags]
+       bld     r9, TIF_NEED_RESCHED
+       brcs    cpu_idle_enable_int_and_exit
+       sbr     r9, TIF_CPU_GOING_TO_SLEEP
+       st.w    r8[TI_flags], r9
+       unmask_interrupts
+       sleep 0
+cpu_idle_skip_sleep:
+       mask_interrupts
+       ld.w    r9, r8[TI_flags]
+       cbr     r9, TIF_CPU_GOING_TO_SLEEP
+       st.w    r8[TI_flags], r9
+cpu_idle_enable_int_and_exit:
+       unmask_interrupts
+       retal   r12
+
        .global irq_level0
        .global irq_level1
        .global irq_level2
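
The cpu_idle_sleep/cpu_idle_skip_sleep pair above closes the usual idle race: a wakeup arriving between the need_resched check and the sleep instruction must not leave the CPU asleep on a stale decision. A rough C-level rendering of the protocol the assembly implements (illustrative only, not code from this patch; local_irq_disable()/local_irq_enable() stand in for the mask_interrupts/unmask_interrupts assembler macros):

/* Illustrative rendering of the assembly above; not part of the patch. */
void cpu_idle_sleep(void)
{
        local_irq_disable();
        if (test_thread_flag(TIF_NEED_RESCHED)) {
                /* A wakeup sneaked in before we committed to sleeping. */
                local_irq_enable();
                return;
        }
        set_thread_flag(TIF_CPU_GOING_TO_SLEEP);
        local_irq_enable();
        asm volatile("sleep 0");
        /*
         * Any interrupt taken once the flag is set goes through the
         * irq_level return path above, which rewrites the saved PC to
         * cpu_idle_skip_sleep; that label clears TIF_CPU_GOING_TO_SLEEP
         * and returns, so the sleep is skipped (or immediately left)
         * rather than re-entered, and cpu_idle() re-checks need_resched().
         */
}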
index b599eae..1167fe9 100644 (file)
  * published by the Free Software Foundation.
  */
 
-#include <linux/moduleloader.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/bug.h>
 #include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
 
 void *module_alloc(unsigned long size)
@@ -315,10 +316,10 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
        vfree(module->arch.syminfo);
        module->arch.syminfo = NULL;
 
-       return 0;
+       return module_bug_finalize(hdr, sechdrs, module);
 }
 
 void module_arch_cleanup(struct module *module)
 {
-
+       module_bug_cleanup(module);
 }
index 0b43259..4e4181e 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/fs.h>
 #include <linux/ptrace.h>
 #include <linux/reboot.h>
+#include <linux/uaccess.h>
 #include <linux/unistd.h>
 
 #include <asm/sysreg.h>
@@ -19,6 +20,8 @@
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
+extern void cpu_idle_sleep(void);
+
 /*
  * This file handles the architecture-dependent parts of process handling..
  */
@@ -27,9 +30,8 @@ void cpu_idle(void)
 {
        /* endless idle loop with no priority at all */
        while (1) {
-               /* TODO: Enter sleep mode */
                while (!need_resched())
-                       cpu_relax();
+                       cpu_idle_sleep();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
@@ -114,39 +116,178 @@ void release_thread(struct task_struct *dead_task)
        /* do nothing */
 }
 
+static void dump_mem(const char *str, const char *log_lvl,
+                    unsigned long bottom, unsigned long top)
+{
+       unsigned long p;
+       int i;
+
+       printk("%s%s(0x%08lx to 0x%08lx)\n", log_lvl, str, bottom, top);
+
+       for (p = bottom & ~31; p < top; ) {
+               printk("%s%04lx: ", log_lvl, p & 0xffff);
+
+               for (i = 0; i < 8; i++, p += 4) {
+                       unsigned int val;
+
+                       if (p < bottom || p >= top)
+                               printk("         ");
+                       else {
+                               if (__get_user(val, (unsigned int __user *)p)) {
+                                       printk("\n");
+                                       goto out;
+                               }
+                               printk("%08x ", val);
+                       }
+               }
+               printk("\n");
+       }
+
+out:
+       return;
+}
+
+static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
+{
+       return (p > (unsigned long)tinfo)
+               && (p < (unsigned long)tinfo + THREAD_SIZE - 3);
+}
+
+#ifdef CONFIG_FRAME_POINTER
+static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
+                              struct pt_regs *regs, const char *log_lvl)
+{
+       unsigned long lr, fp;
+       struct thread_info *tinfo;
+
+       if (regs)
+               fp = regs->r7;
+       else if (tsk == current)
+               asm("mov %0, r7" : "=r"(fp));
+       else
+               fp = tsk->thread.cpu_context.r7;
+
+       /*
+        * Walk the stack as long as the frame pointer (a) is within
+        * the kernel stack of the task, and (b) it doesn't move
+        * downwards.
+        */
+       tinfo = task_thread_info(tsk);
+       printk("%sCall trace:\n", log_lvl);
+       while (valid_stack_ptr(tinfo, fp)) {
+               unsigned long new_fp;
+
+               lr = *(unsigned long *)fp;
+#ifdef CONFIG_KALLSYMS
+               printk("%s [<%08lx>] ", log_lvl, lr);
+#else
+               printk(" [<%08lx>] ", lr);
+#endif
+               print_symbol("%s\n", lr);
+
+               new_fp = *(unsigned long *)(fp + 4);
+               if (new_fp <= fp)
+                       break;
+               fp = new_fp;
+       }
+       printk("\n");
+}
+#else
+static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
+                              struct pt_regs *regs, const char *log_lvl)
+{
+       unsigned long addr;
+
+       printk("%sCall trace:\n", log_lvl);
+
+       while (!kstack_end(sp)) {
+               addr = *sp++;
+               if (kernel_text_address(addr)) {
+#ifdef CONFIG_KALLSYMS
+                       printk("%s [<%08lx>] ", log_lvl, addr);
+#else
+                       printk(" [<%08lx>] ", addr);
+#endif
+                       print_symbol("%s\n", addr);
+               }
+       }
+       printk("\n");
+}
+#endif
+
+void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
+                       struct pt_regs *regs, const char *log_lvl)
+{
+       struct thread_info *tinfo;
+
+       if (sp == 0) {
+               if (tsk)
+                       sp = tsk->thread.cpu_context.ksp;
+               else
+                       sp = (unsigned long)&tinfo;
+       }
+       if (!tsk)
+               tsk = current;
+
+       tinfo = task_thread_info(tsk);
+
+       if (valid_stack_ptr(tinfo, sp)) {
+               dump_mem("Stack: ", log_lvl, sp,
+                        THREAD_SIZE + (unsigned long)tinfo);
+               show_trace_log_lvl(tsk, (unsigned long *)sp, regs, log_lvl);
+       }
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *stack)
+{
+       show_stack_log_lvl(tsk, (unsigned long)stack, NULL, "");
+}
+
+void dump_stack(void)
+{
+       unsigned long stack;
+
+       show_trace_log_lvl(current, &stack, NULL, "");
+}
+EXPORT_SYMBOL(dump_stack);
+
 static const char *cpu_modes[] = {
        "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
        "Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
 };
 
-void show_regs(struct pt_regs *regs)
+void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
 {
        unsigned long sp = regs->sp;
        unsigned long lr = regs->lr;
        unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;
 
-       if (!user_mode(regs))
+       if (!user_mode(regs)) {
                sp = (unsigned long)regs + FRAME_SIZE_FULL;
 
-       print_symbol("PC is at %s\n", instruction_pointer(regs));
-       print_symbol("LR is at %s\n", lr);
-       printk("pc : [<%08lx>]    lr : [<%08lx>]    %s\n"
-              "sp : %08lx  r12: %08lx  r11: %08lx\n",
-              instruction_pointer(regs),
-              lr, print_tainted(), sp, regs->r12, regs->r11);
-       printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
-              regs->r10, regs->r9, regs->r8);
-       printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
-              regs->r7, regs->r6, regs->r5, regs->r4);
-       printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
-              regs->r3, regs->r2, regs->r1, regs->r0);
-       printk("Flags: %c%c%c%c%c\n",
+               printk("%s", log_lvl);
+               print_symbol("PC is at %s\n", instruction_pointer(regs));
+               printk("%s", log_lvl);
+               print_symbol("LR is at %s\n", lr);
+       }
+
+       printk("%spc : [<%08lx>]    lr : [<%08lx>]    %s\n"
+              "%ssp : %08lx  r12: %08lx  r11: %08lx\n",
+              log_lvl, instruction_pointer(regs), lr, print_tainted(),
+              log_lvl, sp, regs->r12, regs->r11);
+       printk("%sr10: %08lx  r9 : %08lx  r8 : %08lx\n",
+              log_lvl, regs->r10, regs->r9, regs->r8);
+       printk("%sr7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
+              log_lvl, regs->r7, regs->r6, regs->r5, regs->r4);
+       printk("%sr3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
+              log_lvl, regs->r3, regs->r2, regs->r1, regs->r0);
+       printk("%sFlags: %c%c%c%c%c\n", log_lvl,
               regs->sr & SR_Q ? 'Q' : 'q',
               regs->sr & SR_V ? 'V' : 'v',
               regs->sr & SR_N ? 'N' : 'n',
               regs->sr & SR_Z ? 'Z' : 'z',
               regs->sr & SR_C ? 'C' : 'c');
-       printk("Mode bits: %c%c%c%c%c%c%c%c%c\n",
+       printk("%sMode bits: %c%c%c%c%c%c%c%c%c\n", log_lvl,
               regs->sr & SR_H ? 'H' : 'h',
               regs->sr & SR_R ? 'R' : 'r',
               regs->sr & SR_J ? 'J' : 'j',
@@ -156,9 +297,21 @@ void show_regs(struct pt_regs *regs)
               regs->sr & SR_I1M ? '1' : '.',
               regs->sr & SR_I0M ? '0' : '.',
               regs->sr & SR_GM ? 'G' : 'g');
-       printk("CPU Mode: %s\n", cpu_modes[mode]);
+       printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]);
+       printk("%sProcess: %s [%d] (task: %p thread: %p)\n",
+              log_lvl, current->comm, current->pid, current,
+              task_thread_info(current));
+}
+
+void show_regs(struct pt_regs *regs)
+{
+       unsigned long sp = regs->sp;
+
+       if (!user_mode(regs))
+               sp = (unsigned long)regs + FRAME_SIZE_FULL;
 
-       show_trace(NULL, (unsigned long *)sp, regs);
+       show_regs_log_lvl(regs, "");
+       show_trace_log_lvl(current, (unsigned long *)sp, regs, "");
 }
 EXPORT_SYMBOL(show_regs);
 
index a1a7c3c..b279d66 100644 (file)
@@ -8,12 +8,14 @@
 
 #include <linux/clk.h>
 #include <linux/init.h>
+#include <linux/initrd.h>
 #include <linux/sched.h>
 #include <linux/console.h>
 #include <linux/ioport.h>
 #include <linux/bootmem.h>
 #include <linux/fs.h>
 #include <linux/module.h>
+#include <linux/pfn.h>
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/kernel.h>
 
 extern int root_mountflags;
 
-/*
- * Bootloader-provided information about physical memory
- */
-struct tag_mem_range *mem_phys;
-struct tag_mem_range *mem_reserved;
-struct tag_mem_range *mem_ramdisk;
-
 /*
  * Initialize loops_per_jiffy as 5000000 (500MIPS).
  * Better make it too large than too small...
@@ -48,48 +43,193 @@ EXPORT_SYMBOL(boot_cpu_data);
 static char __initdata command_line[COMMAND_LINE_SIZE];
 
 /*
- * Should be more than enough, but if you have a _really_ complex
- * setup, you might need to increase the size of this...
+ * Standard memory resources
  */
-static struct tag_mem_range __initdata mem_range_cache[32];
-static unsigned mem_range_next_free;
+static struct resource __initdata kernel_data = {
+       .name   = "Kernel data",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_MEM,
+};
+static struct resource __initdata kernel_code = {
+       .name   = "Kernel code",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_MEM,
+       .sibling = &kernel_data,
+};
 
 /*
- * Standard memory resources
+ * Available system RAM and reserved regions as singly linked
+ * lists. These lists are traversed using the sibling pointer in
+ * struct resource and are kept sorted at all times.
  */
-static struct resource mem_res[] = {
-       {
-               .name   = "Kernel code",
-               .start  = 0,
-               .end    = 0,
-               .flags  = IORESOURCE_MEM
-       },
-       {
-               .name   = "Kernel data",
-               .start  = 0,
-               .end    = 0,
-               .flags  = IORESOURCE_MEM,
-       },
-};
+static struct resource *__initdata system_ram;
+static struct resource *__initdata reserved = &kernel_code;
+
+/*
+ * We need to allocate these before the bootmem allocator is up and
+ * running, so we need this "cache". 32 entries are probably enough
+ * for all but the most insanely complex systems.
+ */
+static struct resource __initdata res_cache[32];
+static unsigned int __initdata res_cache_next_free;
+
+static void __init resource_init(void)
+{
+       struct resource *mem, *res;
+       struct resource *new;
+
+       kernel_code.start = __pa(init_mm.start_code);
+
+       for (mem = system_ram; mem; mem = mem->sibling) {
+               new = alloc_bootmem_low(sizeof(struct resource));
+               memcpy(new, mem, sizeof(struct resource));
+
+               new->sibling = NULL;
+               if (request_resource(&iomem_resource, new))
+                       printk(KERN_WARNING "Bad RAM resource %08x-%08x\n",
+                              mem->start, mem->end);
+       }
+
+       for (res = reserved; res; res = res->sibling) {
+               new = alloc_bootmem_low(sizeof(struct resource));
+               memcpy(new, res, sizeof(struct resource));
+
+               new->sibling = NULL;
+               if (insert_resource(&iomem_resource, new))
+                       printk(KERN_WARNING
+                              "Bad reserved resource %s (%08x-%08x)\n",
+                              res->name, res->start, res->end);
+       }
+}
+
+static void __init
+add_physical_memory(resource_size_t start, resource_size_t end)
+{
+       struct resource *new, *next, **pprev;
+
+       for (pprev = &system_ram, next = system_ram; next;
+            pprev = &next->sibling, next = next->sibling) {
+               if (end < next->start)
+                       break;
+               if (start <= next->end) {
+                       printk(KERN_WARNING
+                              "Warning: Physical memory map is broken\n");
+                       printk(KERN_WARNING
+                              "Warning: %08x-%08x overlaps %08x-%08x\n",
+                              start, end, next->start, next->end);
+                       return;
+               }
+       }
+
+       if (res_cache_next_free >= ARRAY_SIZE(res_cache)) {
+               printk(KERN_WARNING
+                      "Warning: Failed to add physical memory %08x-%08x\n",
+                      start, end);
+               return;
+       }
+
+       new = &res_cache[res_cache_next_free++];
+       new->start = start;
+       new->end = end;
+       new->name = "System RAM";
+       new->flags = IORESOURCE_MEM;
+
+       *pprev = new;
+}
+
+static int __init
+add_reserved_region(resource_size_t start, resource_size_t end,
+                   const char *name)
+{
+       struct resource *new, *next, **pprev;
+
+       if (end < start)
+               return -EINVAL;
+
+       if (res_cache_next_free >= ARRAY_SIZE(res_cache))
+               return -ENOMEM;
+
+       for (pprev = &reserved, next = reserved; next;
+            pprev = &next->sibling, next = next->sibling) {
+               if (end < next->start)
+                       break;
+               if (start <= next->end)
+                       return -EBUSY;
+       }
+
+       new = &res_cache[res_cache_next_free++];
+       new->start = start;
+       new->end = end;
+       new->name = name;
+       new->flags = IORESOURCE_MEM;
+
+       *pprev = new;
+
+       return 0;
+}
+
+static unsigned long __init
+find_free_region(const struct resource *mem, resource_size_t size,
+                resource_size_t align)
+{
+       struct resource *res;
+       unsigned long target;
+
+       target = ALIGN(mem->start, align);
+       for (res = reserved; res; res = res->sibling) {
+               if ((target + size) <= res->start)
+                       break;
+               if (target <= res->end)
+                       target = ALIGN(res->end + 1, align);
+       }
+
+       if ((target + size) > (mem->end + 1))
+               return mem->end + 1;
+
+       return target;
+}
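To make the search above concrete (addresses are illustrative, not taken from this patch): in a RAM bank 0x10000000-0x10ffffff whose reserved list holds a single entry 0x10000000-0x1007ffff, a request for 16 KiB with 4 KiB alignment first tries 0x10000000, collides with the reserved entry, gets bumped to ALIGN(0x10080000, 0x1000) = 0x10080000, and that address is returned. When no gap is large enough, the function returns mem->end + 1, which the callers below (find_bootmap_pfn() and alloc_reserved_region()) treat as "no space in this bank".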
+
+static int __init
+alloc_reserved_region(resource_size_t *start, resource_size_t size,
+                     resource_size_t align, const char *name)
+{
+       struct resource *mem;
+       resource_size_t target;
+       int ret;
+
+       for (mem = system_ram; mem; mem = mem->sibling) {
+               target = find_free_region(mem, size, align);
+               if (target <= mem->end) {
+                       ret = add_reserved_region(target, target + size - 1,
+                                                 name);
+                       if (!ret)
+                               *start = target;
+                       return ret;
+               }
+       }
 
-#define kernel_code    mem_res[0]
-#define kernel_data    mem_res[1]
+       return -ENOMEM;
+}
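As a minimal sketch of how these pre-bootmem helpers are meant to be called (the pool name, size and alignment are invented for illustration; the functions are the ones defined above):

        /* Illustrative only: set aside 1 MiB, 1 MiB-aligned, for an early pool. */
        static resource_size_t __initdata early_pool_start;

        static void __init reserve_early_pool(void)
        {
                if (alloc_reserved_region(&early_pool_start, 0x100000,
                                          0x100000, "Early pool"))
                        printk(KERN_WARNING
                               "Failed to reserve early pool\n");
        }

Such a caller would have to run from a tag parser or an early_param() handler, i.e. before setup_bootmem() and resource_init() below consume the reserved list.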
 
 /*
  * Early framebuffer allocation. Works as follows:
  *   - If fbmem_size is zero, nothing will be allocated or reserved.
  *   - If fbmem_start is zero when setup_bootmem() is called,
- *     fbmem_size bytes will be allocated from the bootmem allocator.
+ *     a block of fbmem_size bytes will be reserved before bootmem
+ *     initialization. It will be aligned to the largest page size
+ *     that fbmem_size is a multiple of.
  *   - If fbmem_start is nonzero, an area of size fbmem_size will be
- *     reserved at the physical address fbmem_start if necessary. If
- *     the area isn't in a memory region known to the kernel, it will
- *     be left alone.
+ *     reserved at the physical address fbmem_start if possible. If
+ *     it collides with other reserved memory, a different block of
+ *     the same size will be allocated, just as if fbmem_start was zero.
  *
  * Board-specific code may use these variables to set up platform data
  * for the framebuffer driver if fbmem_size is nonzero.
  */
-static unsigned long __initdata fbmem_start;
-static unsigned long __initdata fbmem_size;
+resource_size_t __initdata fbmem_start;
+resource_size_t __initdata fbmem_size;
 
 /*
  * "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for
@@ -103,48 +243,42 @@ static unsigned long __initdata fbmem_size;
  */
 static int __init early_parse_fbmem(char *p)
 {
+       int ret;
+       unsigned long align;
+
        fbmem_size = memparse(p, &p);
-       if (*p == '@')
+       if (*p == '@') {
                fbmem_start = memparse(p, &p);
-       return 0;
-}
-early_param("fbmem", early_parse_fbmem);
-
-static inline void __init resource_init(void)
-{
-       struct tag_mem_range *region;
-
-       kernel_code.start = __pa(init_mm.start_code);
-       kernel_code.end = __pa(init_mm.end_code - 1);
-       kernel_data.start = __pa(init_mm.end_code);
-       kernel_data.end = __pa(init_mm.brk - 1);
-
-       for (region = mem_phys; region; region = region->next) {
-               struct resource *res;
-               unsigned long phys_start, phys_end;
-
-               if (region->size == 0)
-                       continue;
-
-               phys_start = region->addr;
-               phys_end = phys_start + region->size - 1;
-
-               res = alloc_bootmem_low(sizeof(*res));
-               res->name = "System RAM";
-               res->start = phys_start;
-               res->end = phys_end;
-               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-
-               request_resource (&iomem_resource, res);
+               ret = add_reserved_region(fbmem_start,
+                                         fbmem_start + fbmem_size - 1,
+                                         "Framebuffer");
+               if (ret) {
+                       printk(KERN_WARNING
+                              "Failed to reserve framebuffer memory\n");
+                       fbmem_start = 0;
+               }
+       }
 
-               if (kernel_code.start >= res->start &&
-                   kernel_code.end <= res->end)
-                       request_resource (res, &kernel_code);
-               if (kernel_data.start >= res->start &&
-                   kernel_data.end <= res->end)
-                       request_resource (res, &kernel_data);
+       if (!fbmem_start) {
+               if ((fbmem_size & 0x000fffffUL) == 0)
+                       align = 0x100000;       /* 1 MiB */
+               else if ((fbmem_size & 0x0000ffffUL) == 0)
+                       align = 0x10000;        /* 64 KiB */
+               else
+                       align = 0x1000;         /* 4 KiB */
+
+               ret = alloc_reserved_region(&fbmem_start, fbmem_size,
+                                           align, "Framebuffer");
+               if (ret) {
+                       printk(KERN_WARNING
+                              "Failed to allocate framebuffer memory\n");
+                       fbmem_size = 0;
+               }
        }
+
+       return 0;
 }
+early_param("fbmem", early_parse_fbmem);
 
 static int __init parse_tag_core(struct tag *tag)
 {
@@ -157,11 +291,9 @@ static int __init parse_tag_core(struct tag *tag)
 }
 __tagtable(ATAG_CORE, parse_tag_core);
 
-static int __init parse_tag_mem_range(struct tag *tag,
-                                     struct tag_mem_range **root)
+static int __init parse_tag_mem(struct tag *tag)
 {
-       struct tag_mem_range *cur, **pprev;
-       struct tag_mem_range *new;
+       unsigned long start, end;
 
        /*
         * Ignore zero-sized entries. If we're running standalone, the
@@ -171,34 +303,53 @@ static int __init parse_tag_mem_range(struct tag *tag,
        if (tag->u.mem_range.size == 0)
                return 0;
 
-       /*
-        * Copy the data so the bootmem init code doesn't need to care
-        * about it.
-        */
-       if (mem_range_next_free >= ARRAY_SIZE(mem_range_cache))
-               panic("Physical memory map too complex!\n");
+       start = tag->u.mem_range.addr;
+       end = tag->u.mem_range.addr + tag->u.mem_range.size - 1;
+
+       add_physical_memory(start, end);
+       return 0;
+}
+__tagtable(ATAG_MEM, parse_tag_mem);
+
+static int __init parse_tag_rdimg(struct tag *tag)
+{
+#ifdef CONFIG_INITRD
+       struct tag_mem_range *mem = &tag->u.mem_range;
+       int ret;
 
-       new = &mem_range_cache[mem_range_next_free++];
-       *new = tag->u.mem_range;
+       if (initrd_start) {
+               printk(KERN_WARNING
+                      "Warning: Only the first initrd image will be used\n");
+               return 0;
+       }
 
-       pprev = root;
-       cur = *root;
-       while (cur) {
-               pprev = &cur->next;
-               cur = cur->next;
+       ret = add_reserved_region(mem->addr, mem->addr + mem->size - 1,
+                                 "initrd");
+       if (ret) {
+               printk(KERN_WARNING
+                      "Warning: Failed to reserve initrd memory\n");
+               return ret;
        }
 
-       *pprev = new;
-       new->next = NULL;
+       initrd_start = (unsigned long)__va(mem->addr);
+       initrd_end = initrd_start + mem->size;
+#else
+       printk(KERN_WARNING "RAM disk image present, but "
+              "no initrd support in kernel, ignoring\n");
+#endif
 
        return 0;
 }
+__tagtable(ATAG_RDIMG, parse_tag_rdimg);
 
-static int __init parse_tag_mem(struct tag *tag)
+static int __init parse_tag_rsvd_mem(struct tag *tag)
 {
-       return parse_tag_mem_range(tag, &mem_phys);
+       struct tag_mem_range *mem = &tag->u.mem_range;
+
+       return add_reserved_region(mem->addr, mem->addr + mem->size - 1,
+                                  "Reserved");
 }
-__tagtable(ATAG_MEM, parse_tag_mem);
+__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
 
 static int __init parse_tag_cmdline(struct tag *tag)
 {
@@ -207,12 +358,6 @@ static int __init parse_tag_cmdline(struct tag *tag)
 }
 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);
 
-static int __init parse_tag_rdimg(struct tag *tag)
-{
-       return parse_tag_mem_range(tag, &mem_ramdisk);
-}
-__tagtable(ATAG_RDIMG, parse_tag_rdimg);
-
 static int __init parse_tag_clock(struct tag *tag)
 {
        /*
@@ -223,12 +368,6 @@ static int __init parse_tag_clock(struct tag *tag)
 }
 __tagtable(ATAG_CLOCK, parse_tag_clock);
 
-static int __init parse_tag_rsvd_mem(struct tag *tag)
-{
-       return parse_tag_mem_range(tag, &mem_reserved);
-}
-__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
-
 /*
  * Scan the tag table for this tag, and call its parse function. The
  * tag table is built by the linker from all the __tagtable
@@ -260,10 +399,137 @@ static void __init parse_tags(struct tag *t)
                               t->hdr.tag);
 }
 
+/*
+ * Find a free memory region large enough for storing the
+ * bootmem bitmap.
+ */
+static unsigned long __init
+find_bootmap_pfn(const struct resource *mem)
+{
+       unsigned long bootmap_pages, bootmap_len;
+       unsigned long node_pages = PFN_UP(mem->end - mem->start + 1);
+       unsigned long bootmap_start;
+
+       bootmap_pages = bootmem_bootmap_pages(node_pages);
+       bootmap_len = bootmap_pages << PAGE_SHIFT;
+
+       /*
+        * Find a large enough region without reserved pages for
+        * storing the bootmem bitmap. We can take advantage of the
+        * fact that all lists have been sorted.
+        *
+        * We have to check that we don't collide with any reserved
+        * regions, which includes the kernel image and any RAMDISK
+        * images.
+        */
+       bootmap_start = find_free_region(mem, bootmap_len, PAGE_SIZE);
+
+       return bootmap_start >> PAGE_SHIFT;
+}
+
+#define MAX_LOWMEM     HIGHMEM_START
+#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
+
+static void __init setup_bootmem(void)
+{
+       unsigned bootmap_size;
+       unsigned long first_pfn, bootmap_pfn, pages;
+       unsigned long max_pfn, max_low_pfn;
+       unsigned node = 0;
+       struct resource *res;
+
+       printk(KERN_INFO "Physical memory:\n");
+       for (res = system_ram; res; res = res->sibling)
+               printk("  %08x-%08x\n", res->start, res->end);
+       printk(KERN_INFO "Reserved memory:\n");
+       for (res = reserved; res; res = res->sibling)
+               printk("  %08x-%08x: %s\n",
+                      res->start, res->end, res->name);
+
+       nodes_clear(node_online_map);
+
+       if (system_ram->sibling)
+               printk(KERN_WARNING "Only using first memory bank\n");
+
+       for (res = system_ram; res; res = NULL) {
+               first_pfn = PFN_UP(res->start);
+               max_low_pfn = max_pfn = PFN_DOWN(res->end + 1);
+               bootmap_pfn = find_bootmap_pfn(res);
+               if (bootmap_pfn > max_pfn)
+                       panic("No space for bootmem bitmap!\n");
+
+               if (max_low_pfn > MAX_LOWMEM_PFN) {
+                       max_low_pfn = MAX_LOWMEM_PFN;
+#ifndef CONFIG_HIGHMEM
+                       /*
+                        * Lowmem is memory that can be addressed
+                        * directly through P1/P2
+                        */
+                       printk(KERN_WARNING
+                              "Node %u: Only %ld MiB of memory will be used.\n",
+                              node, MAX_LOWMEM >> 20);
+                       printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+#else
+#error HIGHMEM is not supported by AVR32 yet
+#endif
+               }
+
+               /* Initialize the boot-time allocator with low memory only. */
+               bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
+                                                first_pfn, max_low_pfn);
+
+               /*
+                * Register fully available RAM pages with the bootmem
+                * allocator.
+                */
+               pages = max_low_pfn - first_pfn;
+               free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
+                                  PFN_PHYS(pages));
+
+               /* Reserve space for the bootmem bitmap... */
+               reserve_bootmem_node(NODE_DATA(node),
+                                    PFN_PHYS(bootmap_pfn),
+                                    bootmap_size);
+
+               /* ...and any other reserved regions. */
+               for (res = reserved; res; res = res->sibling) {
+                       if (res->start > PFN_PHYS(max_pfn))
+                               break;
+
+                       /*
+                        * resource_init will complain about partial
+                        * overlaps, so we'll just ignore such
+                        * resources for now.
+                        */
+                       if (res->start >= PFN_PHYS(first_pfn)
+                           && res->end < PFN_PHYS(max_pfn))
+                               reserve_bootmem_node(
+                                       NODE_DATA(node), res->start,
+                                       res->end - res->start + 1);
+               }
+
+               node_set_online(node);
+       }
+}
+
 void __init setup_arch (char **cmdline_p)
 {
        struct clk *cpu_clk;
 
+       init_mm.start_code = (unsigned long)_text;
+       init_mm.end_code = (unsigned long)_etext;
+       init_mm.end_data = (unsigned long)_edata;
+       init_mm.brk = (unsigned long)_end;
+
+       /*
+        * Include .init section to make allocations easier. It will
+        * be removed before the resource is actually requested.
+        */
+       kernel_code.start = __pa(__init_begin);
+       kernel_code.end = __pa(init_mm.end_code - 1);
+       kernel_data.start = __pa(init_mm.end_code);
+       kernel_data.end = __pa(init_mm.brk - 1);
+
        parse_tags(bootloader_tags);
 
        setup_processor();
@@ -289,24 +555,16 @@ void __init setup_arch (char **cmdline_p)
                       ((cpu_hz + 500) / 1000) % 1000);
        }
 
-       init_mm.start_code = (unsigned long) &_text;
-       init_mm.end_code = (unsigned long) &_etext;
-       init_mm.end_data = (unsigned long) &_edata;
-       init_mm.brk = (unsigned long) &_end;
-
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;
        parse_early_param();
 
        setup_bootmem();
 
-       board_setup_fbmem(fbmem_start, fbmem_size);
-
 #ifdef CONFIG_VT
        conswitchp = &dummy_con;
 #endif
 
        paging_init();
-
        resource_init();
 }
index c10833f..7014a35 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2004-2006 Atmel Corporation
+ * Copyright (C) 2004-2007 Atmel Corporation
  *
  * Based on MIPS implementation arch/mips/kernel/time.c
  *   Copyright 2001 MontaVista Software Inc.
 #include <linux/init.h>
 #include <linux/profile.h>
 #include <linux/sysdev.h>
+#include <linux/err.h>
 
 #include <asm/div64.h>
 #include <asm/sysreg.h>
 #include <asm/io.h>
 #include <asm/sections.h>
 
-static cycle_t read_cycle_count(void)
+/* how many counter cycles in a jiffy? */
+static u32 cycles_per_jiffy;
+
+/* the count value for the next timer interrupt */
+static u32 expirelo;
+
+cycle_t __weak read_cycle_count(void)
 {
        return (cycle_t)sysreg_read(COUNT);
 }
 
-static struct clocksource clocksource_avr32 = {
+struct clocksource __weak clocksource_avr32 = {
        .name           = "avr32",
        .rating         = 350,
        .read           = read_cycle_count,
@@ -40,12 +47,20 @@ static struct clocksource clocksource_avr32 = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+irqreturn_t __weak timer_interrupt(int irq, void *dev_id);
+
+struct irqaction timer_irqaction = {
+       .handler        = timer_interrupt,
+       .flags          = IRQF_DISABLED,
+       .name           = "timer",
+};
+
 /*
  * By default we provide the null RTC ops
  */
 static unsigned long null_rtc_get_time(void)
 {
-       return mktime(2004, 1, 1, 0, 0, 0);
+       return mktime(2007, 1, 1, 0, 0, 0);
 }
 
 static int null_rtc_set_time(unsigned long sec)
@@ -56,23 +71,14 @@ static int null_rtc_set_time(unsigned long sec)
 static unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
 static int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
 
-/* how many counter cycles in a jiffy? */
-static unsigned long cycles_per_jiffy;
-
-/* cycle counter value at the previous timer interrupt */
-static unsigned int timerhi, timerlo;
-
-/* the count value for the next timer interrupt */
-static unsigned int expirelo;
-
 static void avr32_timer_ack(void)
 {
-       unsigned int count;
+       u32 count;
 
        /* Ack this timer interrupt and set the next one */
        expirelo += cycles_per_jiffy;
+       /* writing 0 to COMPARE disables the COUNT-COMPARE interrupt */
        if (expirelo == 0) {
-               printk(KERN_DEBUG "expirelo == 0\n");
                sysreg_write(COMPARE, expirelo + 1);
        } else {
                sysreg_write(COMPARE, expirelo);
@@ -86,27 +92,56 @@ static void avr32_timer_ack(void)
        }
 }
 
-static unsigned int avr32_hpt_read(void)
+int __weak avr32_hpt_init(void)
 {
-       return sysreg_read(COUNT);
+       int ret;
+       unsigned long mult, shift, count_hz;
+
+       count_hz = clk_get_rate(boot_cpu_data.clk);
+       shift = clocksource_avr32.shift;
+       mult = clocksource_hz2mult(count_hz, shift);
+       clocksource_avr32.mult = mult;
+
+       {
+               u64 tmp;
+
+               tmp = TICK_NSEC;
+               tmp <<= shift;
+               tmp += mult / 2;
+               do_div(tmp, mult);
+
+               cycles_per_jiffy = tmp;
+       }
+
+       ret = setup_irq(0, &timer_irqaction);
+       if (ret) {
+               pr_debug("timer: could not request IRQ 0: %d\n", ret);
+               return -ENODEV;
+       }
+
+       printk(KERN_INFO "timer: AT32AP COUNT-COMPARE at irq 0, "
+                       "%lu.%03lu MHz\n",
+                       ((count_hz + 500) / 1000) / 1000,
+                       ((count_hz + 500) / 1000) % 1000);
+
+       return 0;
 }
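A rough sanity check of the calculation above (the clock figure is illustrative): clocksource_hz2mult() gives mult ≈ (10^9 << shift) / count_hz, so cycles_per_jiffy = ((TICK_NSEC << shift) + mult/2) / mult ≈ TICK_NSEC * count_hz / 10^9 = count_hz / HZ. With a 140 MHz CPU clock and HZ = 100 that works out to 1,400,000 COUNT cycles between COMPARE interrupts.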
 
 /*
  * Taken from MIPS c0_hpt_timer_init().
  *
- * Why is it so complicated, and what is "count"?  My assumption is
- * that `count' specifies the "reference cycle", i.e. the cycle since
- * reset that should mean "zero". The reason COUNT is written twice is
- * probably to make sure we don't get any timer interrupts while we
- * are messing with the counter.
+ * The reason COUNT is written twice is probably to make sure we don't get any
+ * timer interrupts while we are messing with the counter.
  */
-static void avr32_hpt_init(unsigned int count)
+int __weak avr32_hpt_start(void)
 {
-       count = sysreg_read(COUNT) - count;
+       u32 count = sysreg_read(COUNT);
        expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
        sysreg_write(COUNT, expirelo - cycles_per_jiffy);
        sysreg_write(COMPARE, expirelo);
        sysreg_write(COUNT, count);
+
+       return 0;
 }
 
 /*
@@ -115,26 +150,18 @@ static void avr32_hpt_init(unsigned int count)
  *
  * In UP mode, it is invoked from the (global) timer_interrupt.
  */
-static void local_timer_interrupt(int irq, void *dev_id)
+void local_timer_interrupt(int irq, void *dev_id)
 {
        if (current->pid)
                profile_tick(CPU_PROFILING);
        update_process_times(user_mode(get_irq_regs()));
 }
 
-static irqreturn_t
-timer_interrupt(int irq, void *dev_id)
+irqreturn_t __weak timer_interrupt(int irq, void *dev_id)
 {
-       unsigned int count;
-
        /* ack timer interrupt and try to set next interrupt */
-       count = avr32_hpt_read();
        avr32_timer_ack();
 
-       /* Update timerhi/timerlo for intra-jiffy calibration */
-       timerhi += count < timerlo;     /* Wrap around */
-       timerlo = count;
-
        /*
         * Call the generic timer interrupt handler
         */
@@ -153,60 +180,37 @@ timer_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static struct irqaction timer_irqaction = {
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED,
-       .name           = "timer",
-};
-
 void __init time_init(void)
 {
-       unsigned long mult, shift, count_hz;
        int ret;
 
+       /*
+        * Make sure we don't get any COMPARE interrupts before we can
+        * handle them.
+        */
+       sysreg_write(COMPARE, 0);
+
        xtime.tv_sec = rtc_get_time();
        xtime.tv_nsec = 0;
 
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);
 
-       printk("Before time_init: count=%08lx, compare=%08lx\n",
-              (unsigned long)sysreg_read(COUNT),
-              (unsigned long)sysreg_read(COMPARE));
-
-       count_hz = clk_get_rate(boot_cpu_data.clk);
-       shift = clocksource_avr32.shift;
-       mult = clocksource_hz2mult(count_hz, shift);
-       clocksource_avr32.mult = mult;
-
-       printk("Cycle counter: mult=%lu, shift=%lu\n", mult, shift);
-
-       {
-               u64 tmp;
-
-               tmp = TICK_NSEC;
-               tmp <<= shift;
-               tmp += mult / 2;
-               do_div(tmp, mult);
-
-               cycles_per_jiffy = tmp;
+       ret = avr32_hpt_init();
+       if (ret) {
+               pr_debug("timer: failed setup: %d\n", ret);
+               return;
        }
 
-       /* This sets up the high precision timer for the first interrupt. */
-       avr32_hpt_init(avr32_hpt_read());
-
-       printk("After time_init: count=%08lx, compare=%08lx\n",
-              (unsigned long)sysreg_read(COUNT),
-              (unsigned long)sysreg_read(COMPARE));
-
        ret = clocksource_register(&clocksource_avr32);
        if (ret)
-               printk(KERN_ERR
-                      "timer: could not register clocksource: %d\n", ret);
+               pr_debug("timer: could not register clocksource: %d\n", ret);
 
-       ret = setup_irq(0, &timer_irqaction);
-       if (ret)
-               printk("timer: could not request IRQ 0: %d\n", ret);
+       ret = avr32_hpt_start();
+       if (ret) {
+               pr_debug("timer: failed starting: %d\n", ret);
+               return;
+       }
 }
 
 static struct sysdev_class timer_class = {
index adc01a1..4f0382d 100644 (file)
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#undef DEBUG
-#include <linux/sched.h>
+
+#include <linux/bug.h>
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/module.h>
 #include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
 
-#include <asm/traps.h>
-#include <asm/sysreg.h>
 #include <asm/addrspace.h>
-#include <asm/ocd.h>
 #include <asm/mmu_context.h>
-#include <asm/uaccess.h>
-
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
-{
-       unsigned long p;
-       int i;
-
-       printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
-
-       for (p = bottom & ~31; p < top; ) {
-               printk("%04lx: ", p & 0xffff);
-
-               for (i = 0; i < 8; i++, p += 4) {
-                       unsigned int val;
-
-                       if (p < bottom || p >= top)
-                               printk("         ");
-                       else {
-                               if (__get_user(val, (unsigned int __user *)p)) {
-                                       printk("\n");
-                                       goto out;
-                               }
-                               printk("%08x ", val);
-                       }
-               }
-               printk("\n");
-       }
-
-out:
-       return;
-}
-
-static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
-{
-       return (p > (unsigned long)tinfo)
-               && (p < (unsigned long)tinfo + THREAD_SIZE - 3);
-}
-
-#ifdef CONFIG_FRAME_POINTER
-static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
-                               struct pt_regs *regs)
-{
-       unsigned long lr, fp;
-       struct thread_info *tinfo;
-
-       tinfo = (struct thread_info *)
-               ((unsigned long)sp & ~(THREAD_SIZE - 1));
-
-       if (regs)
-               fp = regs->r7;
-       else if (tsk == current)
-               asm("mov %0, r7" : "=r"(fp));
-       else
-               fp = tsk->thread.cpu_context.r7;
-
-       /*
-        * Walk the stack as long as the frame pointer (a) is within
-        * the kernel stack of the task, and (b) it doesn't move
-        * downwards.
-        */
-       while (valid_stack_ptr(tinfo, fp)) {
-               unsigned long new_fp;
-
-               lr = *(unsigned long *)fp;
-               printk(" [<%08lx>] ", lr);
-               print_symbol("%s\n", lr);
-
-               new_fp = *(unsigned long *)(fp + 4);
-               if (new_fp <= fp)
-                       break;
-               fp = new_fp;
-       }
-       printk("\n");
-}
-#else
-static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
-                               struct pt_regs *regs)
-{
-       unsigned long addr;
-
-       while (!kstack_end(sp)) {
-               addr = *sp++;
-               if (kernel_text_address(addr)) {
-                       printk(" [<%08lx>] ", addr);
-                       print_symbol("%s\n", addr);
-               }
-       }
-}
-#endif
-
-void show_trace(struct task_struct *tsk, unsigned long *sp,
-                      struct pt_regs *regs)
-{
-       if (regs &&
-           (((regs->sr & MODE_MASK) == MODE_EXCEPTION) ||
-            ((regs->sr & MODE_MASK) == MODE_USER)))
-               return;
-
-       printk ("Call trace:");
-#ifdef CONFIG_KALLSYMS
-       printk("\n");
-#endif
-
-       __show_trace(tsk, sp, regs);
-       printk("\n");
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp)
-{
-       unsigned long stack;
-
-       if (!tsk)
-               tsk = current;
-       if (sp == 0) {
-               if (tsk == current) {
-                       register unsigned long *real_sp __asm__("sp");
-                       sp = real_sp;
-               } else {
-                       sp = (unsigned long *)tsk->thread.cpu_context.ksp;
-               }
-       }
-
-       stack = (unsigned long)sp;
-       dump_mem("Stack: ", stack,
-                THREAD_SIZE + (unsigned long)tsk->thread_info);
-       show_trace(tsk, sp, NULL);
-}
-
-void dump_stack(void)
-{
-       show_stack(NULL, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
+#include <asm/ocd.h>
+#include <asm/sysreg.h>
+#include <asm/traps.h>
 
 ATOMIC_NOTIFIER_HEAD(avr32_die_chain);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-       pr_debug("register_die_notifier: %p\n", nb);
-
        return atomic_notifier_chain_register(&avr32_die_chain, nb);
 }
 EXPORT_SYMBOL(register_die_notifier);
@@ -169,93 +36,103 @@ EXPORT_SYMBOL(unregister_die_notifier);
 
 static DEFINE_SPINLOCK(die_lock);
 
-void __die(const char *str, struct pt_regs *regs, unsigned long err,
-          const char *file, const char *func, unsigned long line)
+void NORET_TYPE die(const char *str, struct pt_regs *regs, long err)
 {
-       struct task_struct *tsk = current;
        static int die_counter;
 
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
 
-       printk(KERN_ALERT "%s", str);
-       if (file && func)
-               printk(" in %s:%s, line %ld", file, func, line);
-       printk("[#%d]:\n", ++die_counter);
-       print_modules();
-       show_regs(regs);
-       printk("Process %s (pid: %d, stack limit = 0x%p)\n",
-              tsk->comm, tsk->pid, tsk->thread_info + 1);
-
-       if (!user_mode(regs) || in_interrupt()) {
-               dump_mem("Stack: ", regs->sp,
-                        THREAD_SIZE + (unsigned long)tsk->thread_info);
+       printk(KERN_ALERT "Oops: %s, sig: %ld [#%d]\n" KERN_EMERG,
+              str, err, ++die_counter);
+#ifdef CONFIG_PREEMPT
+       printk("PREEMPT ");
+#endif
+#ifdef CONFIG_FRAME_POINTER
+       printk("FRAME_POINTER ");
+#endif
+       if (current_cpu_data.features & AVR32_FEATURE_OCD) {
+               unsigned long did = __mfdr(DBGREG_DID);
+               printk("chip: 0x%03lx:0x%04lx rev %lu\n",
+                      (did >> 1) & 0x7ff,
+                      (did >> 12) & 0x7fff,
+                      (did >> 28) & 0xf);
+       } else {
+               printk("cpu: arch %u r%u / core %u r%u\n",
+                      current_cpu_data.arch_type,
+                      current_cpu_data.arch_revision,
+                      current_cpu_data.cpu_type,
+                      current_cpu_data.cpu_revision);
        }
 
+       print_modules();
+       show_regs_log_lvl(regs, KERN_EMERG);
+       show_stack_log_lvl(current, regs->sp, regs, KERN_EMERG);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
-       do_exit(SIGSEGV);
+
+       if (in_interrupt())
+               panic("Fatal exception in interrupt");
+
+       if (panic_on_oops)
+               panic("Fatal exception");
+
+       do_exit(err);
 }
 
-void __die_if_kernel(const char *str, struct pt_regs *regs, unsigned long err,
-                    const char *file, const char *func, unsigned long line)
+void _exception(long signr, struct pt_regs *regs, int code,
+               unsigned long addr)
 {
+       siginfo_t info;
+
        if (!user_mode(regs))
-               __die(str, regs, err, file, func, line);
-}
+               die("Unhandled exception in kernel mode", regs, signr);
+
+       memset(&info, 0, sizeof(info));
+       info.si_signo = signr;
+       info.si_code = code;
+       info.si_addr = (void __user *)addr;
+       force_sig_info(signr, &info, current);
 
-asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
-{
-#ifdef CONFIG_SUBARCH_AVR32B
        /*
-        * The exception entry always saves RSR_EX. For NMI, this is
-        * wrong; it should be RSR_NMI
+        * Init gets no signals that it doesn't have a handler for.
+        * That's all very well, but if it has caused a synchronous
+        * exception and we ignore the resulting signal, it will just
+        * generate the same exception over and over again and we get
+        * nowhere.  Better to kill it and let the kernel panic.
         */
-       regs->sr = sysreg_read(RSR_NMI);
-#endif
+       if (is_init(current)) {
+               __sighandler_t handler;
+
+               spin_lock_irq(&current->sighand->siglock);
+               handler = current->sighand->action[signr-1].sa.sa_handler;
+               spin_unlock_irq(&current->sighand->siglock);
+               if (handler == SIG_DFL) {
+                       /* init has generated a synchronous exception
+                          and it doesn't have a handler for the signal */
+                       printk(KERN_CRIT "init has generated signal %ld "
+                              "but has no handler for it\n", signr);
+                       do_exit(signr);
+               }
+       }
+}
 
-       printk("NMI taken!!!!\n");
-       die("NMI", regs, ecr);
-       BUG();
+asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
+{
+       printk(KERN_ALERT "Got Non-Maskable Interrupt, dumping regs\n");
+       show_regs_log_lvl(regs, KERN_ALERT);
+       show_stack_log_lvl(current, regs->sp, regs, KERN_ALERT);
 }
 
 asmlinkage void do_critical_exception(unsigned long ecr, struct pt_regs *regs)
 {
-       printk("Unable to handle critical exception %lu at pc = %08lx!\n",
-              ecr, regs->pc);
-       die("Oops", regs, ecr);
-       BUG();
+       die("Critical exception", regs, SIGKILL);
 }
 
 asmlinkage void do_address_exception(unsigned long ecr, struct pt_regs *regs)
 {
-       siginfo_t info;
-
-       die_if_kernel("Oops: Address exception in kernel mode", regs, ecr);
-
-#ifdef DEBUG
-       if (ecr == ECR_ADDR_ALIGN_X)
-               pr_debug("Instruction Address Exception at pc = %08lx\n",
-                        regs->pc);
-       else if (ecr == ECR_ADDR_ALIGN_R)
-               pr_debug("Data Address Exception (Read) at pc = %08lx\n",
-                        regs->pc);
-       else if (ecr == ECR_ADDR_ALIGN_W)
-               pr_debug("Data Address Exception (Write) at pc = %08lx\n",
-                        regs->pc);
-       else
-               BUG();
-
-       show_regs(regs);
-#endif
-
-       info.si_signo = SIGBUS;
-       info.si_errno = 0;
-       info.si_code = BUS_ADRALN;
-       info.si_addr = (void __user *)regs->pc;
-
-       force_sig_info(SIGBUS, &info, current);
+       _exception(SIGBUS, regs, BUS_ADRALN, regs->pc);
 }
 
 /* This way of handling undefined instructions is stolen from ARM */
@@ -280,7 +157,8 @@ static int do_cop_absent(u32 insn)
 {
        int cop_nr;
        u32 cpucr;
-       if ( (insn & 0xfdf00000) == 0xf1900000 )
+
+       if ((insn & 0xfdf00000) == 0xf1900000)
                /* LDC0 */
                cop_nr = 0;
        else
@@ -292,136 +170,91 @@ static int do_cop_absent(u32 insn)
        sysreg_write(CPUCR, cpucr);
 
        cpucr = sysreg_read(CPUCR);
-       if ( !(cpucr & (1 << (24 + cop_nr))) ){
-               printk("Coprocessor #%i not found!\n", cop_nr);
-               return -1;
-       }
+       if (!(cpucr & (1 << (24 + cop_nr))))
+               return -ENODEV;
 
        return 0;
 }
 
-#ifdef CONFIG_BUG
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-static inline void do_bug_verbose(struct pt_regs *regs, u32 insn)
-{
-       char *file;
-       u16 line;
-       char c;
-
-       if (__get_user(line, (u16 __user *)(regs->pc + 2)))
-               return;
-       if (__get_user(file, (char * __user *)(regs->pc + 4))
-           || (unsigned long)file < PAGE_OFFSET
-           || __get_user(c, file))
-               file = "<bad filename>";
-
-       printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
-}
-#else
-static inline void do_bug_verbose(struct pt_regs *regs, u32 insn)
+int is_valid_bugaddr(unsigned long pc)
 {
+       unsigned short opcode;
+
+       if (pc < PAGE_OFFSET)
+               return 0;
+       if (probe_kernel_address((u16 *)pc, opcode))
+               return 0;
 
+       return opcode == AVR32_BUG_OPCODE;
 }
-#endif
-#endif
 
 asmlinkage void do_illegal_opcode(unsigned long ecr, struct pt_regs *regs)
 {
        u32 insn;
        struct undef_hook *hook;
-       siginfo_t info;
        void __user *pc;
+       long code;
 
-       if (!user_mode(regs))
-               goto kernel_trap;
+       if (!user_mode(regs) && (ecr == ECR_ILLEGAL_OPCODE)) {
+               enum bug_trap_type type;
+
+               type = report_bug(regs->pc);
+               switch (type) {
+               case BUG_TRAP_TYPE_NONE:
+                       break;
+               case BUG_TRAP_TYPE_WARN:
+                       regs->pc += 2;
+                       return;
+               case BUG_TRAP_TYPE_BUG:
+                       die("Kernel BUG", regs, SIGKILL);
+               }
+       }
 
        local_irq_enable();
 
-       pc = (void __user *)instruction_pointer(regs);
-       if (__get_user(insn, (u32 __user *)pc))
-               goto invalid_area;
+       if (user_mode(regs)) {
+               pc = (void __user *)instruction_pointer(regs);
+               if (get_user(insn, (u32 __user *)pc))
+                       goto invalid_area;
 
-        if (ecr == ECR_COPROC_ABSENT) {
-               if (do_cop_absent(insn) == 0)
+               if (ecr == ECR_COPROC_ABSENT && !do_cop_absent(insn))
                        return;
-        }
 
-       spin_lock_irq(&undef_lock);
-       list_for_each_entry(hook, &undef_hook, node) {
-               if ((insn & hook->insn_mask) == hook->insn_val) {
-                       if (hook->fn(regs, insn) == 0) {
-                               spin_unlock_irq(&undef_lock);
-                               return;
+               spin_lock_irq(&undef_lock);
+               list_for_each_entry(hook, &undef_hook, node) {
+                       if ((insn & hook->insn_mask) == hook->insn_val) {
+                               if (hook->fn(regs, insn) == 0) {
+                                       spin_unlock_irq(&undef_lock);
+                                       return;
+                               }
                        }
                }
+               spin_unlock_irq(&undef_lock);
        }
-       spin_unlock_irq(&undef_lock);
-
-invalid_area:
 
-#ifdef DEBUG
-       printk("Illegal instruction at pc = %08lx\n", regs->pc);
-       if (regs->pc < TASK_SIZE) {
-               unsigned long ptbr, pgd, pte, *p;
-
-               ptbr = sysreg_read(PTBR);
-               p = (unsigned long *)ptbr;
-               pgd = p[regs->pc >> 22];
-               p = (unsigned long *)((pgd & 0x1ffff000) | 0x80000000);
-               pte = p[(regs->pc >> 12) & 0x3ff];
-               printk("page table: 0x%08lx -> 0x%08lx -> 0x%08lx\n", ptbr, pgd, pte);
-       }
-#endif
-
-       info.si_signo = SIGILL;
-       info.si_errno = 0;
-       info.si_addr = (void __user *)regs->pc;
        switch (ecr) {
-       case ECR_ILLEGAL_OPCODE:
-       case ECR_UNIMPL_INSTRUCTION:
-               info.si_code = ILL_ILLOPC;
-               break;
        case ECR_PRIVILEGE_VIOLATION:
-               info.si_code = ILL_PRVOPC;
+               code = ILL_PRVOPC;
                break;
        case ECR_COPROC_ABSENT:
-               info.si_code = ILL_COPROC;
+               code = ILL_COPROC;
                break;
        default:
-               BUG();
+               code = ILL_ILLOPC;
+               break;
        }
 
-       force_sig_info(SIGILL, &info, current);
+       _exception(SIGILL, regs, code, regs->pc);
        return;
 
-kernel_trap:
-#ifdef CONFIG_BUG
-       if (__kernel_text_address(instruction_pointer(regs))) {
-               insn = *(u16 *)instruction_pointer(regs);
-               if (insn == AVR32_BUG_OPCODE) {
-                       do_bug_verbose(regs, insn);
-                       die("Kernel BUG", regs, 0);
-                       return;
-               }
-       }
-#endif
-
-       die("Oops: Illegal instruction in kernel code", regs, ecr);
+invalid_area:
+       _exception(SIGSEGV, regs, SEGV_MAPERR, regs->pc);
 }
 
 asmlinkage void do_fpe(unsigned long ecr, struct pt_regs *regs)
 {
-       siginfo_t info;
-
-       printk("Floating-point exception at pc = %08lx\n", regs->pc);
-
-       /* We have no FPU... */
-       info.si_signo = SIGILL;
-       info.si_errno = 0;
-       info.si_addr = (void __user *)regs->pc;
-       info.si_code = ILL_COPROC;
-
-       force_sig_info(SIGILL, &info, current);
+       /* We have no FPU yet */
+       _exception(SIGILL, regs, ILL_COPROC, regs->pc);
 }
 
 
index ef13b7c..7ad20cf 100644 (file)
@@ -26,6 +26,12 @@ SECTIONS
                        _sinittext = .;
                        *(.text.reset)
                        *(.init.text)
+                       /*
+                        * .exit.text is discarded at runtime, not
+                        * link time, to deal with references from
+                        * __bug_table
+                        */
+                       *(.exit.text)
                        _einittext = .;
                . = ALIGN(4);
                __tagtable_begin = .;
@@ -86,6 +92,8 @@ SECTIONS
                __stop___ex_table = .;
        }
 
+       BUG_TABLE
+
        RODATA
 
        . = ALIGN(8192);
@@ -126,7 +134,6 @@ SECTIONS
         * thrown away, as cleanup code is never called unless it's a module.
         */
        /DISCARD/               : {
-               *(.exit.text)
                *(.exit.data)
                *(.exitcall.exit)
        }
diff --git a/arch/avr32/mach-at32ap/Kconfig b/arch/avr32/mach-at32ap/Kconfig
new file mode 100644 (file)
index 0000000..eb30783
--- /dev/null
@@ -0,0 +1,31 @@
+if PLATFORM_AT32AP
+
+menu "Atmel AVR32 AP options"
+
+choice
+       prompt "AT32AP7000 static memory bus width"
+       depends on CPU_AT32AP7000
+       default AP7000_16_BIT_SMC
+       help
+         Define the width of the AP7000 external static memory interface.
+         This is used to determine how to mangle the address and/or data
+         when doing little-endian port access.
+
+         The current code can only support a single external memory bus
+         width for all chip selects, excluding the flash (which uses
+         raw access and is thus not affected by any of this).
+
+config AP7000_32_BIT_SMC
+       bool "32 bit"
+
+config AP7000_16_BIT_SMC
+       bool "16 bit"
+
+config AP7000_8_BIT_SMC
+       bool "8 bit"
+
+endchoice
+
+endmenu
+
+endif # PLATFORM_AT32AP
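As a usage note (the board choice is illustrative): a design whose external peripherals sit on a 16-bit static memory bus simply keeps the default, which ends up in the configuration as

        CONFIG_AP7000_16_BIT_SMC=y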
index b21bea9..f1d3957 100644 (file)
@@ -1,2 +1,3 @@
 obj-y                          += at32ap.o clock.o intc.o extint.o pio.o hsmc.o
 obj-$(CONFIG_CPU_AT32AP7000)   += at32ap7000.o
+obj-$(CONFIG_CPU_AT32AP7000)   += time-tc.o
index 472703f..56db45b 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/arch/sm.h>
 
 #include "clock.h"
+#include "hmatrix.h"
 #include "pio.h"
 #include "sm.h"
 
@@ -416,7 +417,15 @@ struct platform_device at32_sm_device = {
        .resource       = sm_resource,
        .num_resources  = ARRAY_SIZE(sm_resource),
 };
-DEV_CLK(pclk, at32_sm, pbb, 0);
+static struct clk at32_sm_pclk = {
+       .name           = "pclk",
+       .dev            = &at32_sm_device.dev,
+       .parent         = &pbb_clk,
+       .mode           = pbb_clk_mode,
+       .get_rate       = pbb_clk_get_rate,
+       .users          = 1,
+       .index          = 0,
+};
 
 static struct resource intc0_resource[] = {
        PBMEM(0xfff00400),
@@ -442,6 +451,7 @@ static struct clk hramc_clk = {
        .mode           = hsb_clk_mode,
        .get_rate       = hsb_clk_get_rate,
        .users          = 1,
+       .index          = 3,
 };
 
 static struct resource smc0_resource[] = {
@@ -466,6 +476,57 @@ static struct clk pico_clk = {
        .users          = 1,
 };
 
+/* --------------------------------------------------------------------
+ * HMATRIX
+ * -------------------------------------------------------------------- */
+
+static struct clk hmatrix_clk = {
+       .name           = "hmatrix_clk",
+       .parent         = &pbb_clk,
+       .mode           = pbb_clk_mode,
+       .get_rate       = pbb_clk_get_rate,
+       .index          = 2,
+       .users          = 1,
+};
+#define HMATRIX_BASE   ((void __iomem *)0xfff00800)
+
+#define hmatrix_readl(reg)                                     \
+       __raw_readl((HMATRIX_BASE) + HMATRIX_##reg)
+#define hmatrix_writel(reg,value)                              \
+       __raw_writel((value), (HMATRIX_BASE) + HMATRIX_##reg)
+
+/*
+ * Set bits in the HMATRIX Special Function Register (SFR) used by the
+ * External Bus Interface (EBI). This can be used to enable special
+ * features like CompactFlash support, NAND Flash support, etc. on
+ * certain chipselects.
+ */
+static inline void set_ebi_sfr_bits(u32 mask)
+{
+       u32 sfr;
+
+       clk_enable(&hmatrix_clk);
+       sfr = hmatrix_readl(SFR4);
+       sfr |= mask;
+       hmatrix_writel(SFR4, sfr);
+       clk_disable(&hmatrix_clk);
+}
+
+/* --------------------------------------------------------------------
+ *  System Timer/Counter (TC)
+ * -------------------------------------------------------------------- */
+static struct resource at32_systc0_resource[] = {
+       PBMEM(0xfff00c00),
+       IRQ(22),
+};
+struct platform_device at32_systc0_device = {
+       .name           = "systc",
+       .id             = 0,
+       .resource       = at32_systc0_resource,
+       .num_resources  = ARRAY_SIZE(at32_systc0_resource),
+};
+DEV_CLK(pclk, at32_systc0, pbb, 3);
+
 /* --------------------------------------------------------------------
  *  PIO
  * -------------------------------------------------------------------- */
@@ -514,6 +575,8 @@ void __init at32_add_system_devices(void)
        platform_device_register(&smc0_device);
        platform_device_register(&pdc_device);
 
+       platform_device_register(&at32_systc0_device);
+
        platform_device_register(&pio0_device);
        platform_device_register(&pio1_device);
        platform_device_register(&pio2_device);
@@ -950,6 +1013,7 @@ struct clk *at32_clock_list[] = {
        &pbb_clk,
        &at32_sm_pclk,
        &at32_intc0_pclk,
+       &hmatrix_clk,
        &ebi_clk,
        &hramc_clk,
        &smc0_pclk,
@@ -962,6 +1026,7 @@ struct clk *at32_clock_list[] = {
        &pio2_mck,
        &pio3_mck,
        &pio4_mck,
+       &at32_systc0_pclk,
        &atmel_usart0_usart,
        &atmel_usart1_usart,
        &atmel_usart2_usart,
@@ -1024,6 +1089,9 @@ void __init at32_clock_init(void)
        for (i = 0; i < ARRAY_SIZE(at32_clock_list); i++) {
                struct clk *clk = at32_clock_list[i];
 
+               if (clk->users == 0)
+                       continue;
+
                if (clk->mode == &cpu_clk_mode)
                        cpu_mask |= 1 << clk->index;
                else if (clk->mode == &hsb_clk_mode)
diff --git a/arch/avr32/mach-at32ap/hmatrix.h b/arch/avr32/mach-at32ap/hmatrix.h
new file mode 100644 (file)
index 0000000..d10bfb6
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * Register definitions for High-Speed Bus Matrix
+ */
+#ifndef __HMATRIX_H
+#define __HMATRIX_H
+
+/* HMATRIX register offsets */
+#define HMATRIX_MCFG0                          0x0000
+#define HMATRIX_MCFG1                          0x0004
+#define HMATRIX_MCFG2                          0x0008
+#define HMATRIX_MCFG3                          0x000c
+#define HMATRIX_MCFG4                          0x0010
+#define HMATRIX_MCFG5                          0x0014
+#define HMATRIX_MCFG6                          0x0018
+#define HMATRIX_MCFG7                          0x001c
+#define HMATRIX_MCFG8                          0x0020
+#define HMATRIX_MCFG9                          0x0024
+#define HMATRIX_MCFG10                         0x0028
+#define HMATRIX_MCFG11                         0x002c
+#define HMATRIX_MCFG12                         0x0030
+#define HMATRIX_MCFG13                         0x0034
+#define HMATRIX_MCFG14                         0x0038
+#define HMATRIX_MCFG15                         0x003c
+#define HMATRIX_SCFG0                          0x0040
+#define HMATRIX_SCFG1                          0x0044
+#define HMATRIX_SCFG2                          0x0048
+#define HMATRIX_SCFG3                          0x004c
+#define HMATRIX_SCFG4                          0x0050
+#define HMATRIX_SCFG5                          0x0054
+#define HMATRIX_SCFG6                          0x0058
+#define HMATRIX_SCFG7                          0x005c
+#define HMATRIX_SCFG8                          0x0060
+#define HMATRIX_SCFG9                          0x0064
+#define HMATRIX_SCFG10                         0x0068
+#define HMATRIX_SCFG11                         0x006c
+#define HMATRIX_SCFG12                         0x0070
+#define HMATRIX_SCFG13                         0x0074
+#define HMATRIX_SCFG14                         0x0078
+#define HMATRIX_SCFG15                         0x007c
+#define HMATRIX_PRAS0                          0x0080
+#define HMATRIX_PRBS0                          0x0084
+#define HMATRIX_PRAS1                          0x0088
+#define HMATRIX_PRBS1                          0x008c
+#define HMATRIX_PRAS2                          0x0090
+#define HMATRIX_PRBS2                          0x0094
+#define HMATRIX_PRAS3                          0x0098
+#define HMATRIX_PRBS3                          0x009c
+#define HMATRIX_PRAS4                          0x00a0
+#define HMATRIX_PRBS4                          0x00a4
+#define HMATRIX_PRAS5                          0x00a8
+#define HMATRIX_PRBS5                          0x00ac
+#define HMATRIX_PRAS6                          0x00b0
+#define HMATRIX_PRBS6                          0x00b4
+#define HMATRIX_PRAS7                          0x00b8
+#define HMATRIX_PRBS7                          0x00bc
+#define HMATRIX_PRAS8                          0x00c0
+#define HMATRIX_PRBS8                          0x00c4
+#define HMATRIX_PRAS9                          0x00c8
+#define HMATRIX_PRBS9                          0x00cc
+#define HMATRIX_PRAS10                         0x00d0
+#define HMATRIX_PRBS10                         0x00d4
+#define HMATRIX_PRAS11                         0x00d8
+#define HMATRIX_PRBS11                         0x00dc
+#define HMATRIX_PRAS12                         0x00e0
+#define HMATRIX_PRBS12                         0x00e4
+#define HMATRIX_PRAS13                         0x00e8
+#define HMATRIX_PRBS13                         0x00ec
+#define HMATRIX_PRAS14                         0x00f0
+#define HMATRIX_PRBS14                         0x00f4
+#define HMATRIX_PRAS15                         0x00f8
+#define HMATRIX_PRBS15                         0x00fc
+#define HMATRIX_MRCR                           0x0100
+#define HMATRIX_SFR0                           0x0110
+#define HMATRIX_SFR1                           0x0114
+#define HMATRIX_SFR2                           0x0118
+#define HMATRIX_SFR3                           0x011c
+#define HMATRIX_SFR4                           0x0120
+#define HMATRIX_SFR5                           0x0124
+#define HMATRIX_SFR6                           0x0128
+#define HMATRIX_SFR7                           0x012c
+#define HMATRIX_SFR8                           0x0130
+#define HMATRIX_SFR9                           0x0134
+#define HMATRIX_SFR10                          0x0138
+#define HMATRIX_SFR11                          0x013c
+#define HMATRIX_SFR12                          0x0140
+#define HMATRIX_SFR13                          0x0144
+#define HMATRIX_SFR14                          0x0148
+#define HMATRIX_SFR15                          0x014c
+
+/* Bitfields in MCFGx */
+#define HMATRIX_ULBT_OFFSET                    0
+#define HMATRIX_ULBT_SIZE                      3
+
+/* Bitfields in SCFGx */
+#define HMATRIX_SLOT_CYCLE_OFFSET              0
+#define HMATRIX_SLOT_CYCLE_SIZE                        8
+#define HMATRIX_DEFMSTR_TYPE_OFFSET            16
+#define HMATRIX_DEFMSTR_TYPE_SIZE              2
+#define HMATRIX_FIXED_DEFMSTR_OFFSET           18
+#define HMATRIX_FIXED_DEFMSTR_SIZE             4
+#define HMATRIX_ARBT_OFFSET                    24
+#define HMATRIX_ARBT_SIZE                      2
+
+/* Bitfields in PRASx */
+#define HMATRIX_M0PR_OFFSET                    0
+#define HMATRIX_M0PR_SIZE                      4
+#define HMATRIX_M1PR_OFFSET                    4
+#define HMATRIX_M1PR_SIZE                      4
+#define HMATRIX_M2PR_OFFSET                    8
+#define HMATRIX_M2PR_SIZE                      4
+#define HMATRIX_M3PR_OFFSET                    12
+#define HMATRIX_M3PR_SIZE                      4
+#define HMATRIX_M4PR_OFFSET                    16
+#define HMATRIX_M4PR_SIZE                      4
+#define HMATRIX_M5PR_OFFSET                    20
+#define HMATRIX_M5PR_SIZE                      4
+#define HMATRIX_M6PR_OFFSET                    24
+#define HMATRIX_M6PR_SIZE                      4
+#define HMATRIX_M7PR_OFFSET                    28
+#define HMATRIX_M7PR_SIZE                      4
+
+/* Bitfields in PRBSx */
+#define HMATRIX_M8PR_OFFSET                    0
+#define HMATRIX_M8PR_SIZE                      4
+#define HMATRIX_M9PR_OFFSET                    4
+#define HMATRIX_M9PR_SIZE                      4
+#define HMATRIX_M10PR_OFFSET                   8
+#define HMATRIX_M10PR_SIZE                     4
+#define HMATRIX_M11PR_OFFSET                   12
+#define HMATRIX_M11PR_SIZE                     4
+#define HMATRIX_M12PR_OFFSET                   16
+#define HMATRIX_M12PR_SIZE                     4
+#define HMATRIX_M13PR_OFFSET                   20
+#define HMATRIX_M13PR_SIZE                     4
+#define HMATRIX_M14PR_OFFSET                   24
+#define HMATRIX_M14PR_SIZE                     4
+#define HMATRIX_M15PR_OFFSET                   28
+#define HMATRIX_M15PR_SIZE                     4
+
+/* Bitfields in SFR4 */
+#define HMATRIX_CS1A_OFFSET                    1
+#define HMATRIX_CS1A_SIZE                      1
+#define HMATRIX_CS3A_OFFSET                    3
+#define HMATRIX_CS3A_SIZE                      1
+#define HMATRIX_CS4A_OFFSET                    4
+#define HMATRIX_CS4A_SIZE                      1
+#define HMATRIX_CS5A_OFFSET                    5
+#define HMATRIX_CS5A_SIZE                      1
+#define HMATRIX_DBPUC_OFFSET                   8
+#define HMATRIX_DBPUC_SIZE                     1
+
+/* Constants for ULBT */
+#define HMATRIX_ULBT_INFINITE                  0
+#define HMATRIX_ULBT_SINGLE                    1
+#define HMATRIX_ULBT_FOUR_BEAT                 2
+#define HMATRIX_ULBT_EIGHT_BEAT                        3
+#define HMATRIX_ULBT_SIXTEEN_BEAT              4
+
+/* Constants for DEFMSTR_TYPE */
+#define HMATRIX_DEFMSTR_TYPE_NO_DEFAULT                0
+#define HMATRIX_DEFMSTR_TYPE_LAST_DEFAULT      1
+#define HMATRIX_DEFMSTR_TYPE_FIXED_DEFAULT     2
+
+/* Constants for ARBT */
+#define HMATRIX_ARBT_ROUND_ROBIN               0
+#define HMATRIX_ARBT_FIXED_PRIORITY            1
+
+/* Bit manipulation macros */
+#define HMATRIX_BIT(name)                                      \
+       (1 << HMATRIX_##name##_OFFSET)
+#define HMATRIX_BF(name,value)                                 \
+       (((value) & ((1 << HMATRIX_##name##_SIZE) - 1))         \
+        << HMATRIX_##name##_OFFSET)
+#define HMATRIX_BFEXT(name,value)                              \
+       (((value) >> HMATRIX_##name##_OFFSET)                   \
+        & ((1 << HMATRIX_##name##_SIZE) - 1))
+#define HMATRIX_BFINS(name,value,old)                          \
+       (((old) & ~(((1 << HMATRIX_##name##_SIZE) - 1)          \
+                   << HMATRIX_##name##_OFFSET))                \
+        | HMATRIX_BF(name,value))
+
+#endif /* __HMATRIX_H */
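
For readers unfamiliar with the HMATRIX_BF*/HMATRIX_BIT helpers defined above, here is a minimal sketch of how they compose a register value; the function name and the chosen field values are illustrative only, not part of the patch:

    /* Illustrative only: build a slave-configuration word (SCFGx layout)
     * using the bitfield helpers defined in this header. */
    static inline u32 hmatrix_example_scfg(u32 old)
    {
            u32 scfg = old;

            /* 8-bit SLOT_CYCLE field at bit 0 */
            scfg = HMATRIX_BFINS(SLOT_CYCLE, 16, scfg);
            /* 2-bit DEFMSTR_TYPE field at bit 16 */
            scfg = HMATRIX_BFINS(DEFMSTR_TYPE,
                                 HMATRIX_DEFMSTR_TYPE_FIXED_DEFAULT, scfg);
            /* 2-bit ARBT field at bit 24 */
            scfg = HMATRIX_BFINS(ARBT, HMATRIX_ARBT_ROUND_ROBIN, scfg);
            return scfg;
    }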
index 7691721..5e22a75 100644 (file)
@@ -75,12 +75,35 @@ int smc_set_configuration(int cs, const struct smc_config *config)
                return -EINVAL;
        }
 
+       switch (config->nwait_mode) {
+       case 0:
+               mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_DISABLED);
+               break;
+       case 1:
+               mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_RESERVED);
+               break;
+       case 2:
+               mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_FROZEN);
+               break;
+       case 3:
+               mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_READY);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (config->tdf_cycles) {
+               mode |= HSMC_BF(TDF_CYCLES, config->tdf_cycles);
+       }
+
        if (config->nrd_controlled)
                mode |= HSMC_BIT(READ_MODE);
        if (config->nwe_controlled)
                mode |= HSMC_BIT(WRITE_MODE);
        if (config->byte_write)
                mode |= HSMC_BIT(BAT);
+       if (config->tdf_mode)
+               mode |= HSMC_BIT(TDF_MODE);
 
        pr_debug("smc cs%d: setup/%08x pulse/%08x cycle/%08x mode/%08x\n",
                 cs, setup, pulse, cycle, mode);
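
The hunk above adds NWAIT (EXNW) and TDF handling to smc_set_configuration(). A hypothetical caller, restricted to the struct smc_config fields visible in this hunk (a real configuration also fills in the setup/pulse/cycle timing fields not shown here), could look like:

    /* Hypothetical fragment: values are illustrative only. */
    static struct smc_config example_config __initdata = {
            .nrd_controlled = 1,    /* read cycles controlled by NRD  */
            .nwe_controlled = 1,    /* write cycles controlled by NWE */
            .byte_write     = 1,
            .nwait_mode     = 3,    /* EXNW "ready" mode (see switch above) */
            .tdf_mode       = 1,    /* optimized data-float handling */
            .tdf_cycles     = 2,
    };

    /* ... smc_set_configuration(0, &example_config); */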
diff --git a/arch/avr32/mach-at32ap/time-tc.c b/arch/avr32/mach-at32ap/time-tc.c
new file mode 100644 (file)
index 0000000..e3070bd
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2004-2007 Atmel Corporation
+ *
+ * Based on MIPS implementation arch/mips/kernel/time.c
+ *   Copyright 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/profile.h>
+#include <linux/sysdev.h>
+#include <linux/err.h>
+
+#include <asm/div64.h>
+#include <asm/sysreg.h>
+#include <asm/io.h>
+#include <asm/sections.h>
+
+#include <asm/arch/time.h>
+
+/* how many counter cycles in a jiffy? */
+static u32 cycles_per_jiffy;
+
+/* the count value for the next timer interrupt */
+static u32 expirelo;
+
+/* the I/O registers of the TC module */
+static void __iomem *ioregs;
+
+cycle_t read_cycle_count(void)
+{
+       return (cycle_t)timer_read(ioregs, 0, CV);
+}
+
+struct clocksource clocksource_avr32 = {
+       .name           = "avr32",
+       .rating         = 342,
+       .read           = read_cycle_count,
+       .mask           = CLOCKSOURCE_MASK(16),
+       .shift          = 16,
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void avr32_timer_ack(void)
+{
+       u16 count = expirelo;
+
+       /* Ack this timer interrupt and set the next one; use a u16
+        * variable so it wraps around correctly. */
+       count += cycles_per_jiffy;
+       expirelo = count;
+       timer_write(ioregs, 0, RC, expirelo);
+
+       /* Check to see if we have missed any timer interrupts */
+       count = timer_read(ioregs, 0, CV);
+       if ((count - expirelo) < 0x7fff) {
+               expirelo = count + cycles_per_jiffy;
+               timer_write(ioregs, 0, RC, expirelo);
+       }
+}
+
+u32 avr32_hpt_read(void)
+{
+       return timer_read(ioregs, 0, CV);
+}
+
+static int avr32_timer_calc_div_and_set_jiffies(struct clk *pclk)
+{
+       unsigned int cycles_max = (clocksource_avr32.mask + 1) / 2;
+       unsigned int divs[] = { 4, 8, 16, 32 };
+       int divs_size = sizeof(divs) / sizeof(*divs);
+       int i = 0;
+       unsigned long count_hz;
+       unsigned long shift;
+       unsigned long mult;
+       int clock_div = -1;
+       u64 tmp;
+
+       shift = clocksource_avr32.shift;
+
+       do {
+               count_hz = clk_get_rate(pclk) / divs[i];
+               mult = clocksource_hz2mult(count_hz, shift);
+               clocksource_avr32.mult = mult;
+
+               tmp = TICK_NSEC;
+               tmp <<= shift;
+               tmp += mult / 2;
+               do_div(tmp, mult);
+
+               cycles_per_jiffy = tmp;
+       } while (cycles_per_jiffy > cycles_max && ++i < divs_size);
+
+       clock_div = i + 1;
+
+       if (clock_div > divs_size) {
+               pr_debug("timer: could not calculate clock divider\n");
+               return -EFAULT;
+       }
+
+       /* Set the clock divider */
+       timer_write(ioregs, 0, CMR, TIMER_BF(CMR_TCCLKS, clock_div));
+
+       return 0;
+}
+
+int avr32_hpt_init(unsigned int count)
+{
+       struct resource *regs;
+       struct clk *pclk;
+       int irq = -1;
+       int ret = 0;
+
+       ret = -ENXIO;
+
+       irq = platform_get_irq(&at32_systc0_device, 0);
+       if (irq < 0) {
+               pr_debug("timer: could not get irq\n");
+               goto out_error;
+       }
+
+       pclk = clk_get(&at32_systc0_device.dev, "pclk");
+       if (IS_ERR(pclk)) {
+               pr_debug("timer: could not get clk: %ld\n", PTR_ERR(pclk));
+               goto out_error;
+       }
+       clk_enable(pclk);
+
+       regs = platform_get_resource(&at32_systc0_device, IORESOURCE_MEM, 0);
+       if (!regs) {
+               pr_debug("timer: could not get resource\n");
+               goto out_error_clk;
+       }
+
+       ioregs = ioremap(regs->start, regs->end - regs->start + 1);
+       if (!ioregs) {
+               pr_debug("timer: could not get ioregs\n");
+               goto out_error_clk;
+       }
+
+       ret = avr32_timer_calc_div_and_set_jiffies(pclk);
+       if (ret)
+               goto out_error_io;
+
+       ret = setup_irq(irq, &timer_irqaction);
+       if (ret) {
+               pr_debug("timer: could not request irq %d: %d\n",
+                               irq, ret);
+               goto out_error_io;
+       }
+
+       expirelo = (timer_read(ioregs, 0, CV) / cycles_per_jiffy + 1)
+               * cycles_per_jiffy;
+
+       /* Enable clock and interrupts on RC compare */
+       timer_write(ioregs, 0, CCR, TIMER_BIT(CCR_CLKEN));
+       timer_write(ioregs, 0, IER, TIMER_BIT(IER_CPCS));
+       /* Set cycles to first interrupt */
+       timer_write(ioregs, 0,  RC, expirelo);
+
+       printk(KERN_INFO "timer: AT32AP system timer/counter at 0x%p irq %d\n",
+                       ioregs, irq);
+
+       return 0;
+
+out_error_io:
+       iounmap(ioregs);
+out_error_clk:
+       clk_put(pclk);
+out_error:
+       return ret;
+}
+
+int avr32_hpt_start(void)
+{
+       timer_write(ioregs, 0, CCR, TIMER_BIT(CCR_SWTRG));
+       return 0;
+}
+
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+       unsigned int sr = timer_read(ioregs, 0, SR);
+
+       if (sr & TIMER_BIT(SR_CPCS)) {
+               /* ack timer interrupt and try to set next interrupt */
+               avr32_timer_ack();
+
+               /*
+                * Call the generic timer interrupt handler
+                */
+               write_seqlock(&xtime_lock);
+               do_timer(1);
+               write_sequnlock(&xtime_lock);
+
+               /*
+                * In UP mode, we call local_timer_interrupt() to do profiling
+                * and process accounting.
+                *
+                * SMP is not supported yet.
+                */
+               local_timer_interrupt(irq, dev_id);
+
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_NONE;
+}
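
To make the clock-divider search in avr32_timer_calc_div_and_set_jiffies() concrete, here is a worked example with assumed numbers (a 60 MHz timer input clock and HZ = 250, i.e. TICK_NSEC of roughly 4,000,000 ns); cycles_max is (CLOCKSOURCE_MASK(16) + 1) / 2 = 32768:

    divider 4:  count_hz = 15 MHz   ->  cycles_per_jiffy ~= 15,000,000 / 250 = 60,000  (> 32768, keep looking)
    divider 8:  count_hz = 7.5 MHz  ->  cycles_per_jiffy ~= 7,500,000 / 250  = 30,000  (fits, so clock_div = 2)

Keeping the per-tick interval below half of the 16-bit counter period leaves room for the missed-interrupt check in avr32_timer_ack() and for unambiguous clocksource deltas.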
index 6785572..146ebdb 100644 (file)
 #include <asm/kdebug.h>
 #include <asm/mmu_context.h>
 #include <asm/sysreg.h>
-#include <asm/uaccess.h>
 #include <asm/tlb.h>
-
-#ifdef DEBUG
-static void dump_code(unsigned long pc)
-{
-       char *p = (char *)pc;
-       char val;
-       int i;
-
-
-       printk(KERN_DEBUG "Code:");
-       for (i = 0; i < 16; i++) {
-               if (__get_user(val, p + i))
-                       break;
-               printk(" %02x", val);
-       }
-       printk("\n");
-}
-#endif
+#include <asm/uaccess.h>
 
 #ifdef CONFIG_KPROBES
 ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
@@ -68,17 +50,19 @@ static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
 }
 #endif
 
+int exception_trace = 1;
+
 /*
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
  *
  * ecr is the Exception Cause Register. Possible values are:
- *   5:  Page not found (instruction access)
  *   6:  Protection fault (instruction access)
- *   12: Page not found (read access)
- *   13: Page not found (write access)
- *   14: Protection fault (read access)
- *   15: Protection fault (write access)
+ *   15: Protection fault (read access)
+ *   16: Protection fault (write access)
+ *   20: Page not found (instruction access)
+ *   24: Page not found (read access)
+ *   28: Page not found (write access)
  */
 asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 {
@@ -88,7 +72,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
        const struct exception_table_entry *fixup;
        unsigned long address;
        unsigned long page;
-       int writeaccess = 0;
+       int writeaccess;
+       long signr;
+       int code;
 
        if (notify_page_fault(DIE_PAGE_FAULT, regs,
                              ecr, SIGSEGV) == NOTIFY_STOP)
@@ -99,6 +85,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
        tsk = current;
        mm = tsk->mm;
 
+       signr = SIGSEGV;
+       code = SEGV_MAPERR;
+
        /*
         * If we're in an interrupt or have no user context, we must
         * not take the fault...
@@ -125,7 +114,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
         * can handle it...
         */
 good_area:
-       //pr_debug("good area: vm_flags = 0x%lx\n", vma->vm_flags);
+       code = SEGV_ACCERR;
+       writeaccess = 0;
+
        switch (ecr) {
        case ECR_PROTECTION_X:
        case ECR_TLB_MISS_X:
@@ -176,46 +167,24 @@ survive:
         * map. Fix it, but check if it's kernel or user first...
         */
 bad_area:
-       pr_debug("Bad area [%s:%u]: addr %08lx, ecr %lu\n",
-                tsk->comm, tsk->pid, address, ecr);
-
        up_read(&mm->mmap_sem);
 
        if (user_mode(regs)) {
-               /* Hmm...we have to pass address and ecr somehow... */
-               /* tsk->thread.address = address;
-                  tsk->thread.error_code = ecr; */
-#ifdef DEBUG
-               show_regs(regs);
-               dump_code(regs->pc);
-
-               page = sysreg_read(PTBR);
-               printk("ptbr = %08lx", page);
-               if (page) {
-                       page = ((unsigned long *)page)[address >> 22];
-                       printk(" pgd = %08lx", page);
-                       if (page & _PAGE_PRESENT) {
-                               page &= PAGE_MASK;
-                               address &= 0x003ff000;
-                               page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
-                               printk(" pte = %08lx\n", page);
-                       }
-               }
-#endif
-               pr_debug("Sending SIGSEGV to PID %d...\n",
-                       tsk->pid);
-               force_sig(SIGSEGV, tsk);
+               if (exception_trace)
+                       printk("%s%s[%d]: segfault at %08lx pc %08lx "
+                              "sp %08lx ecr %lu\n",
+                              is_init(tsk) ? KERN_EMERG : KERN_INFO,
+                              tsk->comm, tsk->pid, address, regs->pc,
+                              regs->sp, ecr);
+               _exception(SIGSEGV, regs, code, address);
                return;
        }
 
 no_context:
-       pr_debug("No context\n");
-
        /* Are we prepared to handle this kernel fault? */
        fixup = search_exception_tables(regs->pc);
        if (fixup) {
                regs->pc = fixup->fixup;
-               pr_debug("Found fixup at %08lx\n", fixup->fixup);
                return;
        }
 
@@ -230,7 +199,6 @@ no_context:
                printk(KERN_ALERT
                       "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
-       printk(KERN_ALERT "pc = %08lx\n", regs->pc);
 
        page = sysreg_read(PTBR);
        printk(KERN_ALERT "ptbr = %08lx", page);
@@ -241,20 +209,20 @@ no_context:
                        page &= PAGE_MASK;
                        address &= 0x003ff000;
                        page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
-                       printk(" pte = %08lx\n", page);
+                       printk(" pte = %08lx", page);
                }
        }
-       die("\nOops", regs, ecr);
-       do_exit(SIGKILL);
+       printk("\n");
+       die("Kernel access of bad area", regs, signr);
+       return;
 
        /*
         * We ran out of memory, or some other thing happened to us
         * that made us unable to handle the page fault gracefully.
         */
 out_of_memory:
-       printk("Out of memory\n");
        up_read(&mm->mmap_sem);
-       if (current->pid == 1) {
+       if (is_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
@@ -267,21 +235,20 @@ out_of_memory:
 do_sigbus:
        up_read(&mm->mmap_sem);
 
-       /*
-        * Send a sigbus, regardless of whether we were in kernel or
-        * user mode.
-        */
-       /* address, error_code, trap_no, ... */
-#ifdef DEBUG
-       show_regs(regs);
-       dump_code(regs->pc);
-#endif
-       pr_debug("Sending SIGBUS to PID %d...\n", tsk->pid);
-       force_sig(SIGBUS, tsk);
-
        /* Kernel mode? Handle exceptions or die */
+       signr = SIGBUS;
+       code = BUS_ADRERR;
        if (!user_mode(regs))
                goto no_context;
+
+       if (exception_trace)
+               printk("%s%s[%d]: bus error at %08lx pc %08lx "
+                      "sp %08lx ecr %lu\n",
+                      is_init(tsk) ? KERN_EMERG : KERN_INFO,
+                      tsk->comm, tsk->pid, address, regs->pc,
+                      regs->sp, ecr);
+
+       _exception(SIGBUS, regs, BUS_ADRERR, address);
 }
 
 asmlinkage void do_bus_error(unsigned long addr, int write_access,
@@ -292,8 +259,7 @@ asmlinkage void do_bus_error(unsigned long addr, int write_access,
               addr, write_access ? "write" : "read");
        printk(KERN_INFO "DTLB dump:\n");
        dump_dtlb();
-       die("Bus Error", regs, write_access);
-       do_exit(SIGKILL);
+       die("Bus Error", regs, SIGKILL);
 }
 
 /*
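
With exception_trace defaulting to 1 as introduced above, a faulting user process now leaves a single trace line in the kernel log. A hypothetical rendering of the new segfault printk (all values made up), where ecr 24 corresponds to "page not found (read access)" in the updated table:

    myapp[742]: segfault at 9ff00a04 pc 90001238 sp 9fffff60 ecr 24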
index 70da689..82cf708 100644 (file)
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/init.h>
-#include <linux/initrd.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
 #include <linux/pagemap.h>
-#include <linux/pfn.h>
 #include <linux/nodemask.h>
 
 #include <asm/page.h>
@@ -78,242 +76,6 @@ void show_mem(void)
        printk ("%d pages swap cached\n", cached);
 }
 
-static void __init print_memory_map(const char *what,
-                                   struct tag_mem_range *mem)
-{
-       printk ("%s:\n", what);
-       for (; mem; mem = mem->next) {
-               printk ("  %08lx - %08lx\n",
-                       (unsigned long)mem->addr,
-                       (unsigned long)(mem->addr + mem->size));
-       }
-}
-
-#define MAX_LOWMEM     HIGHMEM_START
-#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
-
-/*
- * Sort a list of memory regions in-place by ascending address.
- *
- * We're using bubble sort because we only have singly linked lists
- * with few elements.
- */
-static void __init sort_mem_list(struct tag_mem_range **pmem)
-{
-       int done;
-       struct tag_mem_range **a, **b;
-
-       if (!*pmem)
-               return;
-
-       do {
-               done = 1;
-               a = pmem, b = &(*pmem)->next;
-               while (*b) {
-                       if ((*a)->addr > (*b)->addr) {
-                               struct tag_mem_range *tmp;
-                               tmp = (*b)->next;
-                               (*b)->next = *a;
-                               *a = *b;
-                               *b = tmp;
-                               done = 0;
-                       }
-                       a = &(*a)->next;
-                       b = &(*a)->next;
-               }
-       } while (!done);
-}
-
-/*
- * Find a free memory region large enough for storing the
- * bootmem bitmap.
- */
-static unsigned long __init
-find_bootmap_pfn(const struct tag_mem_range *mem)
-{
-       unsigned long bootmap_pages, bootmap_len;
-       unsigned long node_pages = PFN_UP(mem->size);
-       unsigned long bootmap_addr = mem->addr;
-       struct tag_mem_range *reserved = mem_reserved;
-       struct tag_mem_range *ramdisk = mem_ramdisk;
-       unsigned long kern_start = virt_to_phys(_stext);
-       unsigned long kern_end = virt_to_phys(_end);
-
-       bootmap_pages = bootmem_bootmap_pages(node_pages);
-       bootmap_len = bootmap_pages << PAGE_SHIFT;
-
-       /*
-        * Find a large enough region without reserved pages for
-        * storing the bootmem bitmap. We can take advantage of the
-        * fact that all lists have been sorted.
-        *
-        * We have to check explicitly reserved regions as well as the
-        * kernel image and any RAMDISK images...
-        *
-        * Oh, and we have to make sure we don't overwrite the taglist
-        * since we're going to use it until the bootmem allocator is
-        * fully up and running.
-        */
-       while (1) {
-               if ((bootmap_addr < kern_end) &&
-                   ((bootmap_addr + bootmap_len) > kern_start))
-                       bootmap_addr = kern_end;
-
-               while (reserved &&
-                      (bootmap_addr >= (reserved->addr + reserved->size)))
-                       reserved = reserved->next;
-
-               if (reserved &&
-                   ((bootmap_addr + bootmap_len) >= reserved->addr)) {
-                       bootmap_addr = reserved->addr + reserved->size;
-                       continue;
-               }
-
-               while (ramdisk &&
-                      (bootmap_addr >= (ramdisk->addr + ramdisk->size)))
-                       ramdisk = ramdisk->next;
-
-               if (!ramdisk ||
-                   ((bootmap_addr + bootmap_len) < ramdisk->addr))
-                       break;
-
-               bootmap_addr = ramdisk->addr + ramdisk->size;
-       }
-
-       if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size))
-               return ~0UL;
-
-       return PFN_UP(bootmap_addr);
-}
-
-void __init setup_bootmem(void)
-{
-       unsigned bootmap_size;
-       unsigned long first_pfn, bootmap_pfn, pages;
-       unsigned long max_pfn, max_low_pfn;
-       unsigned long kern_start = virt_to_phys(_stext);
-       unsigned long kern_end = virt_to_phys(_end);
-       unsigned node = 0;
-       struct tag_mem_range *bank, *res;
-
-       sort_mem_list(&mem_phys);
-       sort_mem_list(&mem_reserved);
-
-       print_memory_map("Physical memory", mem_phys);
-       print_memory_map("Reserved memory", mem_reserved);
-
-       nodes_clear(node_online_map);
-
-       if (mem_ramdisk) {
-#ifdef CONFIG_BLK_DEV_INITRD
-               initrd_start = (unsigned long)__va(mem_ramdisk->addr);
-               initrd_end = initrd_start + mem_ramdisk->size;
-
-               print_memory_map("RAMDISK images", mem_ramdisk);
-               if (mem_ramdisk->next)
-                       printk(KERN_WARNING
-                              "Warning: Only the first RAMDISK image "
-                              "will be used\n");
-               sort_mem_list(&mem_ramdisk);
-#else
-               printk(KERN_WARNING "RAM disk image present, but "
-                      "no initrd support in kernel!\n");
-#endif
-       }
-
-       if (mem_phys->next)
-               printk(KERN_WARNING "Only using first memory bank\n");
-
-       for (bank = mem_phys; bank; bank = NULL) {
-               first_pfn = PFN_UP(bank->addr);
-               max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size);
-               bootmap_pfn = find_bootmap_pfn(bank);
-               if (bootmap_pfn > max_pfn)
-                       panic("No space for bootmem bitmap!\n");
-
-               if (max_low_pfn > MAX_LOWMEM_PFN) {
-                       max_low_pfn = MAX_LOWMEM_PFN;
-#ifndef CONFIG_HIGHMEM
-                       /*
-                        * Lowmem is memory that can be addressed
-                        * directly through P1/P2
-                        */
-                       printk(KERN_WARNING
-                              "Node %u: Only %ld MiB of memory will be used.\n",
-                              node, MAX_LOWMEM >> 20);
-                       printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-#else
-#error HIGHMEM is not supported by AVR32 yet
-#endif
-               }
-
-               /* Initialize the boot-time allocator with low memory only. */
-               bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
-                                                first_pfn, max_low_pfn);
-
-               printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n",
-                      node, NODE_DATA(node)->bdata,
-                      NODE_DATA(node)->bdata->node_bootmem_map);
-
-               /*
-                * Register fully available RAM pages with the bootmem
-                * allocator.
-                */
-               pages = max_low_pfn - first_pfn;
-               free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
-                                  PFN_PHYS(pages));
-
-               /*
-                * Reserve space for the kernel image (if present in
-                * this node)...
-                */
-               if ((kern_start >= PFN_PHYS(first_pfn)) &&
-                   (kern_start < PFN_PHYS(max_pfn))) {
-                       printk("Node %u: Kernel image %08lx - %08lx\n",
-                              node, kern_start, kern_end);
-                       reserve_bootmem_node(NODE_DATA(node), kern_start,
-                                            kern_end - kern_start);
-               }
-
-               /* ...the bootmem bitmap... */
-               reserve_bootmem_node(NODE_DATA(node),
-                                    PFN_PHYS(bootmap_pfn),
-                                    bootmap_size);
-
-               /* ...any RAMDISK images... */
-               for (res = mem_ramdisk; res; res = res->next) {
-                       if (res->addr > PFN_PHYS(max_pfn))
-                               break;
-
-                       if (res->addr >= PFN_PHYS(first_pfn)) {
-                               printk("Node %u: RAMDISK %08lx - %08lx\n",
-                                      node,
-                                      (unsigned long)res->addr,
-                                      (unsigned long)(res->addr + res->size));
-                               reserve_bootmem_node(NODE_DATA(node),
-                                                    res->addr, res->size);
-                       }
-               }
-
-               /* ...and any other reserved regions. */
-               for (res = mem_reserved; res; res = res->next) {
-                       if (res->addr > PFN_PHYS(max_pfn))
-                               break;
-
-                       if (res->addr >= PFN_PHYS(first_pfn)) {
-                               printk("Node %u: Reserved %08lx - %08lx\n",
-                                      node,
-                                      (unsigned long)res->addr,
-                                      (unsigned long)(res->addr + res->size));
-                               reserve_bootmem_node(NODE_DATA(node),
-                                                    res->addr, res->size);
-                       }
-               }
-
-               node_set_online(node);
-       }
-}
-
 /*
  * paging_init() sets up the page tables
  *
index 9eca21b..426f59b 100644 (file)
@@ -5,15 +5,9 @@
 #include <asm/alternative.h>
 #include <asm/sections.h>
 
-static int no_replacement    = 0;
 static int smp_alt_once      = 0;
 static int debug_alternative = 0;
 
-static int __init noreplacement_setup(char *s)
-{
-       no_replacement = 1;
-       return 1;
-}
 static int __init bootonly(char *str)
 {
        smp_alt_once = 1;
@@ -25,7 +19,6 @@ static int __init debug_alt(char *str)
        return 1;
 }
 
-__setup("noreplacement", noreplacement_setup);
 __setup("smp-alt-boot", bootonly);
 __setup("debug-alternative", debug_alt);
 
@@ -252,9 +245,6 @@ void alternatives_smp_module_add(struct module *mod, char *name,
        struct smp_alt_module *smp;
        unsigned long flags;
 
-       if (no_replacement)
-               return;
-
        if (smp_alt_once) {
                if (boot_cpu_has(X86_FEATURE_UP))
                        alternatives_smp_unlock(locks, locks_end,
@@ -289,7 +279,7 @@ void alternatives_smp_module_del(struct module *mod)
        struct smp_alt_module *item;
        unsigned long flags;
 
-       if (no_replacement || smp_alt_once)
+       if (smp_alt_once)
                return;
 
        spin_lock_irqsave(&smp_alt, flags);
@@ -320,7 +310,7 @@ void alternatives_smp_switch(int smp)
        return;
 #endif
 
-       if (no_replacement || smp_alt_once)
+       if (smp_alt_once)
                return;
        BUG_ON(!smp && (num_online_cpus() > 1));
 
@@ -386,13 +376,6 @@ extern struct paravirt_patch __start_parainstructions[],
 void __init alternative_instructions(void)
 {
        unsigned long flags;
-       if (no_replacement) {
-               printk(KERN_INFO "(SMP-)alternatives turned off\n");
-               free_init_pages("SMP alternatives",
-                               (unsigned long)__smp_alt_begin,
-                               (unsigned long)__smp_alt_end);
-               return;
-       }
 
        local_irq_save(flags);
        apply_alternatives(__alt_instructions, __alt_instructions_end);
index a1f1b71..2b030d6 100644 (file)
@@ -758,7 +758,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
                                NULL, (void *)&pr);
 
        /* Check ACPI support for C3 state */
-       if (pr != NULL && longhaul_version != TYPE_LONGHAUL_V1) {
+       if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
                cx = &pr->power.states[ACPI_STATE_C3];
                if (cx->address > 0 && cx->latency <= 1000) {
                        longhaul_flags |= USE_ACPI_C3;
index 9f1e8c1..84c3497 100644 (file)
@@ -126,7 +126,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
        int cpu;
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);
        for_each_possible_cpu (cpu) {
-               if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+               if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]))
                        return 0;
        }
        return 1;
@@ -142,7 +142,7 @@ int avail_to_resrv_perfctr_nmi(unsigned int msr)
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
        for_each_possible_cpu (cpu) {
-               if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+               if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]))
                        return 0;
        }
        return 1;
@@ -157,7 +157,7 @@ static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
        counter = nmi_perfctr_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-       if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+       if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]))
                return 1;
        return 0;
 }
@@ -171,7 +171,7 @@ static void __release_perfctr_nmi(int cpu, unsigned int msr)
        counter = nmi_perfctr_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-       clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
+       clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]);
 }
 
 int reserve_perfctr_nmi(unsigned int msr)
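
The &per_cpu(perfctr_nmi_owner, cpu)[0] change above is a pointer-type fix rather than a behavioural one: presumably the per-CPU owner bitmap is declared as an array elsewhere in this patch, and the bit helpers want a plain unsigned long *. A minimal sketch of the distinction (names are illustrative only):

    /* Illustrative only: &bitmap vs &bitmap[0] once bitmap is an array. */
    static unsigned long owner[3];

    static int owner_bit_free(unsigned int counter)
    {
            /* &owner    has type unsigned long (*)[3] (pointer to array);
             * &owner[0] has type unsigned long *, which is what
             * test_bit() expects. */
            return !test_bit(counter, &owner[0]);
    }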
index 424e925..f26077a 100644 (file)
@@ -427,7 +427,6 @@ make_new_skb(struct net_device *dev)
                printk(KERN_NOTICE "%s: memory squeeze. dropping packet.\n", dev->name);
                return NULL;
        }
-       nskb->dev = dev;
 
        skb_reserve(nskb, 2);   /* Align IP on 16 byte boundaries */
 
@@ -474,7 +473,7 @@ simeth_rx(struct net_device *dev)
                 * XXX Fix me
                 * Should really do a csum+copy here
                 */
-               memcpy(skb->data, frame, len);
+               skb_copy_to_linear_data(skb, frame, len);
 #endif
                skb->protocol = eth_type_trans(skb, dev);
 
index c8173db..5419acb 100644 (file)
@@ -233,7 +233,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
                        "%lu)\n", skb->data, &msg->data,
                        (size_t) msg->embedded_bytes);
 
-               memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes);
+               skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes);
        } else {
                dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
                        "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
@@ -264,17 +264,16 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
 
        dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
                "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
-               (void *) skb->data, (void *) skb->tail, (void *) skb->end,
+               (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
                skb->len);
 
-       skb->dev = xpnet_device;
        skb->protocol = eth_type_trans(skb, xpnet_device);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
        dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p "
                "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n",
-               (void *) skb->head, (void *) skb->data, (void *) skb->tail,
-               (void *) skb->end, skb->len);
+               (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
+               skb_end_pointer(skb), skb->len);
 
 
        xpnet_device->last_rx = jiffies;
@@ -476,7 +475,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
                "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
-               (void *) skb->data, (void *) skb->tail, (void *) skb->end,
+               (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
                skb->len);
 
 
@@ -498,7 +497,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* get the beginning of the first cacheline and end of last */
        start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1));
-       end_addr = L1_CACHE_ALIGN((u64) skb->tail);
+       end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
 
        /* calculate how many bytes to embed in the XPC message */
        embedded_bytes = 0;
@@ -567,14 +566,15 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        msg->version = XPNET_VERSION_EMBED;
                        dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
                                &msg->data, skb->data, (size_t) embedded_bytes);
-                       memcpy(&msg->data, skb->data, (size_t) embedded_bytes);
+                       skb_copy_from_linear_data(skb, &msg->data,
+                                                 (size_t)embedded_bytes);
                } else {
                        msg->version = XPNET_VERSION;
                }
                msg->magic = XPNET_MAGIC;
                msg->size = end_addr - start_addr;
                msg->leadin_ignore = (u64) skb->data - start_addr;
-               msg->tailout_ignore = end_addr - (u64) skb->tail;
+               msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
                msg->buf_pa = __pa(start_addr);
 
                dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa="
index c78b143..130d825 100644 (file)
@@ -10,7 +10,6 @@ menu "Machine selection"
 
 config ZONE_DMA
        bool
-       default y
 
 choice
        prompt "System type"
@@ -165,7 +164,7 @@ config MIPS_COBALT
        select HW_HAS_PCI
        select I8259
        select IRQ_CPU
-       select MIPS_GT64111
+       select PCI_GT64XXX_PCI0
        select SYS_HAS_CPU_NEVADA
        select SYS_HAS_EARLY_PRINTK
        select SYS_SUPPORTS_32BIT_KERNEL
@@ -207,7 +206,7 @@ config MIPS_EV64120
        depends on EXPERIMENTAL
        select DMA_NONCOHERENT
        select HW_HAS_PCI
-       select MIPS_GT64120
+       select PCI_GT64XXX_PCI0
        select SYS_HAS_CPU_R5000
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_64BIT_KERNEL
@@ -245,7 +244,7 @@ config LASAT
        select DMA_NONCOHERENT
        select SYS_HAS_EARLY_PRINTK
        select HW_HAS_PCI
-       select MIPS_GT64120
+       select PCI_GT64XXX_PCI0
        select MIPS_NILE4
        select R5000_CPU_SCACHE
        select SYS_HAS_CPU_R5000
@@ -263,7 +262,7 @@ config MIPS_ATLAS
        select HW_HAS_PCI
        select MIPS_BOARDS_GEN
        select MIPS_BONITO64
-       select MIPS_GT64120
+       select PCI_GT64XXX_PCI0
        select MIPS_MSC
        select RM7000_CPU_SCACHE
        select SWAP_IO_SPACE
@@ -296,7 +295,7 @@ config MIPS_MALTA
        select MIPS_BOARDS_GEN
        select MIPS_BONITO64
        select MIPS_CPU_SCACHE
-       select MIPS_GT64120
+       select PCI_GT64XXX_PCI0
        select MIPS_MSC
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_MIPS32_R1
@@ -340,7 +339,7 @@ config WR_PPMC
        select BOOT_ELF32
        select DMA_NONCOHERENT
        select HW_HAS_PCI
-       select MIPS_GT64120
+       select PCI_GT64XXX_PCI0
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_HAS_CPU_MIPS32_R2
@@ -398,7 +397,7 @@ config MOMENCO_OCELOT
        select HW_HAS_PCI
        select IRQ_CPU
        select IRQ_CPU_RM7K
-       select MIPS_GT64120
+       select PCI_GT64XXX_PCI0
        select RM7000_CPU_SCACHE
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_RM7000
@@ -501,10 +500,8 @@ config DDB5477
          ether port USB, AC97, PCI, etc.
 
 config MACH_VR41XX
-       bool "NEC VR41XX-based machines"
+       bool "NEC VR4100 series based machines"
        select SYS_HAS_CPU_VR41XX
-       select SYS_SUPPORTS_32BIT_KERNEL
-       select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
        select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config PMC_YOSEMITE
@@ -779,6 +776,7 @@ config TOSHIBA_JMR3927
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_BIG_ENDIAN
        select TOSHIBA_BOARDS
+       select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config TOSHIBA_RBTX4927
        bool "Toshiba TBTX49[23]7 board"
@@ -922,6 +920,7 @@ config SYS_HAS_EARLY_PRINTK
 
 config GENERIC_ISA_DMA
        bool
+       select ZONE_DMA
 
 config I8259
        bool
@@ -945,6 +944,7 @@ config MIPS_DISABLE_OBSOLETE_IDE
 
 config GENERIC_ISA_DMA_SUPPORT_BROKEN
        bool
+       select ZONE_DMA
 
 #
 # Endianess selection.  Sufficiently obscure so many users don't know what to
@@ -999,10 +999,7 @@ config DDB5XXX_COMMON
 config MIPS_BOARDS_GEN
        bool
 
-config MIPS_GT64111
-       bool
-
-config MIPS_GT64120
+config PCI_GT64XXX_PCI0
        bool
 
 config MIPS_TX3927
index 92bca6a..f2f742d 100644 (file)
@@ -530,25 +530,29 @@ cflags-$(CONFIG_SGI_IP32) += -Iinclude/asm-mips/mach-ip32
 load-$(CONFIG_SGI_IP32)                += 0xffffffff80004000
 
 #
-# Sibyte SB1250 SOC
+# Sibyte SB1250/BCM1480 SOC
 #
 # This is a LIB so that it links at the end, and initcalls are later
 # the sequence; but it is built as an object so that modules don't get
 # removed (as happens, even if they have __initcall/module_init)
 #
 core-$(CONFIG_SIBYTE_BCM112X)  += arch/mips/sibyte/sb1250/
+core-$(CONFIG_SIBYTE_BCM112X)  += arch/mips/sibyte/common/
 cflags-$(CONFIG_SIBYTE_BCM112X)        += -Iinclude/asm-mips/mach-sibyte \
                        -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL
 
 core-$(CONFIG_SIBYTE_SB1250)   += arch/mips/sibyte/sb1250/
+core-$(CONFIG_SIBYTE_SB1250)   += arch/mips/sibyte/common/
 cflags-$(CONFIG_SIBYTE_SB1250) += -Iinclude/asm-mips/mach-sibyte \
                        -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL
 
 core-$(CONFIG_SIBYTE_BCM1x55)  += arch/mips/sibyte/bcm1480/
+core-$(CONFIG_SIBYTE_BCM1x55)  += arch/mips/sibyte/common/
 cflags-$(CONFIG_SIBYTE_BCM1x55)        += -Iinclude/asm-mips/mach-sibyte \
                        -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1480_ALL
 
 core-$(CONFIG_SIBYTE_BCM1x80)  += arch/mips/sibyte/bcm1480/
+core-$(CONFIG_SIBYTE_BCM1x80)  += arch/mips/sibyte/common/
 cflags-$(CONFIG_SIBYTE_BCM1x80)        += -Iinclude/asm-mips/mach-sibyte \
                        -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1480_ALL
 
index 42f0eda..2f0e4c0 100644 (file)
@@ -63,7 +63,7 @@ volatile void __iomem * const ocd_base = (void *) (EXCITE_ADDR_OCD);
 volatile void __iomem * const titan_base = (void *) (EXCITE_ADDR_TITAN);
 
 /* Protect access to shared GPI registers */
-spinlock_t titan_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(titan_lock);
 int titan_irqflags;
 
 
index b36dd8f..de017c1 100644 (file)
@@ -4,5 +4,6 @@
 
 obj-y   := irq.o reset.o setup.o
 
+obj-$(CONFIG_PCI)              += pci.o
 obj-$(CONFIG_EARLY_PRINTK)     += console.o
 obj-$(CONFIG_MTD_PHYSMAP)      += mtd.o
index ca56b41..0485d51 100644 (file)
@@ -1,13 +1,11 @@
 /*
  * (C) P. Horton 2006
  */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/console.h>
 #include <linux/serial_reg.h>
+
 #include <asm/addrspace.h>
-#include <asm/mach-cobalt/cobalt.h>
+
+#include <cobalt.h>
 
 void prom_putchar(char c)
 {
index fe93b84..950ad1e 100644 (file)
@@ -17,7 +17,7 @@
 #include <asm/irq_cpu.h>
 #include <asm/gt64120.h>
 
-#include <asm/mach-cobalt/cobalt.h>
+#include <cobalt.h>
 
 /*
  * We have two types of interrupts that we handle, ones that come in through
diff --git a/arch/mips/cobalt/pci.c b/arch/mips/cobalt/pci.c
new file mode 100644 (file)
index 0000000..d91027f
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Register PCI controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 2004, 05 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2001, 2002, 2003 by Liam Davies (ldavies@agile.tv)
+ *
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/gt64120.h>
+
+extern struct pci_ops gt64xxx_pci0_ops;
+
+static struct resource cobalt_mem_resource = {
+       .start  = GT_DEF_PCI0_MEM0_BASE,
+       .end    = GT_DEF_PCI0_MEM0_BASE + GT_DEF_PCI0_MEM0_SIZE - 1,
+       .name   = "PCI memory",
+       .flags  = IORESOURCE_MEM,
+};
+
+static struct resource cobalt_io_resource = {
+       .start  = 0x1000,
+       .end    = GT_DEF_PCI0_IO_SIZE - 1,
+       .name   = "PCI I/O",
+       .flags  = IORESOURCE_IO,
+};
+
+static struct pci_controller cobalt_pci_controller = {
+       .pci_ops        = &gt64xxx_pci0_ops,
+       .mem_resource   = &cobalt_mem_resource,
+       .io_resource    = &cobalt_io_resource,
+       .io_offset      = 0 - GT_DEF_PCI0_IO_BASE,
+};
+
+static int __init cobalt_pci_init(void)
+{
+       register_pci_controller(&cobalt_pci_controller);
+
+       return 0;
+}
+
+arch_initcall(cobalt_pci_init);
index 753dfcc..43cca21 100644 (file)
@@ -8,15 +8,12 @@
  * Copyright (C) 1995, 1996, 1997 by Ralf Baechle
  * Copyright (C) 2001 by Liam Davies (ldavies@agile.tv)
  */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <asm/cacheflush.h>
+#include <linux/jiffies.h>
+
 #include <asm/io.h>
-#include <asm/processor.h>
 #include <asm/reboot.h>
-#include <asm/system.h>
-#include <asm/mipsregs.h>
-#include <asm/mach-cobalt/cobalt.h>
+
+#include <cobalt.h>
 
 void cobalt_machine_halt(void)
 {
index 88d34f1..d0dd817 100644 (file)
 #include <asm/bootinfo.h>
 #include <asm/time.h>
 #include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/processor.h>
 #include <asm/reboot.h>
 #include <asm/gt64120.h>
 
-#include <asm/mach-cobalt/cobalt.h>
+#include <cobalt.h>
 
 extern void cobalt_machine_restart(char *command);
 extern void cobalt_machine_halt(void);
@@ -63,22 +61,6 @@ void __init plat_timer_setup(struct irqaction *irq)
        GT_WRITE(GT_INTRMASK_OFS, GT_INTR_T0EXP_MSK | GT_READ(GT_INTRMASK_OFS));
 }
 
-extern struct pci_ops gt64111_pci_ops;
-
-static struct resource cobalt_mem_resource = {
-       .start  = GT_DEF_PCI0_MEM0_BASE,
-       .end    = GT_DEF_PCI0_MEM0_BASE + GT_DEF_PCI0_MEM0_SIZE - 1,
-       .name   = "PCI memory",
-       .flags  = IORESOURCE_MEM
-};
-
-static struct resource cobalt_io_resource = {
-       .start  = 0x1000,
-       .end    = 0xffff,
-       .name   = "PCI I/O",
-       .flags  = IORESOURCE_IO
-};
-
 /*
  * Cobalt doesn't have PS/2 keyboard/mouse interfaces,
  * so the keyboard controller is never used.
@@ -111,14 +93,6 @@ static struct resource cobalt_reserved_resources[] = {
        },
 };
 
-static struct pci_controller cobalt_pci_controller = {
-       .pci_ops        = &gt64111_pci_ops,
-       .mem_resource   = &cobalt_mem_resource,
-       .mem_offset     = 0,
-       .io_resource    = &cobalt_io_resource,
-       .io_offset      = 0 - GT_DEF_PCI0_IO_BASE,
-};
-
 void __init plat_mem_setup(void)
 {
        static struct uart_port uart;
@@ -146,10 +120,6 @@ void __init plat_mem_setup(void)
 
        printk("Cobalt board ID: %d\n", cobalt_board_id);
 
-#ifdef CONFIG_PCI
-       register_pci_controller(&cobalt_pci_controller);
-#endif
-
        if (cobalt_board_id > COBALT_BRD_ID_RAQ1) {
 #ifdef CONFIG_SERIAL_8250
                uart.line       = 0;
index 21a0947..068e48e 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.20
-# Tue Feb 20 21:47:34 2007
+# Linux kernel version: 2.6.21-rc3
+# Thu Mar 15 00:40:40 2007
 #
 CONFIG_MIPS=y
 
@@ -70,7 +70,7 @@ CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_GENERIC_TIME=y
 CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_DMA_NONCOHERENT=y
 CONFIG_DMA_NEED_PCI_MAP_STATE=y
 CONFIG_CPU_BIG_ENDIAN=y
@@ -138,12 +138,12 @@ CONFIG_ZONE_DMA_FLAG=1
 # CONFIG_HZ_48 is not set
 # CONFIG_HZ_100 is not set
 # CONFIG_HZ_128 is not set
-# CONFIG_HZ_250 is not set
+CONFIG_HZ_250=y
 # CONFIG_HZ_256 is not set
-CONFIG_HZ_1000=y
+# CONFIG_HZ_1000 is not set
 # CONFIG_HZ_1024 is not set
 CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
-CONFIG_HZ=1000
+CONFIG_HZ=250
 CONFIG_PREEMPT_NONE=y
 # CONFIG_PREEMPT_VOLUNTARY is not set
 # CONFIG_PREEMPT is not set
@@ -175,14 +175,15 @@ CONFIG_SYSVIPC_SYSCTL=y
 # CONFIG_AUDIT is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_SYSFS_DEPRECATED=y
-CONFIG_RELAY=y
+# CONFIG_RELAY is not set
+# CONFIG_BLK_DEV_INITRD is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
 CONFIG_EMBEDDED=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_HOTPLUG=y
+# CONFIG_HOTPLUG is not set
 CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
@@ -217,11 +218,11 @@ CONFIG_IOSCHED_NOOP=y
 CONFIG_IOSCHED_AS=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_AS is not set
 # CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
 
 #
 # Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -233,12 +234,10 @@ CONFIG_MMU=y
 #
 # PCCARD (PCMCIA/CardBus) support
 #
-# CONFIG_PCCARD is not set
 
 #
 # PCI Hotplug Support
 #
-# CONFIG_HOTPLUG_PCI is not set
 
 #
 # Executable file formats
@@ -250,10 +249,7 @@ CONFIG_TRAD_SIGNALS=y
 #
 # Power management options
 #
-CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
-# CONFIG_PM_DEBUG is not set
-# CONFIG_PM_SYSFS_DEPRECATED is not set
+# CONFIG_PM is not set
 
 #
 # Networking
@@ -267,12 +263,7 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 # CONFIG_PACKET_MMAP is not set
 CONFIG_UNIX=y
-CONFIG_XFRM=y
-CONFIG_XFRM_USER=y
-# CONFIG_XFRM_SUB_POLICY is not set
-CONFIG_XFRM_MIGRATE=y
-CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
+# CONFIG_NET_KEY is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
 # CONFIG_IP_ADVANCED_ROUTER is not set
@@ -290,19 +281,18 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_INET_IPCOMP is not set
 # CONFIG_INET_XFRM_TUNNEL is not set
 # CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-CONFIG_INET_DIAG=y
-CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
-CONFIG_TCP_MD5SIG=y
+# CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_INET6_XFRM_TUNNEL is not set
 # CONFIG_INET6_TUNNEL is not set
-CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
 
 #
@@ -343,13 +333,7 @@ CONFIG_NETWORK_SECMARK=y
 # CONFIG_HAMRADIO is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
-CONFIG_IEEE80211=y
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=y
-CONFIG_IEEE80211_CRYPT_CCMP=y
-CONFIG_IEEE80211_SOFTMAC=y
-# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
-CONFIG_WIRELESS_EXT=y
+# CONFIG_IEEE80211 is not set
 
 #
 # Device Drivers
@@ -360,14 +344,12 @@ CONFIG_WIRELESS_EXT=y
 #
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=y
 # CONFIG_SYS_HYPERVISOR is not set
 
 #
 # Connector - unified userspace <-> kernelspace linker
 #
-CONFIG_CONNECTOR=y
-CONFIG_PROC_EVENTS=y
+# CONFIG_CONNECTOR is not set
 
 #
 # Memory Technology Devices (MTD)
@@ -396,16 +378,13 @@ CONFIG_PROC_EVENTS=y
 # CONFIG_BLK_DEV_NBD is not set
 # CONFIG_BLK_DEV_SX8 is not set
 # CONFIG_BLK_DEV_RAM is not set
-# CONFIG_BLK_DEV_INITRD is not set
-CONFIG_CDROM_PKTCDVD=y
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-# CONFIG_CDROM_PKTCDVD_WCACHE is not set
-CONFIG_ATA_OVER_ETH=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
 
 #
 # Misc devices
 #
-CONFIG_SGI_IOC4=y
+# CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
 
 #
@@ -416,7 +395,7 @@ CONFIG_SGI_IOC4=y
 #
 # SCSI device support
 #
-CONFIG_RAID_ATTRS=y
+# CONFIG_RAID_ATTRS is not set
 # CONFIG_SCSI is not set
 # CONFIG_SCSI_NETLINK is not set
 
@@ -462,26 +441,13 @@ CONFIG_NETDEVICES=y
 #
 # PHY device support
 #
-CONFIG_PHYLIB=y
-
-#
-# MII PHY device drivers
-#
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_QSEMI_PHY=y
-CONFIG_LXT_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_SMSC_PHY=y
-# CONFIG_BROADCOM_PHY is not set
-# CONFIG_FIXED_PHY is not set
+# CONFIG_PHYLIB is not set
 
 #
 # Ethernet (10 or 100Mbit)
 #
 CONFIG_NET_ETHERNET=y
-# CONFIG_MII is not set
+CONFIG_MII=y
 # CONFIG_HAPPYMEAL is not set
 # CONFIG_SUNGEM is not set
 # CONFIG_CASSINI is not set
@@ -493,7 +459,27 @@ CONFIG_NET_ETHERNET=y
 #
 # CONFIG_NET_TULIP is not set
 # CONFIG_HP100 is not set
-# CONFIG_NET_PCI is not set
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+CONFIG_TC35815=y
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+# CONFIG_E100 is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_SC92031 is not set
 
 #
 # Ethernet (1000 Mbit)
@@ -509,20 +495,21 @@ CONFIG_NET_ETHERNET=y
 # CONFIG_SKGE is not set
 # CONFIG_SKY2 is not set
 # CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
 # CONFIG_TIGON3 is not set
 # CONFIG_BNX2 is not set
-CONFIG_QLA3XXX=y
+# CONFIG_QLA3XXX is not set
 # CONFIG_ATL1 is not set
 
 #
 # Ethernet (10000 Mbit)
 #
 # CONFIG_CHELSIO_T1 is not set
-CONFIG_CHELSIO_T3=y
+# CONFIG_CHELSIO_T3 is not set
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
-CONFIG_NETXEN_NIC=y
+# CONFIG_NETXEN_NIC is not set
 
 #
 # Token Ring devices
@@ -566,10 +553,7 @@ CONFIG_INPUT=y
 #
 # Userland interfaces
 #
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_MOUSEDEV is not set
 # CONFIG_INPUT_JOYDEV is not set
 # CONFIG_INPUT_TSDEV is not set
 # CONFIG_INPUT_EVDEV is not set
@@ -587,21 +571,13 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
 #
 # Hardware I/O ports
 #
-CONFIG_SERIO=y
-# CONFIG_SERIO_I8042 is not set
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_PCIPS2 is not set
-# CONFIG_SERIO_LIBPS2 is not set
-CONFIG_SERIO_RAW=y
+# CONFIG_SERIO is not set
 # CONFIG_GAMEPORT is not set
 
 #
 # Character devices
 #
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_VT is not set
 CONFIG_SERIAL_NONSTANDARD=y
 # CONFIG_COMPUTONE is not set
 # CONFIG_ROCKETPORT is not set
@@ -609,7 +585,7 @@ CONFIG_SERIAL_NONSTANDARD=y
 # CONFIG_DIGIEPCA is not set
 # CONFIG_MOXA_INTELLIO is not set
 # CONFIG_MOXA_SMARTIO is not set
-CONFIG_MOXA_SMARTIO_NEW=y
+# CONFIG_MOXA_SMARTIO_NEW is not set
 # CONFIG_ISI is not set
 # CONFIG_SYNCLINKMP is not set
 # CONFIG_SYNCLINK_GT is not set
@@ -629,11 +605,12 @@ CONFIG_MOXA_SMARTIO_NEW=y
 # Non-8250 serial port support
 #
 CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
 CONFIG_SERIAL_TXX9=y
 CONFIG_HAS_TXX9_SERIAL=y
 CONFIG_SERIAL_TXX9_NR_UARTS=6
-# CONFIG_SERIAL_TXX9_CONSOLE is not set
-# CONFIG_SERIAL_TXX9_STDSERIAL is not set
+CONFIG_SERIAL_TXX9_CONSOLE=y
+CONFIG_SERIAL_TXX9_STDSERIAL=y
 # CONFIG_SERIAL_JSM is not set
 # CONFIG_UNIX98_PTYS is not set
 CONFIG_LEGACY_PTYS=y
@@ -684,6 +661,11 @@ CONFIG_LEGACY_PTY_COUNT=256
 # CONFIG_HWMON is not set
 # CONFIG_HWMON_VID is not set
 
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
 #
 # Multimedia devices
 #
@@ -697,51 +679,8 @@ CONFIG_LEGACY_PTY_COUNT=256
 #
 # Graphics support
 #
-# CONFIG_FIRMWARE_EDID is not set
-CONFIG_FB=y
-# CONFIG_FB_CFB_FILLRECT is not set
-# CONFIG_FB_CFB_COPYAREA is not set
-# CONFIG_FB_CFB_IMAGEBLIT is not set
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-# CONFIG_FB_BACKLIGHT is not set
-# CONFIG_FB_MODE_HELPERS is not set
-# CONFIG_FB_TILEBLITTING is not set
-# CONFIG_FB_CIRRUS is not set
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-# CONFIG_FB_S1D13XXX is not set
-# CONFIG_FB_NVIDIA is not set
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_MATROX is not set
-# CONFIG_FB_RADEON is not set
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_ATY is not set
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_SMIVGX is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_VIRTUAL is not set
-
-#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-# CONFIG_FRAMEBUFFER_CONSOLE is not set
-
-#
-# Logo configuration
-#
-# CONFIG_LOGO is not set
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+# CONFIG_FB is not set
 
 #
 # Sound
@@ -864,7 +803,7 @@ CONFIG_INOTIFY_USER=y
 CONFIG_DNOTIFY=y
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
-CONFIG_FUSE_FS=y
+# CONFIG_FUSE_FS is not set
 
 #
 # CD-ROM/DVD Filesystems
@@ -889,14 +828,13 @@ CONFIG_SYSFS=y
 # CONFIG_TMPFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_RAMFS=y
-CONFIG_CONFIGFS_FS=y
+# CONFIG_CONFIGFS_FS is not set
 
 #
 # Miscellaneous filesystems
 #
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
-# CONFIG_ECRYPT_FS is not set
 # CONFIG_HFS_FS is not set
 # CONFIG_HFSPLUS_FS is not set
 # CONFIG_BEFS_FS is not set
@@ -944,10 +882,7 @@ CONFIG_MSDOS_PARTITION=y
 #
 # Distributed Lock Manager
 #
-CONFIG_DLM=y
-CONFIG_DLM_TCP=y
-# CONFIG_DLM_SCTP is not set
-# CONFIG_DLM_DEBUG is not set
+# CONFIG_DLM is not set
 
 #
 # Profiling support
@@ -972,65 +907,22 @@ CONFIG_CMDLINE=""
 #
 # Security options
 #
-CONFIG_KEYS=y
-CONFIG_KEYS_DEBUG_PROC_KEYS=y
+# CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
 
 #
 # Cryptographic options
 #
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_MANAGER=y
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_XCBC=y
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_WP512=y
-CONFIG_CRYPTO_TGR192=y
-CONFIG_CRYPTO_GF128MUL=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_PCBC=y
-CONFIG_CRYPTO_LRW=y
-CONFIG_CRYPTO_DES=y
-CONFIG_CRYPTO_FCRYPT=y
-CONFIG_CRYPTO_BLOWFISH=y
-CONFIG_CRYPTO_TWOFISH=y
-CONFIG_CRYPTO_TWOFISH_COMMON=y
-CONFIG_CRYPTO_SERPENT=y
-CONFIG_CRYPTO_AES=y
-CONFIG_CRYPTO_CAST5=y
-CONFIG_CRYPTO_CAST6=y
-CONFIG_CRYPTO_TEA=y
-CONFIG_CRYPTO_ARC4=y
-CONFIG_CRYPTO_KHAZAD=y
-CONFIG_CRYPTO_ANUBIS=y
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_MICHAEL_MIC=y
-CONFIG_CRYPTO_CRC32C=y
-CONFIG_CRYPTO_CAMELLIA=y
-
-#
-# Hardware crypto devices
-#
+# CONFIG_CRYPTO is not set
 
 #
 # Library routines
 #
 CONFIG_BITREVERSE=y
 # CONFIG_CRC_CCITT is not set
-CONFIG_CRC16=y
+# CONFIG_CRC16 is not set
 CONFIG_CRC32=y
-CONFIG_LIBCRC32C=y
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=y
+# CONFIG_LIBCRC32C is not set
 CONFIG_PLIST=y
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
diff --git a/arch/mips/configs/pnx8550-v2pci_defconfig b/arch/mips/configs/pnx8550-v2pci_defconfig
deleted file mode 100644 (file)
index 3d6c2d7..0000000
+++ /dev/null
@@ -1,1540 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.20
-# Tue Feb 20 21:47:39 2007
-#
-CONFIG_MIPS=y
-
-#
-# Machine selection
-#
-CONFIG_ZONE_DMA=y
-# CONFIG_MIPS_MTX1 is not set
-# CONFIG_MIPS_BOSPORUS is not set
-# CONFIG_MIPS_PB1000 is not set
-# CONFIG_MIPS_PB1100 is not set
-# CONFIG_MIPS_PB1500 is not set
-# CONFIG_MIPS_PB1550 is not set
-# CONFIG_MIPS_PB1200 is not set
-# CONFIG_MIPS_DB1000 is not set
-# CONFIG_MIPS_DB1100 is not set
-# CONFIG_MIPS_DB1500 is not set
-# CONFIG_MIPS_DB1550 is not set
-# CONFIG_MIPS_DB1200 is not set
-# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
-# CONFIG_MIPS_COBALT is not set
-# CONFIG_MACH_DECSTATION is not set
-# CONFIG_MIPS_EV64120 is not set
-# CONFIG_MACH_JAZZ is not set
-# CONFIG_LASAT is not set
-# CONFIG_MIPS_ATLAS is not set
-# CONFIG_MIPS_MALTA is not set
-# CONFIG_MIPS_SEAD is not set
-# CONFIG_WR_PPMC is not set
-# CONFIG_MIPS_SIM is not set
-# CONFIG_MOMENCO_JAGUAR_ATX is not set
-# CONFIG_MOMENCO_OCELOT is not set
-# CONFIG_MOMENCO_OCELOT_3 is not set
-# CONFIG_MOMENCO_OCELOT_C is not set
-# CONFIG_MOMENCO_OCELOT_G is not set
-# CONFIG_MIPS_XXS1500 is not set
-# CONFIG_PNX8550_JBS is not set
-# CONFIG_PNX8550_STB810 is not set
-# CONFIG_DDB5477 is not set
-# CONFIG_MACH_VR41XX is not set
-# CONFIG_PMC_YOSEMITE is not set
-# CONFIG_QEMU is not set
-# CONFIG_MARKEINS is not set
-# CONFIG_SGI_IP22 is not set
-# CONFIG_SGI_IP27 is not set
-# CONFIG_SGI_IP32 is not set
-# CONFIG_SIBYTE_BIGSUR is not set
-# CONFIG_SIBYTE_SWARM is not set
-# CONFIG_SIBYTE_SENTOSA is not set
-# CONFIG_SIBYTE_RHONE is not set
-# CONFIG_SIBYTE_CARMEL is not set
-# CONFIG_SIBYTE_PTSWARM is not set
-# CONFIG_SIBYTE_LITTLESUR is not set
-# CONFIG_SIBYTE_CRHINE is not set
-# CONFIG_SIBYTE_CRHONE is not set
-# CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_GENERIC_FIND_NEXT_BIT=y
-CONFIG_GENERIC_HWEIGHT=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_GENERIC_TIME=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-# CONFIG_CPU_BIG_ENDIAN is not set
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
-CONFIG_PNX8550=y
-CONFIG_SOC_PNX8550=y
-CONFIG_MIPS_L1_CACHE_SHIFT=5
-
-#
-# CPU selection
-#
-CONFIG_CPU_MIPS32_R1=y
-# CONFIG_CPU_MIPS32_R2 is not set
-# CONFIG_CPU_MIPS64_R1 is not set
-# CONFIG_CPU_MIPS64_R2 is not set
-# CONFIG_CPU_R3000 is not set
-# CONFIG_CPU_TX39XX is not set
-# CONFIG_CPU_VR41XX is not set
-# CONFIG_CPU_R4300 is not set
-# CONFIG_CPU_R4X00 is not set
-# CONFIG_CPU_TX49XX is not set
-# CONFIG_CPU_R5000 is not set
-# CONFIG_CPU_R5432 is not set
-# CONFIG_CPU_R6000 is not set
-# CONFIG_CPU_NEVADA is not set
-# CONFIG_CPU_R8000 is not set
-# CONFIG_CPU_R10000 is not set
-# CONFIG_CPU_RM7000 is not set
-# CONFIG_CPU_RM9000 is not set
-# CONFIG_CPU_SB1 is not set
-CONFIG_SYS_HAS_CPU_MIPS32_R1=y
-CONFIG_CPU_MIPS32=y
-CONFIG_CPU_MIPSR1=y
-CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
-CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
-
-#
-# Kernel type
-#
-CONFIG_32BIT=y
-# CONFIG_64BIT is not set
-CONFIG_PAGE_SIZE_4KB=y
-# CONFIG_PAGE_SIZE_8KB is not set
-# CONFIG_PAGE_SIZE_16KB is not set
-# CONFIG_PAGE_SIZE_64KB is not set
-CONFIG_CPU_HAS_PREFETCH=y
-CONFIG_MIPS_MT_DISABLED=y
-# CONFIG_MIPS_MT_SMP is not set
-# CONFIG_MIPS_MT_SMTC is not set
-# CONFIG_MIPS_VPE_LOADER is not set
-# CONFIG_64BIT_PHYS_ADDR is not set
-CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_SYNC=y
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_CPU_SUPPORTS_HIGHMEM=y
-CONFIG_ARCH_FLATMEM_ENABLE=y
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
-# CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
-# CONFIG_HZ_48 is not set
-# CONFIG_HZ_100 is not set
-# CONFIG_HZ_128 is not set
-CONFIG_HZ_250=y
-# CONFIG_HZ_256 is not set
-# CONFIG_HZ_1000 is not set
-# CONFIG_HZ_1024 is not set
-CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
-CONFIG_HZ=250
-CONFIG_PREEMPT_NONE=y
-# CONFIG_PREEMPT_VOLUNTARY is not set
-# CONFIG_PREEMPT is not set
-# CONFIG_KEXEC is not set
-CONFIG_LOCKDEP_SUPPORT=y
-CONFIG_STACKTRACE_SUPPORT=y
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-
-#
-# General setup
-#
-CONFIG_LOCALVERSION=""
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
-CONFIG_SYSVIPC_SYSCTL=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-# CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
-# CONFIG_AUDIT is not set
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_SYSFS_DEPRECATED=y
-# CONFIG_RELAY is not set
-CONFIG_INITRAMFS_SOURCE=""
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_SYSCTL=y
-CONFIG_EMBEDDED=y
-# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_HOTPLUG=y
-CONFIG_PRINTK=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_SHMEM=y
-CONFIG_SLAB=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
-CONFIG_BASE_SMALL=0
-# CONFIG_SLOB is not set
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-# CONFIG_MODULE_UNLOAD is not set
-# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
-
-#
-# Block layer
-#
-CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
-# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
-
-#
-# Bus options (PCI, PCMCIA, EISA, ISA, TC)
-#
-CONFIG_HW_HAS_PCI=y
-CONFIG_PCI=y
-CONFIG_MMU=y
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
-# CONFIG_PCCARD is not set
-
-#
-# PCI Hotplug Support
-#
-# CONFIG_HOTPLUG_PCI is not set
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-CONFIG_TRAD_SIGNALS=y
-
-#
-# Power management options
-#
-CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
-# CONFIG_PM_DEBUG is not set
-# CONFIG_PM_SYSFS_DEPRECATED is not set
-
-#
-# Networking
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-# CONFIG_NETDEBUG is not set
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-CONFIG_XFRM_MIGRATE=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-CONFIG_IP_PNP=y
-# CONFIG_IP_PNP_DHCP is not set
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_XFRM_TUNNEL is not set
-CONFIG_INET_TUNNEL=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-CONFIG_INET_DIAG=y
-CONFIG_INET_TCP_DIAG=y
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_DEFAULT_TCP_CONG="cubic"
-CONFIG_TCP_MD5SIG=y
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-CONFIG_IPV6=m
-# CONFIG_IPV6_PRIVACY is not set
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-# CONFIG_INET6_AH is not set
-# CONFIG_INET6_ESP is not set
-# CONFIG_INET6_IPCOMP is not set
-# CONFIG_IPV6_MIP6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
-CONFIG_IPV6_SIT=m
-# CONFIG_IPV6_TUNNEL is not set
-# CONFIG_IPV6_MULTIPLE_TABLES is not set
-# CONFIG_NETWORK_SECMARK is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-
-#
-# Core Netfilter Configuration
-#
-# CONFIG_NETFILTER_NETLINK is not set
-CONFIG_NF_CONNTRACK_ENABLED=m
-CONFIG_NF_CONNTRACK_SUPPORT=y
-# CONFIG_IP_NF_CONNTRACK_SUPPORT is not set
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CT_ACCT=y
-CONFIG_NF_CONNTRACK_MARK=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_GRE=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NETFILTER_XTABLES=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_CONNTRACK_PROC_COMPAT=y
-# CONFIG_IP_NF_QUEUE is not set
-# CONFIG_IP_NF_IPTABLES is not set
-# CONFIG_IP_NF_ARPTABLES is not set
-
-#
-# IPv6: Netfilter Configuration (EXPERIMENTAL)
-#
-CONFIG_NF_CONNTRACK_IPV6=m
-# CONFIG_IP6_NF_QUEUE is not set
-# CONFIG_IP6_NF_IPTABLES is not set
-
-#
-# DCCP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_DCCP is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-
-#
-# TIPC Configuration (EXPERIMENTAL)
-#
-# CONFIG_TIPC is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-CONFIG_NET_CLS_ROUTE=y
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_IEEE80211 is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=y
-# CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
-# CONFIG_CONNECTOR is not set
-
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNPACPI is not set
-
-#
-# Block devices
-#
-# CONFIG_BLK_CPQ_DA is not set
-# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_UMEM is not set
-# CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=y
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_SX8 is not set
-# CONFIG_BLK_DEV_UB is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CDROM_PKTCDVD is not set
-# CONFIG_ATA_OVER_ETH is not set
-
-#
-# Misc devices
-#
-CONFIG_SGI_IOC4=m
-# CONFIG_TIFM_CORE is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-CONFIG_IDE=y
-CONFIG_IDE_MAX_HWIFS=4
-CONFIG_BLK_DEV_IDE=y
-
-#
-# Please see Documentation/ide.txt for help/info on IDE drives
-#
-# CONFIG_BLK_DEV_IDE_SATA is not set
-CONFIG_BLK_DEV_IDEDISK=y
-CONFIG_IDEDISK_MULTI_MODE=y
-# CONFIG_BLK_DEV_IDECD is not set
-# CONFIG_BLK_DEV_IDETAPE is not set
-# CONFIG_BLK_DEV_IDEFLOPPY is not set
-# CONFIG_BLK_DEV_IDESCSI is not set
-# CONFIG_IDE_TASK_IOCTL is not set
-
-#
-# IDE chipset support/bugfixes
-#
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_IDEPCI=y
-CONFIG_IDEPCI_SHARE_IRQ=y
-# CONFIG_BLK_DEV_OFFBOARD is not set
-# CONFIG_BLK_DEV_GENERIC is not set
-# CONFIG_BLK_DEV_OPTI621 is not set
-CONFIG_BLK_DEV_IDEDMA_PCI=y
-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-CONFIG_IDEDMA_PCI_AUTO=y
-# CONFIG_IDEDMA_ONLYDISK is not set
-# CONFIG_BLK_DEV_AEC62XX is not set
-# CONFIG_BLK_DEV_ALI15X3 is not set
-# CONFIG_BLK_DEV_AMD74XX is not set
-CONFIG_BLK_DEV_CMD64X=y
-# CONFIG_BLK_DEV_TRIFLEX is not set
-# CONFIG_BLK_DEV_CY82C693 is not set
-# CONFIG_BLK_DEV_CS5520 is not set
-# CONFIG_BLK_DEV_CS5530 is not set
-# CONFIG_BLK_DEV_HPT34X is not set
-# CONFIG_BLK_DEV_HPT366 is not set
-# CONFIG_BLK_DEV_JMICRON is not set
-# CONFIG_BLK_DEV_SC1200 is not set
-# CONFIG_BLK_DEV_PIIX is not set
-CONFIG_BLK_DEV_IT8213=m
-# CONFIG_BLK_DEV_IT821X is not set
-# CONFIG_BLK_DEV_NS87415 is not set
-# CONFIG_BLK_DEV_PDC202XX_OLD is not set
-# CONFIG_BLK_DEV_PDC202XX_NEW is not set
-# CONFIG_BLK_DEV_SVWKS is not set
-# CONFIG_BLK_DEV_SIIMAGE is not set
-# CONFIG_BLK_DEV_SLC90E66 is not set
-# CONFIG_BLK_DEV_TRM290 is not set
-# CONFIG_BLK_DEV_VIA82CXXX is not set
-CONFIG_BLK_DEV_TC86C001=m
-# CONFIG_IDE_ARM is not set
-CONFIG_BLK_DEV_IDEDMA=y
-# CONFIG_IDEDMA_IVB is not set
-CONFIG_IDEDMA_AUTO=y
-# CONFIG_BLK_DEV_HD is not set
-
-#
-# SCSI device support
-#
-# CONFIG_RAID_ATTRS is not set
-CONFIG_SCSI=y
-CONFIG_SCSI_TGT=m
-CONFIG_SCSI_NETLINK=y
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
-# CONFIG_CHR_DEV_SG is not set
-# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-CONFIG_SCSI_SCAN_ASYNC=y
-
-#
-# SCSI Transports
-#
-CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=y
-CONFIG_SCSI_ISCSI_ATTRS=m
-# CONFIG_SCSI_SAS_ATTRS is not set
-# CONFIG_SCSI_SAS_LIBSAS is not set
-
-#
-# SCSI low-level drivers
-#
-CONFIG_ISCSI_TCP=m
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_3W_9XXX is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AACRAID is not set
-CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
-CONFIG_AIC7XXX_DEBUG_MASK=0
-# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_AIC94XX is not set
-# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ARCMSR is not set
-# CONFIG_MEGARAID_NEWGEN is not set
-# CONFIG_MEGARAID_LEGACY is not set
-# CONFIG_MEGARAID_SAS is not set
-# CONFIG_SCSI_HPTIOP is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_STEX is not set
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-# CONFIG_SCSI_QLA_FC is not set
-# CONFIG_SCSI_QLA_ISCSI is not set
-# CONFIG_SCSI_LPFC is not set
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_DC390T is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-# CONFIG_SCSI_SRP is not set
-
-#
-# Serial ATA (prod) and Parallel ATA (experimental) drivers
-#
-# CONFIG_ATA is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
-# CONFIG_FUSION is not set
-# CONFIG_FUSION_SPI is not set
-# CONFIG_FUSION_FC is not set
-# CONFIG_FUSION_SAS is not set
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
-#
-# Network device support
-#
-CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-
-#
-# PHY device support
-#
-# CONFIG_PHYLIB is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNGEM is not set
-# CONFIG_CASSINI is not set
-# CONFIG_NET_VENDOR_3COM is not set
-# CONFIG_DM9000 is not set
-
-#
-# Tulip family network device support
-#
-# CONFIG_NET_TULIP is not set
-# CONFIG_HP100 is not set
-CONFIG_NET_PCI=y
-# CONFIG_PCNET32 is not set
-# CONFIG_AMD8111_ETH is not set
-# CONFIG_ADAPTEC_STARFIRE is not set
-# CONFIG_B44 is not set
-# CONFIG_FORCEDETH is not set
-# CONFIG_DGRS is not set
-# CONFIG_EEPRO100 is not set
-# CONFIG_E100 is not set
-# CONFIG_FEALNX is not set
-CONFIG_NATSEMI=y
-# CONFIG_NE2K_PCI is not set
-# CONFIG_8139CP is not set
-CONFIG_8139TOO=y
-# CONFIG_8139TOO_PIO is not set
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-# CONFIG_8139TOO_8129 is not set
-# CONFIG_8139_OLD_RX_RESET is not set
-# CONFIG_SIS900 is not set
-# CONFIG_EPIC100 is not set
-# CONFIG_SUNDANCE is not set
-# CONFIG_TLAN is not set
-# CONFIG_VIA_RHINE is not set
-# CONFIG_SC92031 is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-# CONFIG_ACENIC is not set
-# CONFIG_DL2K is not set
-# CONFIG_E1000 is not set
-# CONFIG_NS83820 is not set
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
-# CONFIG_SIS190 is not set
-# CONFIG_SKGE is not set
-# CONFIG_SKY2 is not set
-# CONFIG_SK98LIN is not set
-# CONFIG_VIA_VELOCITY is not set
-# CONFIG_TIGON3 is not set
-# CONFIG_BNX2 is not set
-# CONFIG_QLA3XXX is not set
-# CONFIG_ATL1 is not set
-
-#
-# Ethernet (10000 Mbit)
-#
-# CONFIG_CHELSIO_T1 is not set
-CONFIG_CHELSIO_T3=m
-# CONFIG_IXGB is not set
-# CONFIG_S2IO is not set
-# CONFIG_MYRI10GE is not set
-CONFIG_NETXEN_NIC=m
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-CONFIG_PPP=m
-# CONFIG_PPP_MULTILINK is not set
-# CONFIG_PPP_FILTER is not set
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-# CONFIG_PPP_BSDCOMP is not set
-CONFIG_PPP_MPPE=m
-# CONFIG_PPPOE is not set
-# CONFIG_SLIP is not set
-CONFIG_SLHC=m
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-# CONFIG_INPUT_FF_MEMLESS is not set
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-CONFIG_INPUT_EVDEV=m
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_VSXXXAA is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Hardware I/O ports
-#
-CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_PCIPS2 is not set
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_RAW is not set
-# CONFIG_GAMEPORT is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-# CONFIG_VT_CONSOLE is not set
-CONFIG_HW_CONSOLE=y
-# CONFIG_VT_HW_CONSOLE_BINDING is not set
-CONFIG_SERIAL_NONSTANDARD=y
-# CONFIG_COMPUTONE is not set
-# CONFIG_ROCKETPORT is not set
-# CONFIG_CYCLADES is not set
-# CONFIG_DIGIEPCA is not set
-# CONFIG_MOXA_INTELLIO is not set
-# CONFIG_MOXA_SMARTIO is not set
-CONFIG_MOXA_SMARTIO_NEW=m
-# CONFIG_ISI is not set
-# CONFIG_SYNCLINKMP is not set
-# CONFIG_SYNCLINK_GT is not set
-# CONFIG_N_HDLC is not set
-# CONFIG_RISCOM8 is not set
-# CONFIG_SPECIALIX is not set
-# CONFIG_SX is not set
-# CONFIG_RIO is not set
-# CONFIG_STALDRV is not set
-
-#
-# Serial drivers
-#
-# CONFIG_SERIAL_8250 is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_PNX8XXX=y
-CONFIG_SERIAL_PNX8XXX_CONSOLE=y
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-# CONFIG_SERIAL_JSM is not set
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-CONFIG_HW_RANDOM=y
-# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
-# CONFIG_TCG_TPM is not set
-
-#
-# I2C support
-#
-CONFIG_I2C=m
-CONFIG_I2C_CHARDEV=m
-
-#
-# I2C Algorithms
-#
-CONFIG_I2C_ALGOBIT=m
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
-
-#
-# I2C Hardware Bus support
-#
-# CONFIG_I2C_ALI1535 is not set
-# CONFIG_I2C_ALI1563 is not set
-# CONFIG_I2C_ALI15X3 is not set
-# CONFIG_I2C_AMD756 is not set
-# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_I801 is not set
-# CONFIG_I2C_I810 is not set
-# CONFIG_I2C_PIIX4 is not set
-# CONFIG_I2C_NFORCE2 is not set
-# CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
-# CONFIG_I2C_PASEMI is not set
-# CONFIG_I2C_PROSAVAGE is not set
-# CONFIG_I2C_SAVAGE4 is not set
-# CONFIG_I2C_SIS5595 is not set
-# CONFIG_I2C_SIS630 is not set
-# CONFIG_I2C_SIS96X is not set
-# CONFIG_I2C_STUB is not set
-# CONFIG_I2C_VIA is not set
-# CONFIG_I2C_VIAPRO is not set
-# CONFIG_I2C_VOODOO3 is not set
-# CONFIG_I2C_PCA_ISA is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_DS1337 is not set
-# CONFIG_SENSORS_DS1374 is not set
-# CONFIG_SENSORS_EEPROM is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_MAX6875 is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# SPI support
-#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
-
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
-#
-# Hardware Monitoring support
-#
-CONFIG_HWMON=y
-# CONFIG_HWMON_VID is not set
-# CONFIG_SENSORS_ABITUGURU is not set
-# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1026 is not set
-# CONFIG_SENSORS_ADM1029 is not set
-# CONFIG_SENSORS_ADM1031 is not set
-# CONFIG_SENSORS_ADM9240 is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_ATXP1 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_F71805F is not set
-# CONFIG_SENSORS_FSCHER is not set
-# CONFIG_SENSORS_FSCPOS is not set
-# CONFIG_SENSORS_GL518SM is not set
-# CONFIG_SENSORS_GL520SM is not set
-# CONFIG_SENSORS_IT87 is not set
-# CONFIG_SENSORS_LM63 is not set
-# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM77 is not set
-# CONFIG_SENSORS_LM78 is not set
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
-# CONFIG_SENSORS_LM85 is not set
-# CONFIG_SENSORS_LM87 is not set
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_LM92 is not set
-# CONFIG_SENSORS_MAX1619 is not set
-# CONFIG_SENSORS_PC87360 is not set
-# CONFIG_SENSORS_PC87427 is not set
-# CONFIG_SENSORS_SIS5595 is not set
-# CONFIG_SENSORS_SMSC47M1 is not set
-# CONFIG_SENSORS_SMSC47M192 is not set
-# CONFIG_SENSORS_SMSC47B397 is not set
-# CONFIG_SENSORS_VIA686A is not set
-# CONFIG_SENSORS_VT1211 is not set
-# CONFIG_SENSORS_VT8231 is not set
-# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83791D is not set
-# CONFIG_SENSORS_W83792D is not set
-# CONFIG_SENSORS_W83793 is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83627HF is not set
-# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-# CONFIG_USB_DABUSB is not set
-
-#
-# Graphics support
-#
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB=y
-# CONFIG_FB_DDC is not set
-# CONFIG_FB_CFB_FILLRECT is not set
-# CONFIG_FB_CFB_COPYAREA is not set
-# CONFIG_FB_CFB_IMAGEBLIT is not set
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-# CONFIG_FB_BACKLIGHT is not set
-# CONFIG_FB_MODE_HELPERS is not set
-# CONFIG_FB_TILEBLITTING is not set
-# CONFIG_FB_CIRRUS is not set
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-# CONFIG_FB_S1D13XXX is not set
-# CONFIG_FB_NVIDIA is not set
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_MATROX is not set
-# CONFIG_FB_RADEON is not set
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_ATY is not set
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_SMIVGX is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_VIRTUAL is not set
-
-#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-# CONFIG_FRAMEBUFFER_CONSOLE is not set
-
-#
-# Logo configuration
-#
-# CONFIG_LOGO is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# HID Devices
-#
-CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
-
-#
-# USB support
-#
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB_ARCH_HAS_OHCI=y
-CONFIG_USB_ARCH_HAS_EHCI=y
-CONFIG_USB=y
-# CONFIG_USB_DEBUG is not set
-
-#
-# Miscellaneous USB options
-#
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_SUSPEND is not set
-# CONFIG_USB_OTG is not set
-
-#
-# USB Host Controller Drivers
-#
-# CONFIG_USB_EHCI_HCD is not set
-# CONFIG_USB_ISP116X_HCD is not set
-# CONFIG_USB_OHCI_HCD is not set
-# CONFIG_USB_UHCI_HCD is not set
-# CONFIG_USB_SL811_HCD is not set
-
-#
-# USB Device Class drivers
-#
-# CONFIG_USB_ACM is not set
-# CONFIG_USB_PRINTER is not set
-
-#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
-#
-
-#
-# may also be needed; see USB_STORAGE Help for more information
-#
-CONFIG_USB_STORAGE=y
-# CONFIG_USB_STORAGE_DEBUG is not set
-# CONFIG_USB_STORAGE_DATAFAB is not set
-# CONFIG_USB_STORAGE_FREECOM is not set
-# CONFIG_USB_STORAGE_ISD200 is not set
-# CONFIG_USB_STORAGE_DPCM is not set
-# CONFIG_USB_STORAGE_USBAT is not set
-# CONFIG_USB_STORAGE_SDDR09 is not set
-# CONFIG_USB_STORAGE_SDDR55 is not set
-# CONFIG_USB_STORAGE_JUMPSHOT is not set
-# CONFIG_USB_STORAGE_ALAUDA is not set
-# CONFIG_USB_STORAGE_KARMA is not set
-# CONFIG_USB_LIBUSUAL is not set
-
-#
-# USB Input Devices
-#
-CONFIG_USB_HID=y
-# CONFIG_USB_HIDINPUT_POWERBOOK is not set
-# CONFIG_HID_FF is not set
-CONFIG_USB_HIDDEV=y
-# CONFIG_USB_AIPTEK is not set
-# CONFIG_USB_WACOM is not set
-# CONFIG_USB_ACECAD is not set
-# CONFIG_USB_KBTAB is not set
-# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_TOUCHSCREEN is not set
-# CONFIG_USB_YEALINK is not set
-# CONFIG_USB_XPAD is not set
-# CONFIG_USB_ATI_REMOTE is not set
-# CONFIG_USB_ATI_REMOTE2 is not set
-# CONFIG_USB_KEYSPAN_REMOTE is not set
-# CONFIG_USB_APPLETOUCH is not set
-# CONFIG_USB_GTCO is not set
-
-#
-# USB Imaging devices
-#
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_MICROTEK is not set
-
-#
-# USB Network Adapters
-#
-# CONFIG_USB_CATC is not set
-# CONFIG_USB_KAWETH is not set
-# CONFIG_USB_PEGASUS is not set
-# CONFIG_USB_RTL8150 is not set
-# CONFIG_USB_USBNET_MII is not set
-# CONFIG_USB_USBNET is not set
-CONFIG_USB_MON=y
-
-#
-# USB port drivers
-#
-
-#
-# USB Serial Converter support
-#
-# CONFIG_USB_SERIAL is not set
-
-#
-# USB Miscellaneous drivers
-#
-# CONFIG_USB_EMI62 is not set
-# CONFIG_USB_EMI26 is not set
-# CONFIG_USB_ADUTUX is not set
-# CONFIG_USB_AUERSWALD is not set
-# CONFIG_USB_RIO500 is not set
-# CONFIG_USB_LEGOTOWER is not set
-# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
-# CONFIG_USB_LED is not set
-# CONFIG_USB_CYPRESS_CY7C63 is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGET is not set
-# CONFIG_USB_IDMOUSE is not set
-# CONFIG_USB_FTDI_ELAN is not set
-# CONFIG_USB_APPLEDISPLAY is not set
-# CONFIG_USB_LD is not set
-# CONFIG_USB_TRANCEVIBRATOR is not set
-# CONFIG_USB_TEST is not set
-
-#
-# USB DSL modem support
-#
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# MMC/SD Card support
-#
-# CONFIG_MMC is not set
-
-#
-# LED devices
-#
-# CONFIG_NEW_LEDS is not set
-
-#
-# LED drivers
-#
-
-#
-# LED Triggers
-#
-
-#
-# InfiniBand support
-#
-# CONFIG_INFINIBAND is not set
-
-#
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
-#
-
-#
-# Real Time Clock
-#
-# CONFIG_RTC_CLASS is not set
-
-#
-# DMA Engine support
-#
-# CONFIG_DMA_ENGINE is not set
-
-#
-# DMA Clients
-#
-
-#
-# DMA Devices
-#
-
-#
-# Auxiliary Display support
-#
-
-#
-# Virtualization
-#
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-# CONFIG_EXT2_FS_XIP is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-# CONFIG_EXT3_FS_POSIX_ACL is not set
-# CONFIG_EXT3_FS_SECURITY is not set
-# CONFIG_EXT4DEV_FS is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_FS_POSIX_ACL is not set
-CONFIG_XFS_FS=m
-# CONFIG_XFS_QUOTA is not set
-# CONFIG_XFS_SECURITY is not set
-# CONFIG_XFS_POSIX_ACL is not set
-# CONFIG_XFS_RT is not set
-# CONFIG_GFS2_FS is not set
-# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_INOTIFY=y
-CONFIG_INOTIFY_USER=y
-# CONFIG_QUOTA is not set
-CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-# CONFIG_FUSE_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-# CONFIG_PROC_KCORE is not set
-CONFIG_PROC_SYSCTL=y
-CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-CONFIG_CRAMFS=y
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_V3_ACL is not set
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-CONFIG_NFSD=m
-# CONFIG_NFSD_V3 is not set
-# CONFIG_NFSD_TCP is not set
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-CONFIG_SMB_FS=m
-# CONFIG_SMB_NLS_DEFAULT is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-
-#
-# Distributed Lock Manager
-#
-CONFIG_DLM=m
-CONFIG_DLM_TCP=y
-# CONFIG_DLM_SCTP is not set
-# CONFIG_DLM_DEBUG is not set
-
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
-# Kernel hacking
-#
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-# CONFIG_PRINTK_TIME is not set
-CONFIG_ENABLE_MUST_CHECK=y
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
-# CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
-CONFIG_SYS_SUPPORTS_KGDB=y
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_BLKCIPHER=m
-CONFIG_CRYPTO_HASH=m
-CONFIG_CRYPTO_MANAGER=m
-# CONFIG_CRYPTO_HMAC is not set
-CONFIG_CRYPTO_XCBC=m
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_SHA1=m
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
-# CONFIG_CRYPTO_TGR192 is not set
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_LRW=m
-# CONFIG_CRYPTO_DES is not set
-CONFIG_CRYPTO_FCRYPT=m
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-CONFIG_CRYPTO_ARC4=m
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CAMELLIA=m
-# CONFIG_CRYPTO_TEST is not set
-
-#
-# Hardware crypto devices
-#
-
-#
-# Library routines
-#
-CONFIG_BITREVERSE=y
-CONFIG_CRC_CCITT=m
-# CONFIG_CRC16 is not set
-CONFIG_CRC32=y
-CONFIG_LIBCRC32C=m
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=m
-CONFIG_TEXTSEARCH=y
-CONFIG_TEXTSEARCH_KMP=m
-CONFIG_TEXTSEARCH_BM=m
-CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT=y
index 2fbe934..0d5289b 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/kernel.h>
 #include <asm/gt64120.h>
 
-extern struct pci_ops gt64120_pci_ops;
+extern struct pci_ops gt64xxx_pci0_ops;
 
 static struct resource pci0_io_resource = {
        .name  = "pci_0 io",
@@ -30,7 +30,7 @@ static struct resource pci0_mem_resource = {
 };
 
 static struct pci_controller hose_0 = {
-       .pci_ops        = &gt64120_pci_ops,
+       .pci_ops        = &gt64xxx_pci0_ops,
        .io_resource    = &pci0_io_resource,
        .mem_resource   = &pci0_mem_resource,
 };
index aa481b7..5398813 100644 (file)
 
 #include <asm/bootinfo.h>
 
-extern int prom_argc;
-extern char **prom_argv, **prom_envp;
-
-typedef struct
-{
-    char *name;
-/*    char *val; */
-}t_env_var;
-
-
 char * __init prom_getcmdline(void)
 {
        return &(arcs_cmdline[0]);
@@ -60,6 +50,8 @@ void  __init prom_init_cmdline(void)
 {
        char *cp;
        int actr;
+       int prom_argc = fw_arg0;
+       char **prom_argv = (char **) fw_arg1;
 
        actr = 1; /* Always ignore argv[0] */
 
index 1c1cad9..c611ab4 100644 (file)
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/types.h>
-#include <asm/jmr3927/txx927.h>
 #include <asm/jmr3927/tx3927.h>
-#include <asm/jmr3927/jmr3927.h>
 
 #define TIMEOUT       0xffffff
-#define SLOW_DOWN
-
-static const char digits[16] = "0123456789abcdef";
-
-#ifdef SLOW_DOWN
-#define slow_down() { int k; for (k=0; k<10000; k++); }
-#else
-#define slow_down()
-#endif
 
 void
-putch(const unsigned char c)
+prom_putchar(char c)
 {
         int i = 0;
 
         do {
-            slow_down();
             i++;
-            if (i>TIMEOUT) {
+            if (i>TIMEOUT)
                 break;
-            }
         } while (!(tx3927_sioptr(1)->cisr & TXx927_SICISR_TXALS));
        tx3927_sioptr(1)->tfifo = c;
        return;
 }
 
-unsigned char getch(void)
-{
-        int i = 0;
-       int dicr;
-       char c;
-
-       /* diable RX int. */
-       dicr = tx3927_sioptr(1)->dicr;
-       tx3927_sioptr(1)->dicr = 0;
-
-        do {
-            slow_down();
-            i++;
-            if (i>TIMEOUT) {
-                break;
-            }
-        } while (tx3927_sioptr(1)->disr & TXx927_SIDISR_UVALID)
-               ;
-       c = tx3927_sioptr(1)->rfifo;
-
-       /* clear RX int. status */
-       tx3927_sioptr(1)->disr &= ~TXx927_SIDISR_RDIS;
-       /* enable RX int. */
-       tx3927_sioptr(1)->dicr = dicr;
-
-       return c;
-}
-void
-do_jmr3927_led_set(char n)
-{
-    /* and with current leds */
-    jmr3927_led_and_set(n);
-}
-
 void
-puts(unsigned char *cp)
+puts(const char *cp)
 {
-    int i = 0;
-
-    while (*cp) {
-        do {
-            slow_down();
-            i++;
-            if (i>TIMEOUT) {
-                break;
-            }
-        } while (!(tx3927_sioptr(1)->cisr & TXx927_SICISR_TXALS));
-       tx3927_sioptr(1)->tfifo = *cp++;
-    }
-    putch('\r');
-    putch('\n');
-}
-
-void
-fputs(unsigned char *cp)
-{
-    int i = 0;
-
-    while (*cp) {
-        do {
-             slow_down();
-            i++;
-            if (i>TIMEOUT) {
-                break;
-            }
-        } while (!(tx3927_sioptr(1)->cisr & TXx927_SICISR_TXALS));
-       tx3927_sioptr(1)->tfifo = *cp++;
-    }
-}
-
-
-void
-put64(uint64_t ul)
-{
-    int cnt;
-    unsigned ch;
-
-    cnt = 16;            /* 16 nibbles in a 64 bit long */
-    putch('0');
-    putch('x');
-    do {
-        cnt--;
-        ch = (unsigned char)(ul >> cnt * 4) & 0x0F;
-                putch(digits[ch]);
-    } while (cnt > 0);
-}
-
-void
-put32(unsigned u)
-{
-    int cnt;
-    unsigned ch;
-
-    cnt = 8;            /* 8 nibbles in a 32 bit long */
-    putch('0');
-    putch('x');
-    do {
-        cnt--;
-        ch = (unsigned char)(u >> cnt * 4) & 0x0F;
-                putch(digits[ch]);
-    } while (cnt > 0);
+    while (*cp)
+       prom_putchar(*cp++);
+    prom_putchar('\r');
+    prom_putchar('\n');
 }
index 18fe9a8..8d00ba4 100644 (file)
@@ -3,5 +3,4 @@
 #
 
 obj-y                          += init.o irq.o setup.o
-obj-$(CONFIG_RUNTIME_DEBUG)    += debug.o
 obj-$(CONFIG_KGDB)             += kgdb_io.o
index a55cb45..9169fab 100644 (file)
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-
-#include <asm/addrspace.h>
 #include <asm/bootinfo.h>
-#include <asm/mipsregs.h>
 #include <asm/jmr3927/jmr3927.h>
 
-int prom_argc;
-char **prom_argv, **prom_envp;
 extern void  __init prom_init_cmdline(void);
-extern char *prom_getenv(char *envname);
-unsigned long mips_nofpu = 0;
 
 const char *get_system_type(void)
 {
@@ -52,7 +42,7 @@ const char *get_system_type(void)
        ;
 }
 
-extern void puts(unsigned char *cp);
+extern void puts(const char *cp);
 
 void __init prom_init(void)
 {
@@ -61,10 +51,6 @@ void __init prom_init(void)
        if ((tx3927_ccfgptr->ccfg & TX3927_CCFG_TLBOFF) == 0)
                puts("Warning: TX3927 TLB off\n");
 #endif
-       prom_argc = fw_arg0;
-       prom_argv = (char **) fw_arg1;
-       prom_envp = (char **) fw_arg2;
-
        mips_machgroup = MACH_GROUP_TOSHIBA;
 
 #ifdef CONFIG_TOSHIBA_JMR3927
index 7d2c203..1187b44 100644 (file)
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/init.h>
-
-#include <linux/errno.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/bitops.h>
 
-#include <asm/irq_regs.h>
 #include <asm/io.h>
 #include <asm/mipsregs.h>
 #include <asm/system.h>
 
-#include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/jmr3927/irq.h>
-#include <asm/debug.h>
 #include <asm/jmr3927/jmr3927.h>
 
 #if JMR3927_IRQ_END > NR_IRQS
 #error JMR3927_IRQ_END > NR_IRQS
 #endif
 
-struct tb_irq_space* tb_irq_spaces;
-
-static int jmr3927_irq_base = -1;
-
-#ifdef CONFIG_PCI
-static int jmr3927_gen_iack(void)
-{
-       /* generate ACK cycle */
-#ifdef __BIG_ENDIAN
-       return (tx3927_pcicptr->iiadp >> 24) & 0xff;
-#else
-       return tx3927_pcicptr->iiadp & 0xff;
-#endif
-}
-#endif
-
 #define irc_dlevel     0
 #define irc_elevel     1
 
@@ -87,89 +55,24 @@ static unsigned char irc_level[TX3927_NUM_IR] = {
        6, 6, 6                 /* TMR */
 };
 
-static void jmr3927_irq_disable(unsigned int irq_nr);
-static void jmr3927_irq_enable(unsigned int irq_nr);
-
-static void jmr3927_irq_ack(unsigned int irq)
-{
-       if (irq == JMR3927_IRQ_IRC_TMR0)
-               jmr3927_tmrptr->tisr = 0;       /* ack interrupt */
-
-       jmr3927_irq_disable(irq);
-}
-
-static void jmr3927_irq_end(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-               jmr3927_irq_enable(irq);
-}
-
-static void jmr3927_irq_disable(unsigned int irq_nr)
-{
-       struct tb_irq_space* sp;
-
-       for (sp = tb_irq_spaces; sp; sp = sp->next) {
-               if (sp->start_irqno <= irq_nr &&
-                   irq_nr < sp->start_irqno + sp->nr_irqs) {
-                       if (sp->mask_func)
-                               sp->mask_func(irq_nr - sp->start_irqno,
-                                             sp->space_id);
-                       break;
-               }
-       }
-}
-
-static void jmr3927_irq_enable(unsigned int irq_nr)
-{
-       struct tb_irq_space* sp;
-
-       for (sp = tb_irq_spaces; sp; sp = sp->next) {
-               if (sp->start_irqno <= irq_nr &&
-                   irq_nr < sp->start_irqno + sp->nr_irqs) {
-                       if (sp->unmask_func)
-                               sp->unmask_func(irq_nr - sp->start_irqno,
-                                               sp->space_id);
-                       break;
-               }
-       }
-}
-
 /*
  * CP0_STATUS is a thread's resource (saved/restored on context switch).
- * So disable_irq/enable_irq MUST handle IOC/ISAC/IRC registers.
+ * So disable_irq/enable_irq MUST handle IOC/IRC registers.
  */
-static void mask_irq_isac(int irq_nr, int space_id)
-{
-       /* 0: mask */
-       unsigned char imask =
-               jmr3927_isac_reg_in(JMR3927_ISAC_INTM_ADDR);
-       unsigned int bit  = 1 << irq_nr;
-       jmr3927_isac_reg_out(imask & ~bit, JMR3927_ISAC_INTM_ADDR);
-       /* flush write buffer */
-       (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR);
-}
-static void unmask_irq_isac(int irq_nr, int space_id)
-{
-       /* 0: mask */
-       unsigned char imask = jmr3927_isac_reg_in(JMR3927_ISAC_INTM_ADDR);
-       unsigned int bit  = 1 << irq_nr;
-       jmr3927_isac_reg_out(imask | bit, JMR3927_ISAC_INTM_ADDR);
-       /* flush write buffer */
-       (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR);
-}
-
-static void mask_irq_ioc(int irq_nr, int space_id)
+static void mask_irq_ioc(unsigned int irq)
 {
        /* 0: mask */
+       unsigned int irq_nr = irq - JMR3927_IRQ_IOC;
        unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR);
        unsigned int bit = 1 << irq_nr;
        jmr3927_ioc_reg_out(imask & ~bit, JMR3927_IOC_INTM_ADDR);
        /* flush write buffer */
        (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR);
 }
-static void unmask_irq_ioc(int irq_nr, int space_id)
+static void unmask_irq_ioc(unsigned int irq)
 {
        /* 0: mask */
+       unsigned int irq_nr = irq - JMR3927_IRQ_IOC;
        unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR);
        unsigned int bit = 1 << irq_nr;
        jmr3927_ioc_reg_out(imask | bit, JMR3927_IOC_INTM_ADDR);
@@ -177,8 +80,9 @@ static void unmask_irq_ioc(int irq_nr, int space_id)
        (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR);
 }
 
-static void mask_irq_irc(int irq_nr, int space_id)
+static void mask_irq_irc(unsigned int irq)
 {
+       unsigned int irq_nr = irq - JMR3927_IRQ_IRC;
        volatile unsigned long *ilrp = &tx3927_ircptr->ilr[irq_nr / 2];
        if (irq_nr & 1)
                *ilrp = (*ilrp & 0x00ff) | (irc_dlevel << 8);
@@ -191,8 +95,9 @@ static void mask_irq_irc(int irq_nr, int space_id)
        (void)tx3927_ircptr->ssr;
 }
 
-static void unmask_irq_irc(int irq_nr, int space_id)
+static void unmask_irq_irc(unsigned int irq)
 {
+       unsigned int irq_nr = irq - JMR3927_IRQ_IRC;
        volatile unsigned long *ilrp = &tx3927_ircptr->ilr[irq_nr / 2];
        if (irq_nr & 1)
                *ilrp = (*ilrp & 0x00ff) | (irc_level[irq_nr] << 8);
@@ -203,98 +108,14 @@ static void unmask_irq_irc(int irq_nr, int space_id)
        tx3927_ircptr->imr = irc_elevel;
 }
 
-struct tb_irq_space jmr3927_isac_irqspace = {
-       .next = NULL,
-       .start_irqno = JMR3927_IRQ_ISAC,
-       nr_irqs : JMR3927_NR_IRQ_ISAC,
-       .mask_func = mask_irq_isac,
-       .unmask_func = unmask_irq_isac,
-       .name = "ISAC",
-       .space_id = 0,
-       can_share : 0
-};
-struct tb_irq_space jmr3927_ioc_irqspace = {
-       .next = NULL,
-       .start_irqno = JMR3927_IRQ_IOC,
-       nr_irqs : JMR3927_NR_IRQ_IOC,
-       .mask_func = mask_irq_ioc,
-       .unmask_func = unmask_irq_ioc,
-       .name = "IOC",
-       .space_id = 0,
-       can_share : 1
-};
-
-struct tb_irq_space jmr3927_irc_irqspace = {
-       .next           = NULL,
-       .start_irqno    = JMR3927_IRQ_IRC,
-       .nr_irqs        = JMR3927_NR_IRQ_IRC,
-       .mask_func      = mask_irq_irc,
-       .unmask_func    = unmask_irq_irc,
-       .name           = "on-chip",
-       .space_id       = 0,
-       .can_share      = 0
-};
-
-
-#ifdef CONFIG_TX_BRANCH_LIKELY_BUG_WORKAROUND
-static int tx_branch_likely_bug_count = 0;
-static int have_tx_branch_likely_bug = 0;
-
-static void tx_branch_likely_bug_fixup(void)
-{
-       struct pt_regs *regs = get_irq_regs();
-
-       /* TX39/49-BUG: Under this condition, the insn in delay slot
-           of the branch likely insn is executed (not nullified) even
-           the branch condition is false. */
-       if (!have_tx_branch_likely_bug)
-               return;
-       if ((regs->cp0_epc & 0xfff) == 0xffc &&
-           KSEGX(regs->cp0_epc) != KSEG0 &&
-           KSEGX(regs->cp0_epc) != KSEG1) {
-               unsigned int insn = *(unsigned int*)(regs->cp0_epc - 4);
-               /* beql,bnel,blezl,bgtzl */
-               /* bltzl,bgezl,blezall,bgezall */
-               /* bczfl, bcztl */
-               if ((insn & 0xf0000000) == 0x50000000 ||
-                   (insn & 0xfc0e0000) == 0x04020000 ||
-                   (insn & 0xf3fe0000) == 0x41020000) {
-                       regs->cp0_epc -= 4;
-                       tx_branch_likely_bug_count++;
-                       printk(KERN_INFO
-                              "fix branch-likery bug in %s (insn %08x)\n",
-                              current->comm, insn);
-               }
-       }
-}
-#endif
-
-static void jmr3927_spurious(void)
-{
-       struct pt_regs * regs = get_irq_regs();
-
-#ifdef CONFIG_TX_BRANCH_LIKELY_BUG_WORKAROUND
-       tx_branch_likely_bug_fixup();
-#endif
-       printk(KERN_WARNING "spurious interrupt (cause 0x%lx, pc 0x%lx, ra 0x%lx).\n",
-              regs->cp0_cause, regs->cp0_epc, regs->regs[31]);
-}
-
 asmlinkage void plat_irq_dispatch(void)
 {
-       struct pt_regs * regs = get_irq_regs();
+       unsigned long cp0_cause = read_c0_cause();
        int irq;
 
-#ifdef CONFIG_TX_BRANCH_LIKELY_BUG_WORKAROUND
-       tx_branch_likely_bug_fixup();
-#endif
-       if ((regs->cp0_cause & CAUSEF_IP7) == 0) {
-#if 0
-               jmr3927_spurious();
-#endif
+       if ((cp0_cause & CAUSEF_IP7) == 0)
                return;
-       }
-       irq = (regs->cp0_cause >> CAUSEB_IP2) & 0x0f;
+       irq = (cp0_cause >> CAUSEB_IP2) & 0x0f;
 
        do_IRQ(irq + JMR3927_IRQ_IRC);
 }
@@ -317,35 +138,6 @@ static struct irqaction ioc_action = {
        jmr3927_ioc_interrupt, 0, CPU_MASK_NONE, "IOC", NULL, NULL,
 };
 
-static irqreturn_t jmr3927_isac_interrupt(int irq, void *dev_id)
-{
-       unsigned char istat = jmr3927_isac_reg_in(JMR3927_ISAC_INTS2_ADDR);
-       int i;
-
-       for (i = 0; i < JMR3927_NR_IRQ_ISAC; i++) {
-               if (istat & (1 << i)) {
-                       irq = JMR3927_IRQ_ISAC + i;
-                       do_IRQ(irq);
-               }
-       }
-       return IRQ_HANDLED;
-}
-
-static struct irqaction isac_action = {
-       jmr3927_isac_interrupt, 0, CPU_MASK_NONE, "ISAC", NULL, NULL,
-};
-
-
-static irqreturn_t jmr3927_isaerr_interrupt(int irq, void *dev_id)
-{
-       printk(KERN_WARNING "ISA error interrupt (irq 0x%x).\n", irq);
-
-       return IRQ_HANDLED;
-}
-static struct irqaction isaerr_action = {
-       jmr3927_isaerr_interrupt, 0, CPU_MASK_NONE, "ISA error", NULL, NULL,
-};
-
 static irqreturn_t jmr3927_pcierr_interrupt(int irq, void *dev_id)
 {
        printk(KERN_WARNING "PCI error interrupt (irq 0x%x).\n", irq);
@@ -358,54 +150,19 @@ static struct irqaction pcierr_action = {
        jmr3927_pcierr_interrupt, 0, CPU_MASK_NONE, "PCI error", NULL, NULL,
 };
 
-int jmr3927_ether1_irq = 0;
-
-void jmr3927_irq_init(u32 irq_base);
+static void __init jmr3927_irq_init(void);
 
 void __init arch_init_irq(void)
 {
-       /* look for io board's presence */
-       int have_isac = jmr3927_have_isac();
-
        /* Now, interrupt control disabled, */
        /* all IRC interrupts are masked, */
        /* all IRC interrupt mode are Low Active. */
 
-       if (have_isac) {
-
-               /* ETHER1 (NE2000 compatible 10M-Ether) parameter setup */
-               /* temporary enable interrupt control */
-               tx3927_ircptr->cer = 1;
-               /* ETHER1 Int. Is High-Active. */
-               if (tx3927_ircptr->ssr & (1 << 0))
-                       jmr3927_ether1_irq = JMR3927_IRQ_IRC_INT0;
-#if 0  /* INT3 may be asserted by ether0 (even after reboot...) */
-               else if (tx3927_ircptr->ssr & (1 << 3))
-                       jmr3927_ether1_irq = JMR3927_IRQ_IRC_INT3;
-#endif
-               /* disable interrupt control */
-               tx3927_ircptr->cer = 0;
-
-               /* Ether1: High Active */
-               if (jmr3927_ether1_irq) {
-                       int ether1_irc = jmr3927_ether1_irq - JMR3927_IRQ_IRC;
-                       tx3927_ircptr->cr[ether1_irc / 8] |=
-                               TX3927_IRCR_HIGH << ((ether1_irc % 8) * 2);
-               }
-       }
-
        /* mask all IOC interrupts */
        jmr3927_ioc_reg_out(0, JMR3927_IOC_INTM_ADDR);
        /* setup IOC interrupt mode (SOFT:High Active, Others:Low Active) */
        jmr3927_ioc_reg_out(JMR3927_IOC_INTF_SOFT, JMR3927_IOC_INTP_ADDR);
 
-       if (have_isac) {
-               /* mask all ISAC interrupts */
-               jmr3927_isac_reg_out(0, JMR3927_ISAC_INTM_ADDR);
-               /* setup ISAC interrupt mode (ISAIRQ3,ISAIRQ5:Low Active ???) */
-               jmr3927_isac_reg_out(JMR3927_ISAC_INTF_IRQ3|JMR3927_ISAC_INTF_IRQ5, JMR3927_ISAC_INTP_ADDR);
-       }
-
        /* clear PCI Soft interrupts */
        jmr3927_ioc_reg_out(0, JMR3927_IOC_INTS1_ADDR);
        /* clear PCI Reset interrupts */
@@ -415,21 +172,11 @@ void __init arch_init_irq(void)
        tx3927_ircptr->cer = TX3927_IRCER_ICE;
        tx3927_ircptr->imr = irc_elevel;
 
-       jmr3927_irq_init(NR_ISA_IRQS);
-
-       /* setup irq space */
-       add_tb_irq_space(&jmr3927_isac_irqspace);
-       add_tb_irq_space(&jmr3927_ioc_irqspace);
-       add_tb_irq_space(&jmr3927_irc_irqspace);
+       jmr3927_irq_init();
 
        /* setup IOC interrupt 1 (PCI, MODEM) */
        setup_irq(JMR3927_IRQ_IOCINT, &ioc_action);
 
-       if (have_isac) {
-               setup_irq(JMR3927_IRQ_ISACINT, &isac_action);
-               setup_irq(JMR3927_IRQ_ISAC_ISAER, &isaerr_action);
-       }
-
 #ifdef CONFIG_PCI
        setup_irq(JMR3927_IRQ_IRC_PCI, &pcierr_action);
 #endif
@@ -438,21 +185,28 @@ void __init arch_init_irq(void)
        set_c0_status(ST0_IM);  /* IE bit is still 0. */
 }
 
-static struct irq_chip jmr3927_irq_controller = {
-       .name = "jmr3927_irq",
-       .ack = jmr3927_irq_ack,
-       .mask = jmr3927_irq_disable,
-       .mask_ack = jmr3927_irq_ack,
-       .unmask = jmr3927_irq_enable,
-       .end = jmr3927_irq_end,
+static struct irq_chip jmr3927_irq_ioc = {
+       .name = "jmr3927_ioc",
+       .ack = mask_irq_ioc,
+       .mask = mask_irq_ioc,
+       .mask_ack = mask_irq_ioc,
+       .unmask = unmask_irq_ioc,
 };
 
-void jmr3927_irq_init(u32 irq_base)
+static struct irq_chip jmr3927_irq_irc = {
+       .name = "jmr3927_irc",
+       .ack = mask_irq_irc,
+       .mask = mask_irq_irc,
+       .mask_ack = mask_irq_irc,
+       .unmask = unmask_irq_irc,
+};
+
+static void __init jmr3927_irq_init(void)
 {
        u32 i;
 
-       for (i= irq_base; i< irq_base + JMR3927_NR_IRQ_IRC + JMR3927_NR_IRQ_IOC; i++)
-               set_irq_chip(i, &jmr3927_irq_controller);
-
-       jmr3927_irq_base = irq_base;
+       for (i = JMR3927_IRQ_IRC; i < JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC; i++)
+               set_irq_chip_and_handler(i, &jmr3927_irq_irc, handle_level_irq);
+       for (i = JMR3927_IRQ_IOC; i < JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC; i++)
+               set_irq_chip_and_handler(i, &jmr3927_irq_ioc, handle_level_irq);
 }
index 269a42d..2604f2c 100644 (file)
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/types.h>
-#include <asm/jmr3927/txx927.h>
-#include <asm/jmr3927/tx3927.h>
 #include <asm/jmr3927/jmr3927.h>
 
 #define TIMEOUT       0xffffff
-#define SLOW_DOWN
-
-static const char digits[16] = "0123456789abcdef";
-
-#ifdef SLOW_DOWN
-#define slow_down() { int k; for (k=0; k<10000; k++); }
-#else
-#define slow_down()
-#endif
 
 static int remoteDebugInitialized = 0;
+static void debugInit(int baud);
 
 int putDebugChar(unsigned char c)
 {
@@ -103,20 +92,8 @@ unsigned char getDebugChar(void)
        return c;
 }
 
-void debugInit(int baud)
+static void debugInit(int baud)
 {
-       /*
-       volatile unsigned long lcr;
-       volatile unsigned long dicr;
-       volatile unsigned long disr;
-       volatile unsigned long cisr;
-       volatile unsigned long fcr;
-       volatile unsigned long flcr;
-       volatile unsigned long bgr;
-       volatile unsigned long tfifo;
-       volatile unsigned long rfifo;
-       */
-
        tx3927_sioptr(0)->lcr = 0x020;
        tx3927_sioptr(0)->dicr = 0;
        tx3927_sioptr(0)->disr = 0x4100;
@@ -125,31 +102,4 @@ void debugInit(int baud)
        tx3927_sioptr(0)->flcr = 0x02;
        tx3927_sioptr(0)->bgr = ((JMR3927_BASE_BAUD + baud / 2) / baud) |
                TXx927_SIBGR_BCLK_T0;
-#if 0
-       /*
-        * Reset the UART.
-        */
-       tx3927_sioptr(0)->fcr = TXx927_SIFCR_SWRST;
-       while (tx3927_sioptr(0)->fcr & TXx927_SIFCR_SWRST)
-               ;
-
-       /*
-        * and set the speed of the serial port
-        * (currently hardwired to 9600 8N1
-        */
-
-       tx3927_sioptr(0)->lcr = TXx927_SILCR_UMODE_8BIT |
-               TXx927_SILCR_USBL_1BIT |
-               TXx927_SILCR_SCS_IMCLK_BG;
-       tx3927_sioptr(0)->bgr =
-               ((JMR3927_BASE_BAUD + baud / 2) / baud) |
-               TXx927_SIBGR_BCLK_T0;
-
-       /* HW RTS/CTS control */
-       if (ser->flags & ASYNC_HAVE_CTS_LINE)
-               tx3927_sioptr(0)->flcr = TXx927_SIFLCR_RCS | TXx927_SIFLCR_TES |
-                       TXx927_SIFLCR_RTSTL_MAX /* 15 */;
-       /* Enable RX/TX */
-       tx3927_sioptr(0)->flcr &= ~(TXx927_SIFLCR_RSDE | TXx927_SIFLCR_TSDE);
-#endif
 }
index fc523bd..d1ef289 100644 (file)
 
 #include <asm/addrspace.h>
 #include <asm/time.h>
-#include <asm/bcache.h>
-#include <asm/irq.h>
 #include <asm/reboot.h>
-#include <asm/gdb-stub.h>
 #include <asm/jmr3927/jmr3927.h>
 #include <asm/mipsregs.h>
-#include <asm/traps.h>
 
-extern void puts(unsigned char *cp);
+extern void puts(const char *cp);
 
 /* Tick Timer divider */
 #define JMR3927_TIMER_CCD      0       /* 1/2 */
 #define JMR3927_TIMER_CLK      (JMR3927_IMCLK / (2 << JMR3927_TIMER_CCD))
 
-unsigned char led_state = 0xf;
-
-struct {
-    struct resource ram0;
-    struct resource ram1;
-    struct resource pcimem;
-    struct resource iob;
-    struct resource ioc;
-    struct resource pciio;
-    struct resource jmy1394;
-    struct resource rom1;
-    struct resource rom0;
-    struct resource sio0;
-    struct resource sio1;
-} jmr3927_resources = {
-       {
-               .start  = 0,
-               .end    = 0x01FFFFFF,
-               .name   = "RAM0",
-               .flags = IORESOURCE_MEM
-       }, {
-               .start  = 0x02000000,
-               .end    = 0x03FFFFFF,
-               .name   = "RAM1",
-               .flags = IORESOURCE_MEM
-       }, {
-               .start  = 0x08000000,
-               .end    = 0x07FFFFFF,
-               .name   = "PCIMEM",
-               .flags = IORESOURCE_MEM
-       }, {
-               .start  = 0x10000000,
-               .end    = 0x13FFFFFF,
-               .name   = "IOB"
-       }, {
-               .start  = 0x14000000,
-               .end    = 0x14FFFFFF,
-               .name   = "IOC"
-       }, {
-               .start  = 0x15000000,
-               .end    = 0x15FFFFFF,
-               .name   = "PCIIO"
-       }, {
-               .start  = 0x1D000000,
-               .end    = 0x1D3FFFFF,
-               .name   = "JMY1394"
-       }, {
-               .start  = 0x1E000000,
-               .end    = 0x1E3FFFFF,
-               .name   = "ROM1"
-       }, {
-               .start  = 0x1FC00000,
-               .end    = 0x1FFFFFFF,
-               .name   = "ROM0"
-       }, {
-               .start  = 0xFFFEF300,
-               .end    = 0xFFFEF3FF,
-               .name   = "SIO0"
-       }, {
-               .start  = 0xFFFEF400,
-               .end    = 0xFFFEF4FF,
-               .name   = "SIO1"
-       },
-};
-
 /* don't enable - see errata */
-int jmr3927_ccfg_toeon = 0;
+static int jmr3927_ccfg_toeon;
 
 static inline void do_reset(void)
 {
@@ -173,9 +104,15 @@ static cycle_t jmr3927_hpt_read(void)
        return jiffies * (JMR3927_TIMER_CLK / HZ) + jmr3927_tmrptr->trr;
 }
 
+static void jmr3927_timer_ack(void)
+{
+       jmr3927_tmrptr->tisr = 0;       /* ack interrupt */
+}
+
 static void __init jmr3927_time_init(void)
 {
        clocksource_mips.read = jmr3927_hpt_read;
+       mips_timer_ack = jmr3927_timer_ack;
        mips_hpt_frequency = JMR3927_TIMER_CLK;
 }
 
@@ -190,9 +127,6 @@ void __init plat_timer_setup(struct irqaction *irq)
        setup_irq(JMR3927_IRQ_TICK, irq);
 }
 
-#define USECS_PER_JIFFY (1000000/HZ)
-
-//#undef DO_WRITE_THROUGH
 #define DO_WRITE_THROUGH
 #define DO_ENABLE_CACHE
 
@@ -224,12 +158,6 @@ void __init plat_mem_setup(void)
        /* Reboot on panic */
        panic_timeout = 180;
 
-       {
-               unsigned int conf;
-               conf = read_c0_conf();
-       }
-
-#if 1
        /* cache setup */
        {
                unsigned int conf;
@@ -256,16 +184,14 @@ void __init plat_mem_setup(void)
                write_c0_conf(conf);
                write_c0_cache(0);
        }
-#endif
 
        /* initialize board */
        jmr3927_board_init();
 
        argptr = prom_getcmdline();
 
-       if ((argptr = strstr(argptr, "toeon")) != NULL) {
-                       jmr3927_ccfg_toeon = 1;
-       }
+       if ((argptr = strstr(argptr, "toeon")) != NULL)
+               jmr3927_ccfg_toeon = 1;
        argptr = prom_getcmdline();
        if ((argptr = strstr(argptr, "ip=")) == NULL) {
                argptr = prom_getcmdline();
@@ -281,7 +207,7 @@ void __init plat_mem_setup(void)
                        memset(&req, 0, sizeof(req));
                        req.line = i;
                        req.iotype = UPIO_MEM;
-                       req.membase = (char *)TX3927_SIO_REG(i);
+                       req.membase = (unsigned char __iomem *)TX3927_SIO_REG(i);
                        req.mapbase = TX3927_SIO_REG(i);
                        req.irq = i == 0 ?
                                JMR3927_IRQ_IRC_SIO0 : JMR3927_IRQ_IRC_SIO1;
@@ -303,65 +229,33 @@ void __init plat_mem_setup(void)
 
 static void tx3927_setup(void);
 
-#ifdef CONFIG_PCI
-unsigned long mips_pci_io_base;
-unsigned long mips_pci_io_size;
-unsigned long mips_pci_mem_base;
-unsigned long mips_pci_mem_size;
-/* for legacy I/O, PCI I/O PCI Bus address must be 0 */
-unsigned long mips_pci_io_pciaddr = 0;
-#endif
-
 static void __init jmr3927_board_init(void)
 {
-       char *argptr;
-
-#ifdef CONFIG_PCI
-       mips_pci_io_base = JMR3927_PCIIO;
-       mips_pci_io_size = JMR3927_PCIIO_SIZE;
-       mips_pci_mem_base = JMR3927_PCIMEM;
-       mips_pci_mem_size = JMR3927_PCIMEM_SIZE;
-#endif
-
        tx3927_setup();
 
-       if (jmr3927_have_isac()) {
-
-#ifdef CONFIG_FB_E1355
-               argptr = prom_getcmdline();
-               if ((argptr = strstr(argptr, "video=")) == NULL) {
-                       argptr = prom_getcmdline();
-                       strcat(argptr, " video=e1355fb:crt16h");
-               }
-#endif
-
-#ifdef CONFIG_BLK_DEV_IDE
-               /* overrides PCI-IDE */
-#endif
-       }
-
        /* SIO0 DTR on */
        jmr3927_ioc_reg_out(0, JMR3927_IOC_DTR_ADDR);
 
        jmr3927_led_set(0);
 
-
-       if (jmr3927_have_isac())
-               jmr3927_io_led_set(0);
        printk("JMR-TX3927 (Rev %d) --- IOC(Rev %d) DIPSW:%d,%d,%d,%d\n",
               jmr3927_ioc_reg_in(JMR3927_IOC_BREV_ADDR) & JMR3927_REV_MASK,
               jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR) & JMR3927_REV_MASK,
               jmr3927_dipsw1(), jmr3927_dipsw2(),
               jmr3927_dipsw3(), jmr3927_dipsw4());
-       if (jmr3927_have_isac())
-               printk("JMI-3927IO2 --- ISAC(Rev %d) DIPSW:%01x\n",
-                      jmr3927_isac_reg_in(JMR3927_ISAC_REV_ADDR) & JMR3927_REV_MASK,
-                      jmr3927_io_dipsw());
 }
 
-void __init tx3927_setup(void)
+static void __init tx3927_setup(void)
 {
        int i;
+#ifdef CONFIG_PCI
+       unsigned long mips_pci_io_base = JMR3927_PCIIO;
+       unsigned long mips_pci_io_size = JMR3927_PCIIO_SIZE;
+       unsigned long mips_pci_mem_base = JMR3927_PCIMEM;
+       unsigned long mips_pci_mem_size = JMR3927_PCIMEM_SIZE;
+       /* for legacy I/O, PCI I/O PCI Bus address must be 0 */
+       unsigned long mips_pci_io_pciaddr = 0;
+#endif
 
        /* SDRAMC are configured by PROM */
 
@@ -475,10 +369,8 @@ void __init tx3927_setup(void)
                tx3927_pcicptr->mbas = ~(mips_pci_mem_size - 1);
                tx3927_pcicptr->mba = 0;
                tx3927_pcicptr->tlbmma = 0;
-#ifndef JMR3927_INIT_INDIRECT_PCI
                /* Enable Direct mapping Address Space Decoder */
                tx3927_pcicptr->lbc |= TX3927_PCIC_LBC_ILMDE | TX3927_PCIC_LBC_ILIDE;
-#endif
 
                /* Clear All Local Bus Status */
                tx3927_pcicptr->lbstat = TX3927_PCIC_LBIM_ALL;
@@ -491,22 +383,15 @@ void __init tx3927_setup(void)
 
                /* PCIC Int => IRC IRQ10 */
                tx3927_pcicptr->il = TX3927_IR_PCI;
-#if 1
                /* Target Control (per errata) */
                tx3927_pcicptr->tc = TX3927_PCIC_TC_OF8E | TX3927_PCIC_TC_IF8E;
-#endif
 
                /* Enable Bus Arbiter */
-#if 0
-               tx3927_pcicptr->req_trace = 0x73737373;
-#endif
                tx3927_pcicptr->pbapmc = TX3927_PCIC_PBAPMC_PBAEN;
 
                tx3927_pcicptr->pcicmd = PCI_COMMAND_MASTER |
                        PCI_COMMAND_MEMORY |
-#if 1
                        PCI_COMMAND_IO |
-#endif
                        PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
        }
 #endif /* CONFIG_PCI */
@@ -555,8 +440,6 @@ static int __init jmr3927_rtc_init(void)
                .flags  = IORESOURCE_MEM,
        };
        struct platform_device *dev;
-       if (!jmr3927_have_nvram())
-               return -ENODEV;
        dev = platform_device_register_simple("ds1742", -1, &res, 1);
        return IS_ERR(dev) ? PTR_ERR(dev) : 0;
 }
index 222de46..761a779 100644 (file)
@@ -102,7 +102,6 @@ void output_thread_info_defines(void)
        offset("#define TI_ADDR_LIMIT      ", struct thread_info, addr_limit);
        offset("#define TI_RESTART_BLOCK   ", struct thread_info, restart_block);
        offset("#define TI_REGS            ", struct thread_info, regs);
-       constant("#define _THREAD_SIZE_ORDER ", THREAD_SIZE_ORDER);
        constant("#define _THREAD_SIZE       ", THREAD_SIZE);
        constant("#define _THREAD_MASK       ", THREAD_MASK);
        linefeed;
index 9c79703..2345160 100644 (file)
@@ -328,8 +328,8 @@ void __init init_i8259_irqs (void)
 {
        int i;
 
-       request_resource(&ioport_resource, &pic1_io_resource);
-       request_resource(&ioport_resource, &pic2_io_resource);
+       insert_resource(&ioport_resource, &pic1_io_resource);
+       insert_resource(&ioport_resource, &pic2_io_resource);
 
        init_8259A(0);
 
index 29eadd4..c658001 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/unistd.h>
 #include <linux/file.h>
 #include <linux/fs.h>
@@ -198,7 +199,6 @@ void sp_work_handle_request(void)
        int cmd;
 
        char *vcwd;
-       mm_segment_t old_fs;
        int size;
 
        ret.retval = -1;
@@ -241,8 +241,6 @@ void sp_work_handle_request(void)
                if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
                                             (int)&tz, 0,0)) == 0)
                ret.retval = tv.tv_sec;
-
-               ret.errno = errno;
                break;
 
        case MTSP_SYSCALL_EXIT:
@@ -279,7 +277,6 @@ void sp_work_handle_request(void)
                if (cmd >= 0) {
                        ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1,
                                                generic.arg2, generic.arg3);
-                       ret.errno = errno;
                } else
                        printk(KERN_WARNING
                               "KSPD: Unknown SP syscall number %d\n", sc.cmd);
index 28c2e2e..656bde2 100644 (file)
@@ -49,7 +49,8 @@ LEAF(resume)
 #ifndef CONFIG_CPU_HAS_LLSC
        sw      zero, ll_bit
 #endif
-       mfc0    t2, CP0_STATUS
+       mfc0    t1, CP0_STATUS
+       sw      t1, THREAD_STATUS(a0)
        cpu_save_nonscratch a0
        sw      ra, THREAD_REG31(a0)
 
@@ -59,8 +60,8 @@ LEAF(resume)
        lw      t3, TASK_THREAD_INFO(a0)
        lw      t0, TI_FLAGS(t3)
        li      t1, _TIF_USEDFPU
-       and     t1, t0
-       beqz    t1, 1f
+       and     t2, t0, t1
+       beqz    t2, 1f
        nor     t1, zero, t1
 
        and     t0, t0, t1
@@ -73,13 +74,10 @@ LEAF(resume)
        li      t1, ~ST0_CU1
        and     t0, t0, t1
        sw      t0, ST_OFF(t3)
-       /* clear thread_struct CU1 bit */
-       and     t2, t1
 
        fpu_save_single a0, t0                  # clobbers t0
 
 1:
-       sw      t2, THREAD_STATUS(a0)
        /*
         * The order of restoring the registers takes care of the race
         * updating $28, $29 and kernelsp without disabling ints.
index c7698fd..cc566cf 100644 (file)
@@ -48,7 +48,8 @@
 #ifndef CONFIG_CPU_HAS_LLSC
        sw      zero, ll_bit
 #endif
-       mfc0    t2, CP0_STATUS
+       mfc0    t1, CP0_STATUS
+       LONG_S  t1, THREAD_STATUS(a0)
        cpu_save_nonscratch a0
        LONG_S  ra, THREAD_REG31(a0)
 
@@ -58,8 +59,8 @@
        PTR_L   t3, TASK_THREAD_INFO(a0)
        LONG_L  t0, TI_FLAGS(t3)
        li      t1, _TIF_USEDFPU
-       and     t1, t0
-       beqz    t1, 1f
+       and     t2, t0, t1
+       beqz    t2, 1f
        nor     t1, zero, t1
 
        and     t0, t0, t1
        li      t1, ~ST0_CU1
        and     t0, t0, t1
        LONG_S  t0, ST_OFF(t3)
-       /* clear thread_struct CU1 bit */
-       and     t2, t1
 
        fpu_save_double a0 t0 t1                # c0_status passed in t0
                                                # clobbers t1
 1:
-       LONG_S  t2, THREAD_STATUS(a0)
 
        /*
         * The order of restoring the registers takes care of the race
index e6e3047..bfc8ca1 100644 (file)
@@ -289,7 +289,7 @@ unsigned int rtlx_write_poll(int index)
        return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size);
 }
 
-ssize_t rtlx_read(int index, void __user *buff, size_t count, int user)
+ssize_t rtlx_read(int index, void __user *buff, size_t count)
 {
        size_t lx_write, fl = 0L;
        struct rtlx_channel *lx;
@@ -331,9 +331,10 @@ out:
        return count;
 }
 
-ssize_t rtlx_write(int index, const void __user *buffer, size_t count, int user)
+ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
 {
        struct rtlx_channel *rt;
+       unsigned long failed;
        size_t rt_read;
        size_t fl;
 
@@ -363,7 +364,7 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count, int user)
        }
 
 out:
-       count -= cailed;
+       count -= failed;
 
        smp_wmb();
        rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
index 297dfcb..c0faabd 100644 (file)
@@ -34,4 +34,13 @@ extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall);
 /* Check and clear pending FPU exceptions in saved CSR */
 extern int fpcsr_pending(unsigned int __user *fpcsr);
 
+/* Make sure we will not lose FPU ownership */
+#ifdef CONFIG_PREEMPT
+#define lock_fpu_owner()       preempt_disable()
+#define unlock_fpu_owner()     preempt_enable()
+#else
+#define lock_fpu_owner()       pagefault_disable()
+#define unlock_fpu_owner()     pagefault_enable()
+#endif
+
 #endif /* __SIGNAL_COMMON_H */
index 8c3c5a5..07d6730 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/compiler.h>
+#include <linux/uaccess.h>
 
 #include <asm/abi.h>
 #include <asm/asm.h>
@@ -27,7 +28,6 @@
 #include <asm/cacheflush.h>
 #include <asm/fpu.h>
 #include <asm/sim.h>
-#include <asm/uaccess.h>
 #include <asm/ucontext.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
@@ -78,6 +78,46 @@ struct rt_sigframe {
 /*
  * Helper routines
  */
+static int protected_save_fp_context(struct sigcontext __user *sc)
+{
+       int err;
+       while (1) {
+               lock_fpu_owner();
+               own_fpu_inatomic(1);
+               err = save_fp_context(sc); /* this might fail */
+               unlock_fpu_owner();
+               if (likely(!err))
+                       break;
+               /* touch the sigcontext and try again */
+               err = __put_user(0, &sc->sc_fpregs[0]) |
+                       __put_user(0, &sc->sc_fpregs[31]) |
+                       __put_user(0, &sc->sc_fpc_csr);
+               if (err)
+                       break;  /* really bad sigcontext */
+       }
+       return err;
+}
+
+static int protected_restore_fp_context(struct sigcontext __user *sc)
+{
+       int err, tmp;
+       while (1) {
+               lock_fpu_owner();
+               own_fpu_inatomic(0);
+               err = restore_fp_context(sc); /* this might fail */
+               unlock_fpu_owner();
+               if (likely(!err))
+                       break;
+               /* touch the sigcontext and try again */
+               err = __get_user(tmp, &sc->sc_fpregs[0]) |
+                       __get_user(tmp, &sc->sc_fpregs[31]) |
+                       __get_user(tmp, &sc->sc_fpc_csr);
+               if (err)
+                       break;  /* really bad sigcontext */
+       }
+       return err;
+}
+
 int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
        int err = 0;
@@ -113,10 +153,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                 * Save FPU state to signal context. Signal handler
                 * will "inherit" current FPU state.
                 */
-               own_fpu(1);
-               enable_fp_in_kernel();
-               err |= save_fp_context(sc);
-               disable_fp_in_kernel();
+               err |= protected_save_fp_context(sc);
        }
        return err;
 }
@@ -148,7 +185,7 @@ check_and_restore_fp_context(struct sigcontext __user *sc)
        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
-       err |= restore_fp_context(sc);
+       err |= protected_restore_fp_context(sc);
        return err ?: sig;
 }
 
@@ -187,11 +224,8 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 
        if (used_math) {
                /* restore fpu context if we have used it before */
-               own_fpu(0);
-               enable_fp_in_kernel();
                if (!err)
                        err = check_and_restore_fp_context(sc);
-               disable_fp_in_kernel();
        } else {
                /* signal handler may have used FPU.  Give it up. */
                lose_fpu(0);
index 151fd2f..b9a0144 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/compat.h>
 #include <linux/suspend.h>
 #include <linux/compiler.h>
+#include <linux/uaccess.h>
 
 #include <asm/abi.h>
 #include <asm/asm.h>
@@ -29,7 +30,6 @@
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/sim.h>
-#include <asm/uaccess.h>
 #include <asm/ucontext.h>
 #include <asm/system.h>
 #include <asm/fpu.h>
@@ -176,6 +176,46 @@ struct rt_sigframe32 {
 /*
  * sigcontext handlers
  */
+static int protected_save_fp_context32(struct sigcontext32 __user *sc)
+{
+       int err;
+       while (1) {
+               lock_fpu_owner();
+               own_fpu_inatomic(1);
+               err = save_fp_context32(sc); /* this might fail */
+               unlock_fpu_owner();
+               if (likely(!err))
+                       break;
+               /* touch the sigcontext and try again */
+               err = __put_user(0, &sc->sc_fpregs[0]) |
+                       __put_user(0, &sc->sc_fpregs[31]) |
+                       __put_user(0, &sc->sc_fpc_csr);
+               if (err)
+                       break;  /* really bad sigcontext */
+       }
+       return err;
+}
+
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
+{
+       int err, tmp;
+       while (1) {
+               lock_fpu_owner();
+               own_fpu_inatomic(0);
+               err = restore_fp_context32(sc); /* this might fail */
+               unlock_fpu_owner();
+               if (likely(!err))
+                       break;
+               /* touch the sigcontext and try again */
+               err = __get_user(tmp, &sc->sc_fpregs[0]) |
+                       __get_user(tmp, &sc->sc_fpregs[31]) |
+                       __get_user(tmp, &sc->sc_fpc_csr);
+               if (err)
+                       break;  /* really bad sigcontext */
+       }
+       return err;
+}
+
 static int setup_sigcontext32(struct pt_regs *regs,
                              struct sigcontext32 __user *sc)
 {
@@ -209,10 +249,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
                 * Save FPU state to signal context.  Signal handler
                 * will "inherit" current FPU state.
                 */
-               own_fpu(1);
-               enable_fp_in_kernel();
-               err |= save_fp_context32(sc);
-               disable_fp_in_kernel();
+               err |= protected_save_fp_context32(sc);
        }
        return err;
 }
@@ -225,7 +262,7 @@ check_and_restore_fp_context32(struct sigcontext32 __user *sc)
        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
-       err |= restore_fp_context32(sc);
+       err |= protected_restore_fp_context32(sc);
        return err ?: sig;
 }
 
@@ -261,11 +298,8 @@ static int restore_sigcontext32(struct pt_regs *regs,
 
        if (used_math) {
                /* restore fpu context if we have used it before */
-               own_fpu(0);
-               enable_fp_in_kernel();
                if (!err)
                        err = check_and_restore_fp_context32(sc);
-               disable_fp_in_kernel();
        } else {
                /* signal handler may have used FPU.  Give it up. */
                lose_fpu(0);
index 7d76a85..493cb29 100644 (file)
@@ -650,7 +650,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
        unsigned int opcode, bcode;
        siginfo_t info;
 
-       if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
                goto out_sigsegv;
 
        /*
@@ -700,7 +700,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
        unsigned int opcode, tcode = 0;
        siginfo_t info;
 
-       if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
                goto out_sigsegv;
 
        /* Immediate versions don't provide a code.  */
@@ -757,11 +757,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 {
        unsigned int cpid;
 
+       die_if_kernel("do_cpu invoked from kernel context!", regs);
+
        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
 
        switch (cpid) {
        case 0:
-               die_if_kernel("do_cpu invoked from kernel context!", regs);
                if (!cpu_has_llsc)
                        if (!simulate_llsc(regs))
                                return;
@@ -772,9 +773,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
                break;
 
        case 1:
-               if (!test_thread_flag(TIF_ALLOW_FP_IN_KERNEL))
-                       die_if_kernel("do_cpu invoked from kernel context!",
-                                     regs);
                if (used_math())        /* Using the FPU again.  */
                        own_fpu(1);
                else {                  /* First time FPU user.  */
@@ -782,19 +780,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
                        set_used_math();
                }
 
-               if (raw_cpu_has_fpu) {
-                       if (test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) {
-                               local_irq_disable();
-                               if (cpu_has_fpu)
-                                       regs->cp0_status |= ST0_CU1;
-                               /*
-                                * We must return without enabling
-                                * interrupts to ensure keep FPU
-                                * ownership until resume.
-                                */
-                               return;
-                       }
-               } else {
+               if (!raw_cpu_has_fpu) {
                        int sig;
                        sig = fpu_emulator_cop1Handler(regs,
                                                &current->thread.fpu, 0);
@@ -836,7 +822,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
        case 2:
        case 3:
-               die_if_kernel("do_cpu invoked from kernel context!", regs);
                break;
        }
 
index f653946..548dbe5 100644 (file)
 
 void mips_display_message(const char *str)
 {
-       static volatile unsigned int *display = NULL;
+       static unsigned int __iomem *display = NULL;
        int i;
 
        if (unlikely(display == NULL))
-               display = (volatile unsigned int *)ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
+               display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
 
        for (i = 0; i <= 14; i=i+2) {
                 if (*str)
-                        display[i] = *str++;
+                        writel(*str++, display + i);
                 else
-                        display[i] = ' ';
+                        writel(' ', display + i);
        }
 }
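
The display hunk above replaces a cached volatile pointer with an __iomem mapping and writel(). A minimal sketch of that idiom; the physical address and length are placeholders, not the board's real display registers:

#include <asm/io.h>

static void example_mmio_write(void)
{
	void __iomem *regs = ioremap(0x1f000400, 16);	/* placeholder address */

	if (!regs)
		return;
	writel('A', regs);	/* MMIO store through the accessor, not "*regs = 'A'" */
	iounmap(regs);
}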
index 3192a14..f98d60f 100644 (file)
@@ -65,7 +65,7 @@ static struct resource msc_io_resource = {
 };
 
 extern struct pci_ops bonito64_pci_ops;
-extern struct pci_ops gt64120_pci_ops;
+extern struct pci_ops gt64xxx_pci0_ops;
 extern struct pci_ops msc_pci_ops;
 
 static struct pci_controller bonito64_controller = {
@@ -76,7 +76,7 @@ static struct pci_controller bonito64_controller = {
 };
 
 static struct pci_controller gt64120_controller = {
-       .pci_ops        = &gt64120_pci_ops,
+       .pci_ops        = &gt64xxx_pci0_ops,
        .io_resource    = &gt64120_io_resource,
        .mem_resource   = &gt64120_mem_resource,
 };
index 0996ba3..7a1bb51 100644 (file)
@@ -39,24 +39,24 @@ static void atlas_machine_power_off(void);
 
 static void mips_machine_restart(char *command)
 {
-        volatile unsigned int *softres_reg = (unsigned int *)ioremap (SOFTRES_REG, sizeof(unsigned int));
+       unsigned int __iomem *softres_reg = ioremap(SOFTRES_REG, sizeof(unsigned int));
 
-       *softres_reg = GORESET;
+       writew(GORESET, softres_reg);
 }
 
 static void mips_machine_halt(void)
 {
-        volatile unsigned int *softres_reg = (unsigned int *)ioremap (SOFTRES_REG, sizeof(unsigned int));
+        unsigned int __iomem *softres_reg = ioremap(SOFTRES_REG, sizeof(unsigned int));
 
-       *softres_reg = GORESET;
+       writew(GORESET, softres_reg);
 }
 
 #if defined(CONFIG_MIPS_ATLAS)
 static void atlas_machine_power_off(void)
 {
-        volatile unsigned int *psustby_reg = (unsigned int *)ioremap(ATLAS_PSUSTBY_REG, sizeof(unsigned int));
+       unsigned int __iomem *psustby_reg = ioremap(ATLAS_PSUSTBY_REG, sizeof(unsigned int));
 
-       *psustby_reg = ATLAS_GOSTBY;
+       writew(ATLAS_GOSTBY, psustby_reg);
 }
 #endif
 
index 3c206bb..83d7602 100644 (file)
@@ -42,8 +42,6 @@
 #include <asm/mips-boards/msc01_pci.h>
 #include <asm/msc01_ic.h>
 
-extern void mips_timer_interrupt(void);
-
 static DEFINE_SPINLOCK(mips_irq_lock);
 
 static inline int mips_pcibios_iack(void)
@@ -85,7 +83,7 @@ static inline int mips_pcibios_iack(void)
                dummy = BONITO_PCIMAP_CFG;
                iob();    /* sync */
 
-               irq = *(volatile u32 *)(_pcictrl_bonito_pcicfg);
+               irq = readl((u32 *)_pcictrl_bonito_pcicfg);
                iob();    /* sync */
                irq &= 0xff;
                BONITO_PCIMAP_CFG = 0;
index 56ea766..7873932 100644 (file)
@@ -145,7 +145,8 @@ void __init plat_mem_setup(void)
 #ifdef CONFIG_BLK_DEV_IDE
        /* Check PCI clock */
        {
-               int jmpr = (*((volatile unsigned int *)ioremap(MALTA_JMPRS_REG, sizeof(unsigned int))) >> 2) & 0x07;
+               unsigned int __iomem *jmpr_p = (unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int));
+               int jmpr = (readw(jmpr_p) >> 2) & 0x07;
                static const int pciclocks[] __initdata = {
                        33, 20, 25, 30, 12, 16, 37, 10
                };
@@ -179,7 +180,6 @@ void __init plat_mem_setup(void)
        };
 #endif
 #endif
-
        mips_reboot_setup();
 
        board_time_init = mips_time_init;
index 4e8f1b6..abf99b1 100644 (file)
@@ -96,7 +96,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
                kaddr = kmap_coherent(page, vmaddr);
                flush_data_cache_page((unsigned long)kaddr);
-               kunmap_coherent(kaddr);
+               kunmap_coherent();
        }
 }
 
index e9951c0..2d1c2c0 100644 (file)
@@ -177,7 +177,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 
 #define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
 
-void kunmap_coherent(struct page *page)
+void kunmap_coherent(void)
 {
 #ifndef CONFIG_MIPS_MT_SMTC
        unsigned int wired;
@@ -210,7 +210,7 @@ void copy_user_highpage(struct page *to, struct page *from,
        if (cpu_has_dc_aliases) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
-               kunmap_coherent(from);
+               kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
@@ -233,7 +233,7 @@ void copy_to_user_page(struct vm_area_struct *vma,
        if (cpu_has_dc_aliases) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
-               kunmap_coherent(page);
+               kunmap_coherent();
        } else
                memcpy(dst, src, len);
        if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
@@ -250,7 +250,7 @@ void copy_from_user_page(struct vm_area_struct *vma,
                void *vfrom =
                        kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
-               kunmap_coherent(page);
+               kunmap_coherent();
        } else
                memcpy(dst, src, len);
 }
@@ -351,18 +351,15 @@ void __init paging_init(void)
 #endif
        kmap_coherent_init();
 
-#ifdef CONFIG_ISA
-       if (max_low_pfn >= MAX_DMA_PFN)
-               if (min_low_pfn >= MAX_DMA_PFN) {
-                       zones_size[ZONE_DMA] = 0;
-                       zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
-               } else {
-                       zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
-                       zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
-               }
+#ifdef CONFIG_ZONE_DMA
+       if (min_low_pfn < MAX_DMA_PFN && MAX_DMA_PFN <= max_low_pfn) {
+               zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
+               zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
+       } else if (max_low_pfn < MAX_DMA_PFN)
+               zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
        else
 #endif
-       zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
+       zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
 
 #ifdef CONFIG_HIGHMEM
        zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
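
The reworked paging_init() above only creates a ZONE_DMA when the DMA boundary actually falls inside the available PFN range. A worked sketch with invented PFN values, assuming CONFIG_ZONE_DMA is set:

#include <linux/mmzone.h>

static void example_zone_split(unsigned long zones_size[MAX_NR_ZONES])
{
	unsigned long min_pfn = 0x0000, max_pfn = 0x8000, dma_pfn = 0x1000;

	if (min_pfn < dma_pfn && dma_pfn <= max_pfn) {
		zones_size[ZONE_DMA]    = dma_pfn - min_pfn;	/* 0x1000 pages below the limit */
		zones_size[ZONE_NORMAL] = max_pfn - dma_pfn;	/* 0x7000 pages above it */
	} else if (max_pfn < dma_pfn) {
		zones_size[ZONE_DMA]    = max_pfn - min_pfn;	/* all memory is DMA-able */
	} else {
		zones_size[ZONE_NORMAL] = max_pfn - min_pfn;	/* no memory below the DMA limit */
	}
}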
index 69a8bcf..4f94fa2 100644 (file)
@@ -35,7 +35,7 @@
 #define vpe_id()       smp_processor_id()
 #else
 #define WHAT           0
-#define vpe_id()       smp_processor_id()
+#define vpe_id()       0
 #endif
 
 #define __define_perf_accessors(r, n, np)                              \
index bf85995..df487c0 100644 (file)
@@ -8,8 +8,7 @@ obj-y                           += pci.o pci-dac.o
 # PCI bus host bridge specific code
 #
 obj-$(CONFIG_MIPS_BONITO64)    += ops-bonito64.o
-obj-$(CONFIG_MIPS_GT64111)     += ops-gt64111.o
-obj-$(CONFIG_MIPS_GT64120)     += ops-gt64120.o
+obj-$(CONFIG_PCI_GT64XXX_PCI0) += ops-gt64xxx_pci0.o
 obj-$(CONFIG_PCI_MARVELL)      += ops-marvell.o
 obj-$(CONFIG_MIPS_MSC)         += ops-msc.o
 obj-$(CONFIG_MIPS_NILE4)       += ops-nile4.o
index 6e72d21..73d1850 100644 (file)
@@ -29,7 +29,6 @@
  */
 #include <linux/types.h>
 #include <linux/pci.h>
-#include <linux/kernel.h>
 #include <linux/init.h>
 
 #include <asm/jmr3927/jmr3927.h>
@@ -81,14 +80,8 @@ int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 
        /* Check OnBoard Ethernet (IDSEL=A24, DevNu=13) */
        if (dev->bus->parent == NULL &&
-           slot == TX3927_PCIC_IDSEL_AD_TO_SLOT(24)) {
-               extern int jmr3927_ether1_irq;
-               /* check this irq line was reserved for ether1 */
-               if (jmr3927_ether1_irq != JMR3927_IRQ_ETHER0)
-                       irq = JMR3927_IRQ_ETHER0;
-               else
-                       irq = 0;        /* disable */
-       }
+           slot == TX3927_PCIC_IDSEL_AD_TO_SLOT(24))
+               irq = JMR3927_IRQ_ETHER0;
        return irq;
 }
 
diff --git a/arch/mips/pci/ops-gt64111.c b/arch/mips/pci/ops-gt64111.c
deleted file mode 100644 (file)
index ecd3991..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1995, 1996, 1997, 2002 by Ralf Baechle
- * Copyright (C) 2001, 2002, 2003 by Liam Davies (ldavies@agile.tv)
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/pci.h>
-#include <asm/io.h>
-#include <asm/gt64120.h>
-
-#include <asm/mach-cobalt/cobalt.h>
-
-/*
- * Device 31 on the GT64111 is used to generate PCI special
- * cycles, so we shouldn't expected to find a device there ...
- */
-static inline int pci_range_ck(struct pci_bus *bus, unsigned int devfn)
-{
-       if (bus->number == 0 && PCI_SLOT(devfn) < 31)
-               return 0;
-
-       return -1;
-}
-
-static int gt64111_pci_read_config(struct pci_bus *bus, unsigned int devfn,
-       int where, int size, u32 * val)
-{
-       if (pci_range_ck(bus, devfn))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       switch (size) {
-       case 4:
-               PCI_CFG_SET(devfn, where);
-               *val = GT_READ(GT_PCI0_CFGDATA_OFS);
-               return PCIBIOS_SUCCESSFUL;
-
-       case 2:
-               PCI_CFG_SET(devfn, (where & ~0x3));
-               *val = GT_READ(GT_PCI0_CFGDATA_OFS)
-                   >> ((where & 3) * 8);
-               return PCIBIOS_SUCCESSFUL;
-
-       case 1:
-               PCI_CFG_SET(devfn, (where & ~0x3));
-               *val = GT_READ(GT_PCI0_CFGDATA_OFS)
-                   >> ((where & 3) * 8);
-               return PCIBIOS_SUCCESSFUL;
-       }
-
-       return PCIBIOS_BAD_REGISTER_NUMBER;
-}
-
-static int gt64111_pci_write_config(struct pci_bus *bus, unsigned int devfn,
-       int where, int size, u32 val)
-{
-       u32 tmp;
-
-       if (pci_range_ck(bus, devfn))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       switch (size) {
-       case 4:
-               PCI_CFG_SET(devfn, where);
-               GT_WRITE(GT_PCI0_CFGDATA_OFS, val);
-
-               return PCIBIOS_SUCCESSFUL;
-
-       case 2:
-               PCI_CFG_SET(devfn, (where & ~0x3));
-               tmp = GT_READ(GT_PCI0_CFGDATA_OFS);
-               tmp &= ~(0xffff << ((where & 0x3) * 8));
-               tmp |= (val << ((where & 0x3) * 8));
-               GT_WRITE(GT_PCI0_CFGDATA_OFS, tmp);
-
-               return PCIBIOS_SUCCESSFUL;
-
-       case 1:
-               PCI_CFG_SET(devfn, (where & ~0x3));
-               tmp = GT_READ(GT_PCI0_CFGDATA_OFS);
-               tmp &= ~(0xff << ((where & 0x3) * 8));
-               tmp |= (val << ((where & 0x3) * 8));
-               GT_WRITE(GT_PCI0_CFGDATA_OFS, tmp);
-
-               return PCIBIOS_SUCCESSFUL;
-       }
-
-       return PCIBIOS_BAD_REGISTER_NUMBER;
-}
-
-struct pci_ops gt64111_pci_ops = {
-       .read = gt64111_pci_read_config,
-       .write = gt64111_pci_write_config,
-};
similarity index 80%
rename from arch/mips/pci/ops-gt64120.c
rename to arch/mips/pci/ops-gt64xxx_pci0.c
index 6335844..3d896c5 100644 (file)
@@ -39,8 +39,8 @@
 #define PCI_CFG_TYPE1_DEV_SHF           11
 #define PCI_CFG_TYPE1_BUS_SHF           16
 
-static int gt64120_pcibios_config_access(unsigned char access_type,
-       struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
+static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
+               struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
 {
        unsigned char busnum = bus->number;
        u32 intr;
@@ -100,13 +100,13 @@ static int gt64120_pcibios_config_access(unsigned char access_type,
  * We can't address 8 and 16 bit words directly.  Instead we have to
  * read/write a 32bit word and mask/modify the data we actually want.
  */
-static int gt64120_pcibios_read(struct pci_bus *bus, unsigned int devfn,
-                                int where, int size, u32 * val)
+static int gt64xxx_pci0_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+               int where, int size, u32 * val)
 {
        u32 data = 0;
 
-       if (gt64120_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
-                                         &data))
+       if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
+                                              where, &data))
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        if (size == 1)
@@ -119,16 +119,16 @@ static int gt64120_pcibios_read(struct pci_bus *bus, unsigned int devfn,
        return PCIBIOS_SUCCESSFUL;
 }
 
-static int gt64120_pcibios_write(struct pci_bus *bus, unsigned int devfn,
-                             int where, int size, u32 val)
+static int gt64xxx_pci0_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+               int where, int size, u32 val)
 {
        u32 data = 0;
 
        if (size == 4)
                data = val;
        else {
-               if (gt64120_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
-                                                 where, &data))
+               if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus,
+                                                      devfn, where, &data))
                        return PCIBIOS_DEVICE_NOT_FOUND;
 
                if (size == 1)
@@ -139,14 +139,14 @@ static int gt64120_pcibios_write(struct pci_bus *bus, unsigned int devfn,
                                (val << ((where & 3) << 3));
        }
 
-       if (gt64120_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
-                                      &data))
+       if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn,
+                                              where, &data))
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        return PCIBIOS_SUCCESSFUL;
 }
 
-struct pci_ops gt64120_pci_ops = {
-       .read = gt64120_pcibios_read,
-       .write = gt64120_pcibios_write
+struct pci_ops gt64xxx_pci0_ops = {
+       .read   = gt64xxx_pci0_pcibios_read,
+       .write  = gt64xxx_pci0_pcibios_write
 };
index 42530a0..aa698bd 100644 (file)
@@ -40,7 +40,6 @@
 
 #include <asm/addrspace.h>
 #include <asm/jmr3927/jmr3927.h>
-#include <asm/debug.h>
 
 static inline int mkaddr(unsigned char bus, unsigned char dev_fn,
        unsigned char where)
@@ -130,234 +129,3 @@ struct pci_ops jmr3927_pci_ops = {
        jmr3927_pci_read_config,
        jmr3927_pci_write_config,
 };
-
-
-#ifndef JMR3927_INIT_INDIRECT_PCI
-
-inline unsigned long tc_readl(volatile __u32 * addr)
-{
-       return readl(addr);
-}
-
-inline void tc_writel(unsigned long data, volatile __u32 * addr)
-{
-       writel(data, addr);
-}
-#else
-
-unsigned long tc_readl(volatile __u32 * addr)
-{
-       unsigned long val;
-
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr =
-           (unsigned long) CPHYSADDR(addr);
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe =
-           (PCI_IPCIBE_ICMD_MEMREAD << PCI_IPCIBE_ICMD_SHIFT) |
-           PCI_IPCIBE_IBE_LONG;
-       while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC));
-       val =
-           le32_to_cpu(*(volatile u32 *) (unsigned long) & tx3927_pcicptr->
-                       ipcidata);
-       /* clear by setting */
-       tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;
-       return val;
-}
-
-void tc_writel(unsigned long data, volatile __u32 * addr)
-{
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcidata =
-           cpu_to_le32(data);
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr =
-           (unsigned long) CPHYSADDR(addr);
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe =
-           (PCI_IPCIBE_ICMD_MEMWRITE << PCI_IPCIBE_ICMD_SHIFT) |
-           PCI_IPCIBE_IBE_LONG;
-       while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC));
-       /* clear by setting */
-       tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;
-}
-
-unsigned char tx_ioinb(unsigned char *addr)
-{
-       unsigned long val;
-       __u32 ioaddr;
-       int offset;
-       int byte;
-
-       ioaddr = (unsigned long) addr;
-       offset = ioaddr & 0x3;
-       byte = 0xf & ~(8 >> offset);
-
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr =
-           (unsigned long) ioaddr;
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe =
-           (PCI_IPCIBE_ICMD_IOREAD << PCI_IPCIBE_ICMD_SHIFT) | byte;
-       while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC));
-       val =
-           le32_to_cpu(*(volatile u32 *) (unsigned long) & tx3927_pcicptr->
-                       ipcidata);
-       val = val & 0xff;
-       /* clear by setting */
-       tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;
-       return val;
-}
-
-void tx_iooutb(unsigned long data, unsigned char *addr)
-{
-       __u32 ioaddr;
-       int offset;
-       int byte;
-
-       data = data | (data << 8) | (data << 16) | (data << 24);
-       ioaddr = (unsigned long) addr;
-       offset = ioaddr & 0x3;
-       byte = 0xf & ~(8 >> offset);
-
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcidata = data;
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr =
-           (unsigned long) ioaddr;
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe =
-           (PCI_IPCIBE_ICMD_IOWRITE << PCI_IPCIBE_ICMD_SHIFT) | byte;
-       while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC));
-       /* clear by setting */
-       tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;
-}
-
-unsigned short tx_ioinw(unsigned short *addr)
-{
-       unsigned long val;
-       __u32 ioaddr;
-       int offset;
-       int byte;
-
-       ioaddr = (unsigned long) addr;
-       offset = ioaddr & 0x2;
-       byte = 3 << offset;
-
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr =
-           (unsigned long) ioaddr;
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe =
-           (PCI_IPCIBE_ICMD_IOREAD << PCI_IPCIBE_ICMD_SHIFT) | byte;
-       while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC));
-       val =
-           le32_to_cpu(*(volatile u32 *) (unsigned long) & tx3927_pcicptr->
-                       ipcidata);
-       val = val & 0xffff;
-       /* clear by setting */
-       tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;
-       return val;
-
-}
-
-void tx_iooutw(unsigned long data, unsigned short *addr)
-{
-       __u32 ioaddr;
-       int offset;
-       int byte;
-
-       data = data | (data << 16);
-       ioaddr = (unsigned long) addr;
-       offset = ioaddr & 0x2;
-       byte = 3 << offset;
-
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcidata = data;
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr =
-           (unsigned long) ioaddr;
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe =
-           (PCI_IPCIBE_ICMD_IOWRITE << PCI_IPCIBE_ICMD_SHIFT) | byte;
-       while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC));
-       /* clear by setting */
-       tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;
-}
-
-unsigned long tx_ioinl(unsigned int *addr)
-{
-       unsigned long val;
-       __u32 ioaddr;
-
-       ioaddr = (unsigned long) addr;
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr =
-           (unsigned long) ioaddr;
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe =
-           (PCI_IPCIBE_ICMD_IOREAD << PCI_IPCIBE_ICMD_SHIFT) |
-           PCI_IPCIBE_IBE_LONG;
-       while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC));
-       val =
-           le32_to_cpu(*(volatile u32 *) (unsigned long) & tx3927_pcicptr->
-                       ipcidata);
-       /* clear by setting */
-       tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;
-       return val;
-}
-
-void tx_iooutl(unsigned long data, unsigned int *addr)
-{
-       __u32 ioaddr;
-
-       ioaddr = (unsigned long) addr;
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcidata =
-           cpu_to_le32(data);
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr =
-           (unsigned long) ioaddr;
-       *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe =
-           (PCI_IPCIBE_ICMD_IOWRITE << PCI_IPCIBE_ICMD_SHIFT) |
-           PCI_IPCIBE_IBE_LONG;
-       while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC));
-       /* clear by setting */
-       tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;
-}
-
-void tx_insbyte(unsigned char *addr, void *buffer, unsigned int count)
-{
-       unsigned char *ptr = (unsigned char *) buffer;
-
-       while (count--) {
-               *ptr++ = tx_ioinb(addr);
-       }
-}
-
-void tx_insword(unsigned short *addr, void *buffer, unsigned int count)
-{
-       unsigned short *ptr = (unsigned short *) buffer;
-
-       while (count--) {
-               *ptr++ = tx_ioinw(addr);
-       }
-}
-
-void tx_inslong(unsigned int *addr, void *buffer, unsigned int count)
-{
-       unsigned long *ptr = (unsigned long *) buffer;
-
-       while (count--) {
-               *ptr++ = tx_ioinl(addr);
-       }
-}
-
-void tx_outsbyte(unsigned char *addr, void *buffer, unsigned int count)
-{
-       unsigned char *ptr = (unsigned char *) buffer;
-
-       while (count--) {
-               tx_iooutb(*ptr++, addr);
-       }
-}
-
-void tx_outsword(unsigned short *addr, void *buffer, unsigned int count)
-{
-       unsigned short *ptr = (unsigned short *) buffer;
-
-       while (count--) {
-               tx_iooutw(*ptr++, addr);
-       }
-}
-
-void tx_outslong(unsigned int *addr, void *buffer, unsigned int count)
-{
-       unsigned long *ptr = (unsigned long *) buffer;
-
-       while (count--) {
-               tx_iooutl(*ptr++, addr);
-       }
-}
-#endif
index 88fb191..985784a 100644 (file)
@@ -12,7 +12,7 @@
 #include <asm/bootinfo.h>
 
 extern struct pci_ops nile4_pci_ops;
-extern struct pci_ops gt64120_pci_ops;
+extern struct pci_ops gt64xxx_pci0_ops;
 static struct resource lasat_pci_mem_resource = {
        .name   = "LASAT PCI MEM",
        .start  = 0x18000000,
@@ -38,7 +38,7 @@ static int __init lasat_pci_setup(void)
 
        switch (mips_machtype) {
        case MACH_LASAT_100:
-                lasat_pci_controller.pci_ops = &gt64120_pci_ops;
+                lasat_pci_controller.pci_ops = &gt64xxx_pci0_ops;
                 break;
        case MACH_LASAT_200:
                 lasat_pci_controller.pci_ops = &nile4_pci_ops;
index 2b9495d..7f94f26 100644 (file)
@@ -81,7 +81,7 @@ static struct resource ocelot_io_resource = {
 };
 
 static struct pci_controller ocelot_pci_controller = {
-       .pci_ops        = gt64120_pci_ops;
+       .pci_ops        = gt64xxx_pci0_ops;
        .mem_resource   = &ocelot_mem_resource;
        .io_resource    = &ocelot_io_resource;
 };
index de7cfc5..8108231 100644 (file)
@@ -77,6 +77,13 @@ pcibios_align_resource(void *data, struct resource *res,
 
 void __init register_pci_controller(struct pci_controller *hose)
 {
+       if (request_resource(&iomem_resource, hose->mem_resource) < 0)
+               goto out;
+       if (request_resource(&ioport_resource, hose->io_resource) < 0) {
+               release_resource(hose->mem_resource);
+               goto out;
+       }
+
        *hose_tail = hose;
        hose_tail = &hose->next;
 
@@ -87,6 +94,11 @@ void __init register_pci_controller(struct pci_controller *hose)
                printk(KERN_WARNING
                       "registering PCI controller with io_map_base unset\n");
        }
+       return;
+
+out:
+       printk(KERN_WARNING
+              "Skipping PCI bus scan due to resource conflict\n");
 }
 
 /* Most MIPS systems have straight-forward swizzling needs.  */
@@ -121,11 +133,6 @@ static int __init pcibios_init(void)
        /* Scan all of the recorded PCI controllers.  */
        for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
 
-               if (request_resource(&iomem_resource, hose->mem_resource) < 0)
-                       goto out;
-               if (request_resource(&ioport_resource, hose->io_resource) < 0)
-                       goto out_free_mem_resource;
-
                if (!hose->iommu)
                        PCI_DMA_BUS_IS_PHYS = 1;
 
@@ -144,14 +151,6 @@ static int __init pcibios_init(void)
                                need_domain_info = 1;
                        }
                }
-               continue;
-
-out_free_mem_resource:
-               release_resource(hose->mem_resource);
-
-out:
-               printk(KERN_WARNING
-                      "Skipping PCI bus scan due to resource conflict\n");
        }
 
        if (!pci_probe_only)
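
With register_pci_controller() now claiming the memory and I/O windows itself, a board must pass fully initialized resources at registration time. A hedged sketch of a caller; the controller name, window ranges and initcall level are placeholders, and only gt64xxx_pci0_ops and register_pci_controller() come from the patch:

#include <linux/init.h>
#include <linux/ioport.h>
#include <asm/pci.h>

extern struct pci_ops gt64xxx_pci0_ops;		/* renamed ops, see the hunks above */

static struct resource example_pci_mem = {
	.name	= "example PCI MEM",
	.start	= 0x10000000,
	.end	= 0x17ffffff,
	.flags	= IORESOURCE_MEM,
};

static struct resource example_pci_io = {
	.name	= "example PCI I/O",
	.start	= 0x00001000,
	.end	= 0x0000ffff,
	.flags	= IORESOURCE_IO,
};

static struct pci_controller example_controller = {
	.pci_ops	= &gt64xxx_pci0_ops,
	.mem_resource	= &example_pci_mem,
	.io_resource	= &example_pci_io,
};

static int __init example_pci_setup(void)
{
	/* Windows must be final here: registration request_resource()s them. */
	register_pci_controller(&example_controller);
	return 0;
}
arch_initcall(example_pci_setup);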
index fd29fd4..e19d60d 100644 (file)
@@ -52,8 +52,7 @@
  * national semiconductor nv ram chip the op code is 3 bits and
  * the address is 6/8 bits.
  */
-static inline void eeprom_cmd(volatile unsigned int *ctrl, unsigned cmd,
-                             unsigned reg)
+static inline void eeprom_cmd(unsigned int *ctrl, unsigned cmd, unsigned reg)
 {
        unsigned short ser_cmd;
        int i;
@@ -61,33 +60,34 @@ static inline void eeprom_cmd(volatile unsigned int *ctrl, unsigned cmd,
        ser_cmd = cmd | (reg << (16 - BITS_IN_COMMAND));
        for (i = 0; i < BITS_IN_COMMAND; i++) {
                if (ser_cmd & (1<<15))  /* if high order bit set */
-                       *ctrl |= EEPROM_DATO;
+                       writel(readl(ctrl) | EEPROM_DATO, ctrl);
                else
-                       *ctrl &= ~EEPROM_DATO;
-               *ctrl &= ~EEPROM_ECLK;
-               *ctrl |= EEPROM_ECLK;
+                       writel(readl(ctrl) & ~EEPROM_DATO, ctrl);
+               writel(readl(ctrl) & ~EEPROM_ECLK, ctrl);
+               writel(readl(ctrl) | EEPROM_ECLK, ctrl);
                ser_cmd <<= 1;
        }
-       *ctrl &= ~EEPROM_DATO;  /* see data sheet timing diagram */
+       /* see data sheet timing diagram */
+       writel(readl(ctrl) & ~EEPROM_DATO, ctrl);
 }
 
-unsigned short ip22_eeprom_read(volatile unsigned int *ctrl, int reg)
+unsigned short ip22_eeprom_read(unsigned int *ctrl, int reg)
 {
        unsigned short res = 0;
        int i;
 
-       *ctrl &= ~EEPROM_EPROT;
+       writel(readl(ctrl) & ~EEPROM_EPROT, ctrl);
        eeprom_cs_on(ctrl);
        eeprom_cmd(ctrl, EEPROM_READ, reg);
 
        /* clock the data ouf of serial mem */
        for (i = 0; i < 16; i++) {
-               *ctrl &= ~EEPROM_ECLK;
+               writel(readl(ctrl) & ~EEPROM_ECLK, ctrl);
                delay();
-               *ctrl |= EEPROM_ECLK;
+               writel(readl(ctrl) | EEPROM_ECLK, ctrl);
                delay();
                res <<= 1;
-               if (*ctrl & EEPROM_DATI)
+               if (readl(ctrl) & EEPROM_DATI)
                        res |= 1;
        }
 
index 2055547..8e88a44 100644 (file)
@@ -94,7 +94,7 @@ static int indy_rtc_set_time(unsigned long tim)
 static unsigned long dosample(void)
 {
        u32 ct0, ct1;
-       volatile u8 msb, lsb;
+       u8 msb, lsb;
 
        /* Start the counter. */
        sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL |
@@ -107,21 +107,21 @@ static unsigned long dosample(void)
 
        /* Latch and spin until top byte of counter2 is zero */
        do {
-               sgint->tcword = SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT;
-               lsb = sgint->tcnt2;
-               msb = sgint->tcnt2;
+               writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword);
+               lsb = readb(&sgint->tcnt2);
+               msb = readb(&sgint->tcnt2);
                ct1 = read_c0_count();
        } while (msb);
 
        /* Stop the counter. */
-       sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL |
-                        SGINT_TCWORD_MSWST);
+       writeb(sgint->tcword, (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL |
+                              SGINT_TCWORD_MSWST));
        /*
         * Return the difference, this is how far the r4k counter increments
         * for every 1/HZ seconds. We round off the nearest 1 MHz of master
         * clock (= 1000000 / HZ / 2).
         */
-       /*return (ct1 - ct0 + (500000/HZ/2)) / (500000/HZ) * (500000/HZ);*/
+
        return (ct1 - ct0) / (500000/HZ) * (500000/HZ);
 }
 
index bdf24a7..e6b003e 100644 (file)
@@ -2,6 +2,7 @@ config SIBYTE_SB1250
        bool
        select HW_HAS_PCI
        select SIBYTE_ENABLE_LDT_IF_PCI
+       select SIBYTE_HAS_ZBUS_PROFILING
        select SIBYTE_SB1xxx_SOC
        select SYS_SUPPORTS_SMP
 
@@ -34,6 +35,7 @@ config SIBYTE_BCM112X
 config SIBYTE_BCM1x80
        bool
        select HW_HAS_PCI
+       select SIBYTE_HAS_ZBUS_PROFILING
        select SIBYTE_SB1xxx_SOC
        select SYS_SUPPORTS_SMP
 
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
new file mode 100644 (file)
index 0000000..8a06a4f
--- /dev/null
@@ -0,0 +1,5 @@
+obj-y :=
+
+obj-$(CONFIG_SIBYTE_TBPROF)            += sb_tbprof.o
+
+EXTRA_AFLAGS := $(CFLAGS)
similarity index 80%
rename from arch/mips/sibyte/sb1250/bcm1250_tbprof.c
rename to arch/mips/sibyte/common/sb_tbprof.c
index ea0ca13..4fcdaa8 100644 (file)
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/errno.h>
-#include <linux/types.h>
 #include <linux/wait.h>
-
 #include <asm/io.h>
 #include <asm/sibyte/sb1250.h>
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_scd.h>
+#include <asm/sibyte/bcm1480_int.h>
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 #include <asm/sibyte/sb1250_regs.h>
 #include <asm/sibyte/sb1250_scd.h>
 #include <asm/sibyte/sb1250_int.h>
+#else
+#error invalid SiByte UART configuation
+#endif
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#undef K_INT_TRACE_FREEZE
+#define K_INT_TRACE_FREEZE K_BCM1480_INT_TRACE_FREEZE
+#undef K_INT_PERF_CNT
+#define K_INT_PERF_CNT K_BCM1480_INT_PERF_CNT
+#endif
+
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
@@ -118,7 +133,7 @@ static struct sbprof_tb sbp;
                        : /* inputs */ \
                        : /* modifies */ "$8" )
 
-#define DEVNAME "bcm1250_tbprof"
+#define DEVNAME "sb_tbprof"
 
 #define TB_FULL (sbp.next_tb_sample == MAX_TB_SAMPLES)
 
@@ -132,6 +147,7 @@ static struct sbprof_tb sbp;
  * overflow.
  *
  * We map the interrupt for trace_buffer_freeze to handle it on CPU 0.
+ *
  */
 
 static u64 tb_period;
@@ -143,25 +159,36 @@ static void arm_tb(void)
        u64 tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;
 
        /*
-        * Generate an SCD_PERFCNT interrupt in TB_PERIOD Zclks to trigger
-        *start of trace.  XXX vary sampling period
+        * Generate an SCD_PERFCNT interrupt in TB_PERIOD Zclks to
+        * trigger start of trace.  XXX vary sampling period
         */
        __raw_writeq(0, IOADDR(A_SCD_PERF_CNT_1));
        scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));
 
        /*
-        * Unfortunately, in Pass 2 we must clear all counters to knock down a
-        * previous interrupt request.  This means that bus profiling requires
-        * ALL of the SCD perf counters.
+        * Unfortunately, in Pass 2 we must clear all counters to knock down
+        * a previous interrupt request.  This means that bus profiling
+        * requires ALL of the SCD perf counters.
         */
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+       __raw_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |
+                                               /* keep counters 0,2,3,4,5,6,7 as is */
+                    V_SPC_CFG_SRC1(1),         /* counter 1 counts cycles */
+                    IOADDR(A_BCM1480_SCD_PERF_CNT_CFG0));
+       __raw_writeq(
+                    M_SPC_CFG_ENABLE |         /* enable counting */
+                    M_SPC_CFG_CLEAR |          /* clear all counters */
+                    V_SPC_CFG_SRC1(1),         /* counter 1 counts cycles */
+                    IOADDR(A_BCM1480_SCD_PERF_CNT_CFG1));
+#else
        __raw_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |
                                                /* keep counters 0,2,3 as is */
                     M_SPC_CFG_ENABLE |         /* enable counting */
                     M_SPC_CFG_CLEAR |          /* clear all counters */
                     V_SPC_CFG_SRC1(1),         /* counter 1 counts cycles */
                     IOADDR(A_SCD_PERF_CNT_CFG));
+#endif
        __raw_writeq(next, IOADDR(A_SCD_PERF_CNT_1));
-
        /* Reset the trace buffer */
        __raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
 #if 0 && defined(M_SCD_TRACE_CFG_FORCECNT)
@@ -190,38 +217,37 @@ static irqreturn_t sbprof_tb_intr(int irq, void *dev_id)
                        /* Subscripts decrease to put bundle in the order */
                        /*   t0 lo, t0 hi, t1 lo, t1 hi, t2 lo, t2 hi */
                        p[i - 1] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
-                                                               /* read t2 hi */
+                       /* read t2 hi */
                        p[i - 2] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
-                                                               /* read t2 lo */
+                       /* read t2 lo */
                        p[i - 3] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
-                                                               /* read t1 hi */
+                       /* read t1 hi */
                        p[i - 4] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
-                                                               /* read t1 lo */
+                       /* read t1 lo */
                        p[i - 5] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
-                                                               /* read t0 hi */
+                       /* read t0 hi */
                        p[i - 6] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
-                                                               /* read t0 lo */
+                       /* read t0 lo */
                }
                if (!sbp.tb_enable) {
                        pr_debug(DEVNAME ": tb_intr shutdown\n");
                        __raw_writeq(M_SCD_TRACE_CFG_RESET,
                                     IOADDR(A_SCD_TRACE_CFG));
                        sbp.tb_armed = 0;
-                       wake_up(&sbp.tb_sync);
+                       wake_up_interruptible(&sbp.tb_sync);
                } else {
-                       arm_tb();       /* knock down current interrupt and get another one later */
+                       /* knock down current interrupt and get another one later */
+                       arm_tb();
                }
        } else {
                /* No more trace buffer samples */
                pr_debug(DEVNAME ": tb_intr full\n");
                __raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
                sbp.tb_armed = 0;
-               if (!sbp.tb_enable) {
-                       wake_up(&sbp.tb_sync);
-               }
-               wake_up(&sbp.tb_read);
+               if (!sbp.tb_enable)
+                       wake_up_interruptible(&sbp.tb_sync);
+               wake_up_interruptible(&sbp.tb_read);
        }
-
        return IRQ_HANDLED;
 }
 
@@ -250,8 +276,8 @@ static int sbprof_zbprof_start(struct file *filp)
        sbp.next_tb_sample = 0;
        filp->f_pos = 0;
 
-       err = request_irq(K_INT_TRACE_FREEZE, sbprof_tb_intr, 0,
-                       DEVNAME " trace freeze", &sbp);
+       err = request_irq (K_INT_TRACE_FREEZE, sbprof_tb_intr, 0,
+                          DEVNAME " trace freeze", &sbp);
        if (err)
                return -EBUSY;
 
@@ -263,23 +289,29 @@ static int sbprof_zbprof_start(struct file *filp)
                     IOADDR(A_SCD_PERF_CNT_CFG));
 
        /*
-        * We grab this interrupt to prevent others from trying to use it, even
-        * though we don't want to service the interrupts (they only feed into
-        * the trace-on-interrupt mechanism)
+        * We grab this interrupt to prevent others from trying to use
+         * it, even though we don't want to service the interrupts
+         * (they only feed into the trace-on-interrupt mechanism)
         */
-       err = request_irq(K_INT_PERF_CNT, sbprof_pc_intr, 0,
-                       DEVNAME " scd perfcnt", &sbp);
-       if (err)
-               goto out_free_irq;
+       if (request_irq(K_INT_PERF_CNT, sbprof_pc_intr, 0, DEVNAME " scd perfcnt", &sbp)) {
+               free_irq(K_INT_TRACE_FREEZE, &sbp);
+               return -EBUSY;
+       }
 
        /*
-        * I need the core to mask these, but the interrupt mapper to pass them
-        * through.  I am exploiting my knowledge that cp0_status masks out
-        * IP[5]. krw
+        * I need the core to mask these, but the interrupt mapper to
+        *  pass them through.  I am exploiting my knowledge that
+        *  cp0_status masks out IP[5]. krw
         */
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+       __raw_writeq(K_BCM1480_INT_MAP_I3,
+                    IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_INTERRUPT_MAP_BASE_L) +
+                           ((K_BCM1480_INT_PERF_CNT & 0x3f) << 3)));
+#else
        __raw_writeq(K_INT_MAP_I3,
                     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
                            (K_INT_PERF_CNT << 3)));
+#endif
 
        /* Initialize address traps */
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_0));
@@ -298,7 +330,7 @@ static int sbprof_zbprof_start(struct file *filp)
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));
 
        /* Initialize Trace Event 0-7 */
-       /*                              when interrupt */
+       /*                              when interrupt  */
        __raw_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
@@ -324,24 +356,23 @@ static int sbprof_zbprof_start(struct file *filp)
        __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_7));
 
        /* Now indicate the PERF_CNT interrupt as a trace-relevant interrupt */
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+       __raw_writeq(1ULL << (K_BCM1480_INT_PERF_CNT & 0x3f),
+                    IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_INTERRUPT_TRACE_L)));
+#else
        __raw_writeq(1ULL << K_INT_PERF_CNT,
                     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_TRACE)));
-
+#endif
        arm_tb();
 
        pr_debug(DEVNAME ": done starting\n");
 
        return 0;
-
-out_free_irq:
-       free_irq(K_INT_TRACE_FREEZE, &sbp);
-
-       return err;
 }
 
 static int sbprof_zbprof_stop(void)
 {
-       int err;
+       int err = 0;
 
        pr_debug(DEVNAME ": stopping\n");
 
@@ -365,7 +396,7 @@ static int sbprof_zbprof_stop(void)
 
        pr_debug(DEVNAME ": done stopping\n");
 
-       return 0;
+       return err;
 }
 
 static int sbprof_tb_open(struct inode *inode, struct file *filp)
@@ -380,11 +411,9 @@ static int sbprof_tb_open(struct inode *inode, struct file *filp)
                return -EBUSY;
 
        memset(&sbp, 0, sizeof(struct sbprof_tb));
-
        sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
        if (!sbp.sbprof_tbbuf)
                return -ENOMEM;
-
        memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
        init_waitqueue_head(&sbp.tb_sync);
        init_waitqueue_head(&sbp.tb_read);
@@ -397,8 +426,9 @@ static int sbprof_tb_open(struct inode *inode, struct file *filp)
 
 static int sbprof_tb_release(struct inode *inode, struct file *filp)
 {
-       int minor = iminor(inode);
+       int minor;
 
+       minor = iminor(inode);
        if (minor != 0 || !sbp.open)
                return -ENODEV;
 
@@ -419,10 +449,10 @@ static ssize_t sbprof_tb_read(struct file *filp, char *buf,
                              size_t size, loff_t *offp)
 {
        int cur_sample, sample_off, cur_count, sample_left;
-       long  cur_off = *offp;
-       char *dest    =  buf;
-       int   count   =  0;
        char *src;
+       int   count   =  0;
+       char *dest    =  buf;
+       long  cur_off = *offp;
 
        if (!access_ok(VERIFY_WRITE, buf, size))
                return -EFAULT;
@@ -445,7 +475,6 @@ static ssize_t sbprof_tb_read(struct file *filp, char *buf,
                        mutex_unlock(&sbp.lock);
                        return err;
                }
-
                pr_debug(DEVNAME ": read from sample %d, %d bytes\n",
                         cur_sample, cur_count);
                size -= cur_count;
@@ -461,45 +490,46 @@ static ssize_t sbprof_tb_read(struct file *filp, char *buf,
                dest += cur_count;
                count += cur_count;
        }
-
        *offp = cur_off;
        mutex_unlock(&sbp.lock);
 
        return count;
 }
 
-static long sbprof_tb_ioctl(struct file *filp, unsigned int command,
-       unsigned long arg)
+static long sbprof_tb_ioctl(struct file *filp,
+                           unsigned int command,
+                           unsigned long arg)
 {
-       int error = 0;
+       int err = 0;
 
        switch (command) {
        case SBPROF_ZBSTART:
                mutex_lock(&sbp.lock);
-               error = sbprof_zbprof_start(filp);
+               err = sbprof_zbprof_start(filp);
                mutex_unlock(&sbp.lock);
                break;
 
        case SBPROF_ZBSTOP:
                mutex_lock(&sbp.lock);
-               error = sbprof_zbprof_stop();
+               err = sbprof_zbprof_stop();
                mutex_unlock(&sbp.lock);
                break;
 
-       case SBPROF_ZBWAITFULL:
-               error = wait_event_interruptible(sbp.tb_read, TB_FULL);
-               if (error)
+       case SBPROF_ZBWAITFULL: {
+               err = wait_event_interruptible(sbp.tb_read, TB_FULL);
+               if (err)
                        break;
 
-               error = put_user(TB_FULL, (int *) arg);
+               err = put_user(TB_FULL, (int *) arg);
                break;
+       }
 
        default:
-               error = -EINVAL;
+               err = -EINVAL;
                break;
        }
 
-       return error;
+       return err;
 }
 
 static const struct file_operations sbprof_tb_fops = {
@@ -544,8 +574,8 @@ static int __init sbprof_tb_init(void)
 
        sbp.open = 0;
        tb_period = zbbus_mhz * 10000LL;
-       pr_info(DEVNAME ": initialized - tb_period = %lld\n", tb_period);
-
+       pr_info(DEVNAME ": initialized - tb_period = %lld\n",
+               (long long) tb_period);
        return 0;
 
 out_class:
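
The sbprof_tb_ioctl() dispatcher above exposes three commands: SBPROF_ZBSTART arms the trace buffer, SBPROF_ZBWAITFULL sleeps until TB_FULL and reports it via put_user(), and SBPROF_ZBSTOP tears sampling down. A minimal user-space sketch of that sequence follows; the header name and the /dev node path are assumptions for illustration only, and just the SBPROF_ZB* command names come from the driver code above.

/*
 * Hypothetical usage sketch, not part of this patch: drive the trace
 * buffer from user space.  "sbprof_ioctls.h" and "/dev/sbprof_tb" are
 * assumed names; only the SBPROF_ZB* ioctls come from the driver.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "sbprof_ioctls.h"		/* hypothetical: provides SBPROF_ZB* */

int main(void)
{
	int full = 0;
	int fd = open("/dev/sbprof_tb", O_RDONLY);	/* device node name assumed */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SBPROF_ZBSTART, 0) < 0) {		/* arm tracing (sbprof_zbprof_start) */
		perror("SBPROF_ZBSTART");
		close(fd);
		return 1;
	}
	if (ioctl(fd, SBPROF_ZBWAITFULL, &full) < 0)	/* block until TB_FULL */
		perror("SBPROF_ZBWAITFULL");
	if (ioctl(fd, SBPROF_ZBSTOP, 0) < 0)		/* stop sampling (sbprof_zbprof_stop) */
		perror("SBPROF_ZBSTOP");
	/* the collected samples could now be pulled out with read(fd, ...) */
	close(fd);
	return 0;
}
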
index 04c0f1a..df662c6 100644 (file)
@@ -1,6 +1,5 @@
 obj-y := setup.o irq.o time.o
 
 obj-$(CONFIG_SMP)                      += smp.o
-obj-$(CONFIG_SIBYTE_TBPROF)            += bcm1250_tbprof.o
 obj-$(CONFIG_SIBYTE_STANDALONE)                += prom.o
 obj-$(CONFIG_SIBYTE_BUS_WATCHER)       += bus_watcher.o
index 87188f0..f4a6169 100644 (file)
@@ -141,6 +141,18 @@ static int __init setup_bcm112x(void)
                periph_rev = 3;
                pass_str = "A2";
                break;
+       case K_SYS_REVISION_BCM112x_A3:
+               periph_rev = 3;
+               pass_str = "A3";
+               break;
+       case K_SYS_REVISION_BCM112x_A4:
+               periph_rev = 3;
+               pass_str = "A4";
+               break;
+       case K_SYS_REVISION_BCM112x_B0:
+               periph_rev = 3;
+               pass_str = "B0";
+               break;
        default:
                printk("Unknown %s rev %x\n", soc_str, soc_pass);
                ret = 1;
index 8e8593b..9ee208d 100644 (file)
@@ -91,7 +91,7 @@ static struct platform_device pcimt_serial8250_device = {
 };
 
 static struct resource sni_io_resource = {
-       .start  = 0x00001000UL,
+       .start  = 0x00000000UL,
        .end    = 0x03bfffffUL,
        .name   = "PCIMT IO MEM",
        .flags  = IORESOURCE_IO,
@@ -132,107 +132,19 @@ static struct resource pcimt_io_resources[] = {
 };
 
 static struct resource sni_mem_resource = {
-       .start  = 0x10000000UL,
-       .end    = 0xffffffffUL,
+       .start  = 0x18000000UL,
+       .end    = 0x1fbfffffUL,
        .name   = "PCIMT PCI MEM",
        .flags  = IORESOURCE_MEM
 };
 
-/*
- * The RM200/RM300 has a few holes in it's PCI/EISA memory address space used
- * for other purposes.  Be paranoid and allocate all of the before the PCI
- * code gets a chance to to map anything else there ...
- *
- * This leaves the following areas available:
- *
- * 0x10000000 - 0x1009ffff (640kB) PCI/EISA/ISA Bus Memory
- * 0x10100000 - 0x13ffffff ( 15MB) PCI/EISA/ISA Bus Memory
- * 0x18000000 - 0x1fbfffff (124MB) PCI/EISA Bus Memory
- * 0x1ff08000 - 0x1ffeffff (816kB) PCI/EISA Bus Memory
- * 0xa0000000 - 0xffffffff (1.5GB) PCI/EISA Bus Memory
- */
-static struct resource pcimt_mem_resources[] = {
-       {
-               .start  = 0x100a0000,
-               .end    = 0x100bffff,
-               .name   = "Video RAM area",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x100c0000,
-               .end    = 0x100fffff,
-               .name   = "ISA Reserved",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x14000000,
-               .end    = 0x17bfffff,
-               .name   = "PCI IO",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x17c00000,
-               .end    = 0x17ffffff,
-               .name   = "Cache Replacement Area",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1a000000,
-               .end    = 0x1a000003,
-               .name   = "PCI INT Acknowledge",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1fc00000,
-               .end    = 0x1fc7ffff,
-               .name   = "Boot PROM",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1fc80000,
-               .end    = 0x1fcfffff,
-               .name   = "Diag PROM",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1fd00000,
-               .end    = 0x1fdfffff,
-               .name   = "X-Bus",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1fe00000,
-               .end    = 0x1fefffff,
-               .name   = "BIOS map",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1ff00000,
-               .end    = 0x1ff7ffff,
-               .name   = "NVRAM / EEPROM",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1fff0000,
-               .end    = 0x1fffefff,
-               .name   = "ASIC PCI",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1ffff000,
-               .end    = 0x1fffffff,
-               .name   = "MP Agent",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x20000000,
-               .end    = 0x9fffffff,
-               .name   = "Main Memory",
-               .flags  = IORESOURCE_BUSY
-       }
-};
-
 static void __init sni_pcimt_resource_init(void)
 {
        int i;
 
        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(pcimt_io_resources); i++)
-               request_resource(&ioport_resource, pcimt_io_resources + i);
-
-       /* request mem space for pcimt-specific devices */
-       for (i = 0; i < ARRAY_SIZE(pcimt_mem_resources); i++)
-               request_resource(&sni_mem_resource, pcimt_mem_resources + i);
-
-       ioport_resource.end = sni_io_resource.end;
+               request_resource(&sni_io_resource, pcimt_io_resources + i);
 }
 
 extern struct pci_ops sni_pcimt_ops;
@@ -240,9 +152,10 @@ extern struct pci_ops sni_pcimt_ops;
 static struct pci_controller sni_controller = {
        .pci_ops        = &sni_pcimt_ops,
        .mem_resource   = &sni_mem_resource,
-       .mem_offset     = 0x10000000UL,
+       .mem_offset     = 0x00000000UL,
        .io_resource    = &sni_io_resource,
-       .io_offset      = 0x00000000UL
+       .io_offset      = 0x00000000UL,
+       .io_map_base    = SNI_PORT_BASE
 };
 
 static void enable_pcimt_irq(unsigned int irq)
@@ -363,15 +276,17 @@ void __init sni_pcimt_irq_init(void)
 
 void sni_pcimt_init(void)
 {
-       sni_pcimt_resource_init();
        sni_pcimt_detect();
        sni_pcimt_sc_init();
        rtc_mips_get_time = mc146818_get_cmos_time;
        rtc_mips_set_time = mc146818_set_rtc_mmss;
        board_time_init = sni_cpu_time_init;
+       ioport_resource.end = sni_io_resource.end;
 #ifdef CONFIG_PCI
+       PCIBIOS_MIN_IO = 0x9000;
        register_pci_controller(&sni_controller);
 #endif
+       sni_pcimt_resource_init();
 }
 
 static int __init snirm_pcimt_setup_devinit(void)
index 1dfc3f0..00d151f 100644 (file)
@@ -43,7 +43,7 @@ static struct platform_device pcit_serial8250_device = {
 };
 
 static struct plat_serial8250_port pcit_cplus_data[] = {
-       PORT(0x3f8, 4),
+       PORT(0x3f8, 0),
        PORT(0x2f8, 3),
        PORT(0x3e8, 4),
        PORT(0x2e8, 3),
@@ -59,9 +59,9 @@ static struct platform_device pcit_cplus_serial8250_device = {
 };
 
 static struct resource sni_io_resource = {
-       .start  = 0x00001000UL,
+       .start  = 0x00000000UL,
        .end    = 0x03bfffffUL,
-       .name   = "PCIT IO MEM",
+       .name   = "PCIT IO",
        .flags  = IORESOURCE_IO,
 };
 
@@ -91,6 +91,11 @@ static struct resource pcit_io_resources[] = {
                .end    = 0xdf,
                .name   = "dma2",
                .flags  = IORESOURCE_BUSY
+       }, {
+               .start  =  0xcf8,
+               .end    = 0xcfb,
+               .name   = "PCI config addr",
+               .flags  = IORESOURCE_BUSY
        }, {
                .start  =  0xcfc,
                .end    = 0xcff,
@@ -100,107 +105,19 @@ static struct resource pcit_io_resources[] = {
 };
 
 static struct resource sni_mem_resource = {
-       .start  = 0x10000000UL,
-       .end    = 0xffffffffUL,
+       .start  = 0x18000000UL,
+       .end    = 0x1fbfffffUL,
        .name   = "PCIT PCI MEM",
        .flags  = IORESOURCE_MEM
 };
 
-/*
- * The RM200/RM300 has a few holes in it's PCI/EISA memory address space used
- * for other purposes.  Be paranoid and allocate all of the before the PCI
- * code gets a chance to to map anything else there ...
- *
- * This leaves the following areas available:
- *
- * 0x10000000 - 0x1009ffff (640kB) PCI/EISA/ISA Bus Memory
- * 0x10100000 - 0x13ffffff ( 15MB) PCI/EISA/ISA Bus Memory
- * 0x18000000 - 0x1fbfffff (124MB) PCI/EISA Bus Memory
- * 0x1ff08000 - 0x1ffeffff (816kB) PCI/EISA Bus Memory
- * 0xa0000000 - 0xffffffff (1.5GB) PCI/EISA Bus Memory
- */
-static struct resource pcit_mem_resources[] = {
-       {
-               .start  = 0x14000000,
-               .end    = 0x17bfffff,
-               .name   = "PCI IO",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x17c00000,
-               .end    = 0x17ffffff,
-               .name   = "Cache Replacement Area",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x180a0000,
-               .end    = 0x180bffff,
-               .name   = "Video RAM area",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x180c0000,
-               .end    = 0x180fffff,
-               .name   = "ISA Reserved",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x19000000,
-               .end    = 0x1fbfffff,
-               .name   = "PCI MEM",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1fc00000,
-               .end    = 0x1fc7ffff,
-               .name   = "Boot PROM",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1fc80000,
-               .end    = 0x1fcfffff,
-               .name   = "Diag PROM",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1fd00000,
-               .end    = 0x1fdfffff,
-               .name   = "X-Bus",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1fe00000,
-               .end    = 0x1fefffff,
-               .name   = "BIOS map",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1ff00000,
-               .end    = 0x1ff7ffff,
-               .name   = "NVRAM / EEPROM",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1fff0000,
-               .end    = 0x1fffefff,
-               .name   = "MAUI ASIC",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x1ffff000,
-               .end    = 0x1fffffff,
-               .name   = "MP Agent",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x20000000,
-               .end    = 0x9fffffff,
-               .name   = "Main Memory",
-               .flags  = IORESOURCE_BUSY
-       }
-};
-
 static void __init sni_pcit_resource_init(void)
 {
        int i;
 
        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(pcit_io_resources); i++)
-               request_resource(&ioport_resource, pcit_io_resources + i);
-
-       /* request mem space for pcimt-specific devices */
-       for (i = 0; i < ARRAY_SIZE(pcit_mem_resources); i++)
-               request_resource(&sni_mem_resource, pcit_mem_resources + i);
-
-       ioport_resource.end = sni_io_resource.end;
+               request_resource(&sni_io_resource, pcit_io_resources + i);
 }
 
 
@@ -209,9 +126,10 @@ extern struct pci_ops sni_pcit_ops;
 static struct pci_controller sni_pcit_controller = {
        .pci_ops        = &sni_pcit_ops,
        .mem_resource   = &sni_mem_resource,
-       .mem_offset     = 0x10000000UL,
+       .mem_offset     = 0x00000000UL,
        .io_resource    = &sni_io_resource,
-       .io_offset      = 0x00000000UL
+       .io_offset      = 0x00000000UL,
+       .io_map_base    = SNI_PORT_BASE
 };
 
 static void enable_pcit_irq(unsigned int irq)
@@ -262,7 +180,7 @@ static void pcit_hwint0(void)
        int irq;
 
        clear_c0_status(IE_IRQ0);
-       irq = ffs((pending >> 16) & 0x7f);
+       irq = ffs((pending >> 16) & 0x3f);
 
        if (likely(irq > 0))
                do_IRQ (irq + SNI_PCIT_INT_START - 1);
@@ -289,6 +207,8 @@ static void sni_pcit_hwint_cplus(void)
 
        if (pending & C_IRQ0)
                pcit_hwint0();
+       else if (pending & C_IRQ1)
+               do_IRQ (MIPS_CPU_IRQ_BASE + 3);
        else if (pending & C_IRQ2)
                do_IRQ (MIPS_CPU_IRQ_BASE + 4);
        else if (pending & C_IRQ3)
@@ -317,21 +237,23 @@ void __init sni_pcit_cplus_irq_init(void)
        mips_cpu_irq_init();
        for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++)
                set_irq_chip(i, &pcit_irq_type);
-       *(volatile u32 *)SNI_PCIT_INT_REG = 0;
+       *(volatile u32 *)SNI_PCIT_INT_REG = 0x40000000;
        sni_hwint = sni_pcit_hwint_cplus;
        change_c0_status(ST0_IM, IE_IRQ0);
-       setup_irq (SNI_PCIT_INT_START + 6, &sni_isa_irq);
+       setup_irq (MIPS_CPU_IRQ_BASE + 3, &sni_isa_irq);
 }
 
 void sni_pcit_init(void)
 {
-       sni_pcit_resource_init();
        rtc_mips_get_time = mc146818_get_cmos_time;
        rtc_mips_set_time = mc146818_set_rtc_mmss;
        board_time_init = sni_cpu_time_init;
+       ioport_resource.end = sni_io_resource.end;
 #ifdef CONFIG_PCI
+       PCIBIOS_MIN_IO = 0x9000;
        register_pci_controller(&sni_pcit_controller);
 #endif
+       sni_pcit_resource_init();
 }
 
 static int __init snirm_pcit_setup_devinit(void)
index 92f41f6..8f4d3e7 100644 (file)
@@ -1,6 +1,10 @@
-config CASIO_E55
-       bool "Support for CASIO CASSIOPEIA E-10/15/55/65"
+choice
+       prompt "Machine type"
        depends on MACH_VR41XX
+       default TANBAC_TB022X
+
+config CASIO_E55
+       bool "CASIO CASSIOPEIA E-10/15/55/65"
        select DMA_NONCOHERENT
        select IRQ_CPU
        select ISA
@@ -8,8 +12,7 @@ config CASIO_E55
        select SYS_SUPPORTS_LITTLE_ENDIAN
 
 config IBM_WORKPAD
-       bool "Support for IBM WorkPad z50"
-       depends on MACH_VR41XX
+       bool "IBM WorkPad z50"
        select DMA_NONCOHERENT
        select IRQ_CPU
        select ISA
@@ -17,26 +20,18 @@ config IBM_WORKPAD
        select SYS_SUPPORTS_LITTLE_ENDIAN
 
 config NEC_CMBVR4133
-       bool "Support for NEC CMB-VR4133"
-       depends on MACH_VR41XX
+       bool "NEC CMB-VR4133"
        select DMA_NONCOHERENT
        select IRQ_CPU
        select HW_HAS_PCI
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
 
-config ROCKHOPPER
-       bool "Support for Rockhopper baseboard"
-       depends on NEC_CMBVR4133
-       select I8259
-       select HAVE_STD_PC_SERIAL_PORT
-
 config TANBAC_TB022X
-       bool "Support for TANBAC VR4131 multichip module and TANBAC VR4131DIMM"
-       depends on MACH_VR41XX
+       bool "TANBAC VR4131 multichip module and TANBAC VR4131DIMM"
        select DMA_NONCOHERENT
-       select HW_HAS_PCI
        select IRQ_CPU
+       select HW_HAS_PCI
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
        help
@@ -46,40 +41,65 @@ config TANBAC_TB022X
          Please refer to <http://www.tanbac.co.jp/>
          about VR4131 multichip module and VR4131DIMM.
 
-config TANBAC_TB0226
-       bool "Support for TANBAC Mbase(TB0226)"
+config VICTOR_MPC30X
+       bool "Victor MP-C303/304"
+       select DMA_NONCOHERENT
+       select IRQ_CPU
+       select HW_HAS_PCI
+       select PCI_VR41XX
+       select SYS_SUPPORTS_32BIT_KERNEL
+       select SYS_SUPPORTS_LITTLE_ENDIAN
+
+config ZAO_CAPCELLA
+       bool "ZAO Networks Capcella"
+       select DMA_NONCOHERENT
+       select IRQ_CPU
+       select HW_HAS_PCI
+       select PCI_VR41XX
+       select SYS_SUPPORTS_32BIT_KERNEL
+       select SYS_SUPPORTS_LITTLE_ENDIAN
+
+endchoice
+
+config ROCKHOPPER
+       bool "Support for Rockhopper base board"
+       depends on NEC_CMBVR4133
+       select PCI_VR41XX
+       select I8259
+       select HAVE_STD_PC_SERIAL_PORT
+
+choice
+       prompt "Base board type"
        depends on TANBAC_TB022X
+       default TANBAC_TB0287
+
+config TANBAC_TB0219
+       bool "TANBAC DIMM Evaluation Kit(TB0219)"
        select GPIO_VR41XX
+       select PCI_VR41XX
+       help
+         The TANBAC DIMM Evaluation Kit(TB0219) is a MIPS-based platform
+         manufactured by TANBAC.
+         Please refer to <http://www.tanbac.co.jp/> about DIMM Evaluation Kit.
+
+config TANBAC_TB0226
+       bool "TANBAC Mbase(TB0226)"
+       select GPIO_VR41XX
+       select PCI_VR41XX
        help
          The TANBAC Mbase(TB0226) is a MIPS-based platform
          manufactured by TANBAC.
          Please refer to <http://www.tanbac.co.jp/> about Mbase.
 
 config TANBAC_TB0287
-       bool "Support for TANBAC Mini-ITX DIMM base(TB0287)"
-       depends on TANBAC_TB022X
+       bool "TANBAC Mini-ITX DIMM base(TB0287)"
+       select PCI_VR41XX
        help
          The TANBAC Mini-ITX DIMM base(TB0287) is a MIPS-based platform
          manufactured by TANBAC.
          Please refer to <http://www.tanbac.co.jp/> about Mini-ITX DIMM base.
 
-config VICTOR_MPC30X
-       bool "Support for Victor MP-C303/304"
-       depends on MACH_VR41XX
-       select DMA_NONCOHERENT
-       select HW_HAS_PCI
-       select IRQ_CPU
-       select SYS_SUPPORTS_32BIT_KERNEL
-       select SYS_SUPPORTS_LITTLE_ENDIAN
-
-config ZAO_CAPCELLA
-       bool "Support for ZAO Networks Capcella"
-       depends on MACH_VR41XX
-       select DMA_NONCOHERENT
-       select HW_HAS_PCI
-       select IRQ_CPU
-       select SYS_SUPPORTS_32BIT_KERNEL
-       select SYS_SUPPORTS_LITTLE_ENDIAN
+endchoice
 
 config PCI_VR41XX
        bool "Add PCI control unit support of NEC VR4100 series"
index ecee596..2f8e9c0 100644 (file)
@@ -84,8 +84,6 @@ EXPORT_SYMBOL(strncpy);
 EXPORT_SYMBOL(strcat);
 EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strcasecmp);
-EXPORT_SYMBOL(strncasecmp);
 
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
index 4b1ba49..450258d 100644 (file)
@@ -7,13 +7,12 @@ EXTRA_CFLAGS          += -mno-minimal-toc
 endif
 
 ifeq ($(CONFIG_PPC_MERGE),y)
-obj-y                  := string.o strcase.o
+obj-y                  := string.o
 obj-$(CONFIG_PPC32)    += div64.o copy_32.o checksum_32.o
 endif
 
 obj-$(CONFIG_PPC64)    += checksum_64.o copypage_64.o copyuser_64.o \
-                          memcpy_64.o usercopy_64.o mem_64.o string.o \
-                          strcase.o
+                          memcpy_64.o usercopy_64.o mem_64.o string.o
 obj-$(CONFIG_QUICC_ENGINE) += rheap.o
 obj-$(CONFIG_XMON)     += sstep.o
 obj-$(CONFIG_KPROBES)  += sstep.o
diff --git a/arch/powerpc/lib/strcase.c b/arch/powerpc/lib/strcase.c
deleted file mode 100644 (file)
index f8ec1eb..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-
-int strcasecmp(const char *s1, const char *s2)
-{
-       int c1, c2;
-
-       do {
-               c1 = tolower(*s1++);
-               c2 = tolower(*s2++);
-       } while (c1 == c2 && c1 != 0);
-       return c1 - c2;
-}
-
-int strncasecmp(const char *s1, const char *s2, size_t n)
-{
-       int c1, c2;
-
-       do {
-               c1 = tolower(*s1++);
-               c2 = tolower(*s2++);
-       } while ((--n > 0) && c1 == c2 && c1 != 0);
-       return c1 - c2;
-}
index a6056c2..48ce84f 100644 (file)
@@ -477,7 +477,6 @@ for (;;) {
                        cep->stats.rx_dropped++;
                }
                else {
-                       skb->dev = dev;
                        skb_put(skb,pkt_len-4); /* Make room */
                        eth_copy_and_sum(skb,
                                (unsigned char *)__va(bdp->cbd_bufaddr),
index 06b84c3..9db825f 100644 (file)
@@ -734,7 +734,6 @@ for (;;) {
                        cep->stats.rx_dropped++;
                }
                else {
-                       skb->dev = dev;
                        skb_put(skb,pkt_len);   /* Make room */
                        eth_copy_and_sum(skb,
                                (unsigned char *)__va(bdp->cbd_bufaddr),
index b23c45b..bfa3f52 100644 (file)
@@ -506,7 +506,6 @@ for (;;) {
                        cep->stats.rx_dropped++;
                }
                else {
-                       skb->dev = dev;
                        skb_put(skb,pkt_len-4); /* Make room */
                        eth_copy_and_sum(skb,
                                cep->rx_vaddr[bdp - cep->rx_bd_base],
index e6c28fb..57a9a61 100644 (file)
@@ -724,7 +724,6 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
                printk("%s: Memory squeeze, dropping packet.\n", dev->name);
                fep->stats.rx_dropped++;
        } else {
-               skb->dev = dev;
                skb_put(skb,pkt_len-4); /* Make room */
                eth_copy_and_sum(skb, data, pkt_len-4, 0);
                skb->protocol=eth_type_trans(skb,dev);
index 1318b6f..4ad4996 100644 (file)
@@ -93,8 +93,6 @@ EXPORT_SYMBOL(strncpy);
 EXPORT_SYMBOL(strcat);
 EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strcasecmp);
-EXPORT_SYMBOL(strncasecmp);
 EXPORT_SYMBOL(__div64_32);
 
 EXPORT_SYMBOL(csum_partial);
index 50358e4..422bef9 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for ppc-specific library files..
 #
 
-obj-y                  := checksum.o string.o strcase.o div64.o
+obj-y                  := checksum.o string.o div64.o
 
 obj-$(CONFIG_8xx)      += rheap.o
 obj-$(CONFIG_CPM2)     += rheap.o
diff --git a/arch/ppc/lib/strcase.c b/arch/ppc/lib/strcase.c
deleted file mode 100644 (file)
index 3b0094c..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#include <linux/ctype.h>
-#include <linux/types.h>
-
-int strcasecmp(const char *s1, const char *s2)
-{
-       int c1, c2;
-
-       do {
-               c1 = tolower(*s1++);
-               c2 = tolower(*s2++);
-       } while (c1 == c2 && c1 != 0);
-       return c1 - c2;
-}
-
-int strncasecmp(const char *s1, const char *s2, size_t n)
-{
-       int c1, c2;
-
-       do {
-               c1 = tolower(*s1++);
-               c2 = tolower(*s2++);
-       } while ((--n > 0) && c1 == c2 && c1 != 0);
-       return c1 - c2;
-}
index 0f293aa..e6ec418 100644 (file)
@@ -41,6 +41,11 @@ config GENERIC_HWEIGHT
 config GENERIC_TIME
        def_bool y
 
+config GENERIC_BUG
+       bool
+       depends on BUG
+       default y
+
 config NO_IOMEM
        def_bool y
 
@@ -514,6 +519,14 @@ config KEXEC
          current kernel, and to start another kernel.  It is like a reboot
          but is independent of hardware/microcode support.
 
+config ZFCPDUMP
+       tristate "zfcpdump support"
+       select SMP
+       default n
+       help
+         Select this option if you want to build a zfcpdump-enabled kernel.
+         Refer to "Documentation/s390/zfcpdump.txt" for more details on this.
+
 endmenu
 
 source "net/Kconfig"
index b1e5584..68441e0 100644 (file)
@@ -67,8 +67,10 @@ endif
 
 ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
 cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
+ifneq ($(call cc-option-yn,-mstack-size=8192),y)
 cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
 endif
+endif
 
 ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
 cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack
@@ -103,6 +105,9 @@ install: vmlinux
 image: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
+zfcpdump:
+       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
 
index 0c3cf4b..ee89b33 100644 (file)
@@ -668,45 +668,7 @@ EXPORT_SYMBOL_GPL(appldata_register_ops);
 EXPORT_SYMBOL_GPL(appldata_unregister_ops);
 EXPORT_SYMBOL_GPL(appldata_diag);
 
-#ifdef MODULE
-/*
- * Kernel symbols needed by appldata_mem and appldata_os modules.
- * However, if this file is compiled as a module (for testing only), these
- * symbols are not exported. In this case, we define them locally and export
- * those.
- */
-void si_swapinfo(struct sysinfo *val)
-{
-       val->freeswap = -1ul;
-       val->totalswap = -1ul;
-}
-
-unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200,
-                               -1 - FIXED_1/200};
-int nr_threads = -1;
-
-void get_full_page_state(struct page_state *ps)
-{
-       memset(ps, -1, sizeof(struct page_state));
-}
-
-unsigned long nr_running(void)
-{
-       return -1;
-}
-
-unsigned long nr_iowait(void)
-{
-       return -1;
-}
-
-/*unsigned long nr_context_switches(void)
-{
-       return -1;
-}*/
-#endif /* MODULE */
 EXPORT_SYMBOL_GPL(si_swapinfo);
 EXPORT_SYMBOL_GPL(nr_threads);
 EXPORT_SYMBOL_GPL(nr_running);
 EXPORT_SYMBOL_GPL(nr_iowait);
-//EXPORT_SYMBOL_GPL(nr_context_switches);
index f64b8c8..516b3ac 100644 (file)
@@ -108,10 +108,10 @@ static void appldata_get_net_sum_data(void *data)
        collisions = 0;
        read_lock(&dev_base_lock);
        for (dev = dev_base; dev != NULL; dev = dev->next) {
-               if (dev->get_stats == NULL) {
+               stats = dev->get_stats(dev);
+               if (stats == NULL) {
                        continue;
                }
-               stats = dev->get_stats(dev);
                rx_packets += stats->rx_packets;
                tx_packets += stats->tx_packets;
                rx_bytes   += stats->rx_bytes;
index 969639f..af4460e 100644 (file)
  */
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/mm.h>
 #include <linux/crypto.h>
-#include <asm/scatterlist.h>
-#include <asm/byteorder.h>
+
 #include "crypt_s390.h"
 
 #define SHA1_DIGEST_SIZE       20
 #define SHA1_BLOCK_SIZE                64
 
-struct crypt_s390_sha1_ctx {
-       u64 count;
+struct s390_sha1_ctx {
+       u64 count;              /* message length */
        u32 state[5];
-       u32 buf_len;
-       u8 buffer[2 * SHA1_BLOCK_SIZE];
+       u8 buf[2 * SHA1_BLOCK_SIZE];
 };
 
 static void sha1_init(struct crypto_tfm *tfm)
 {
-       struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       ctx->state[0] = 0x67452301;
-       ctx->state[1] = 0xEFCDAB89;
-       ctx->state[2] = 0x98BADCFE;
-       ctx->state[3] = 0x10325476;
-       ctx->state[4] = 0xC3D2E1F0;
-
-       ctx->count = 0;
-       ctx->buf_len = 0;
+       struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+
+       sctx->state[0] = 0x67452301;
+       sctx->state[1] = 0xEFCDAB89;
+       sctx->state[2] = 0x98BADCFE;
+       sctx->state[3] = 0x10325476;
+       sctx->state[4] = 0xC3D2E1F0;
+       sctx->count = 0;
 }
 
 static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
                        unsigned int len)
 {
-       struct crypt_s390_sha1_ctx *sctx;
-       long imd_len;
-
-       sctx = crypto_tfm_ctx(tfm);
-       sctx->count += len * 8; /* message bit length */
-
-       /* anything in buffer yet? -> must be completed */
-       if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) {
-               /* complete full block and hash */
-               memcpy(sctx->buffer + sctx->buf_len, data,
-                      SHA1_BLOCK_SIZE - sctx->buf_len);
-               crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
-                               SHA1_BLOCK_SIZE);
-               data += SHA1_BLOCK_SIZE - sctx->buf_len;
-               len -= SHA1_BLOCK_SIZE - sctx->buf_len;
-               sctx->buf_len = 0;
+       struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+       unsigned int index;
+       int ret;
+
+       /* how much is already in the buffer? */
+       index = sctx->count & 0x3f;
+
+       sctx->count += len;
+
+       if (index + len < SHA1_BLOCK_SIZE)
+               goto store;
+
+       /* process one stored block */
+       if (index) {
+               memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index);
+               ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf,
+                                     SHA1_BLOCK_SIZE);
+               BUG_ON(ret != SHA1_BLOCK_SIZE);
+               data += SHA1_BLOCK_SIZE - index;
+               len -= SHA1_BLOCK_SIZE - index;
        }
 
-       /* rest of data contains full blocks? */
-       imd_len = len & ~0x3ful;
-       if (imd_len) {
-               crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
-               data += imd_len;
-               len -= imd_len;
+       /* process as many blocks as possible */
+       if (len >= SHA1_BLOCK_SIZE) {
+               ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data,
+                                     len & ~(SHA1_BLOCK_SIZE - 1));
+               BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1)));
+               data += ret;
+               len -= ret;
        }
-       /* anything left? store in buffer */
-       if (len) {
-               memcpy(sctx->buffer + sctx->buf_len , data, len);
-               sctx->buf_len += len;
-       }
-}
 
+store:
+       /* anything left? */
+       if (len)
+               memcpy(sctx->buf + index , data, len);
+}
 
-static void pad_message(struct crypt_s390_sha1_ctx* sctx)
+/* Add padding and return the message digest. */
+static void sha1_final(struct crypto_tfm *tfm, u8 *out)
 {
-       int index;
+       struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+       u64 bits;
+       unsigned int index, end;
+       int ret;
+
+       /* must perform manual padding */
+       index = sctx->count & 0x3f;
+       end =  (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE);
 
-       index = sctx->buf_len;
-       sctx->buf_len = (sctx->buf_len < 56) ?
-                        SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
        /* start pad with 1 */
-       sctx->buffer[index] = 0x80;
+       sctx->buf[index] = 0x80;
+
        /* pad with zeros */
        index++;
-       memset(sctx->buffer + index, 0x00, sctx->buf_len - index);
-       /* append length */
-       memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
-              sizeof sctx->count);
-}
+       memset(sctx->buf + index, 0x00, end - index - 8);
 
-/* Add padding and return the message digest. */
-static void sha1_final(struct crypto_tfm *tfm, u8 *out)
-{
-       struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+       /* append message length */
+       bits = sctx->count * 8;
+       memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
+
+       ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end);
+       BUG_ON(ret != end);
 
-       /* must perform manual padding */
-       pad_message(sctx);
-       crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
        /* copy digest to out */
        memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
+
        /* wipe context */
        memset(sctx, 0, sizeof *sctx);
 }
@@ -128,7 +129,7 @@ static struct crypto_alg alg = {
        .cra_priority   =       CRYPT_S390_PRIORITY,
        .cra_flags      =       CRYPTO_ALG_TYPE_DIGEST,
        .cra_blocksize  =       SHA1_BLOCK_SIZE,
-       .cra_ctxsize    =       sizeof(struct crypt_s390_sha1_ctx),
+       .cra_ctxsize    =       sizeof(struct s390_sha1_ctx),
        .cra_module     =       THIS_MODULE,
        .cra_list       =       LIST_HEAD_INIT(alg.cra_list),
        .cra_u          =       { .digest = {
index 78436c6..2ced333 100644 (file)
@@ -26,7 +26,7 @@
 #define SHA256_BLOCK_SIZE      64
 
 struct s390_sha256_ctx {
-       u64 count;
+       u64 count;              /* message length */
        u32 state[8];
        u8 buf[2 * SHA256_BLOCK_SIZE];
 };
@@ -54,10 +54,9 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
        int ret;
 
        /* how much is already in the buffer? */
-       index = sctx->count / 8 & 0x3f;
+       index = sctx->count & 0x3f;
 
-       /* update message bit length */
-       sctx->count += len * 8;
+       sctx->count += len;
 
        if ((index + len) < SHA256_BLOCK_SIZE)
                goto store;
@@ -87,12 +86,17 @@ store:
                memcpy(sctx->buf + index , data, len);
 }
 
-static void pad_message(struct s390_sha256_ctx* sctx)
+/* Add padding and return the message digest */
+static void sha256_final(struct crypto_tfm *tfm, u8 *out)
 {
-       int index, end;
+       struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
+       u64 bits;
+       unsigned int index, end;
+       int ret;
 
-       index = sctx->count / 8 & 0x3f;
-       end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE;
+       /* must perform manual padding */
+       index = sctx->count & 0x3f;
+       end = (index < 56) ? SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE);
 
        /* start pad with 1 */
        sctx->buf[index] = 0x80;
@@ -102,21 +106,11 @@ static void pad_message(struct s390_sha256_ctx* sctx)
        memset(sctx->buf + index, 0x00, end - index - 8);
 
        /* append message length */
-       memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count);
-
-       sctx->count = end * 8;
-}
-
-/* Add padding and return the message digest */
-static void sha256_final(struct crypto_tfm *tfm, u8 *out)
-{
-       struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
-
-       /* must perform manual padding */
-       pad_message(sctx);
+       bits = sctx->count * 8;
+       memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
 
-       crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
-                       sctx->count / 8);
+       ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end);
+       BUG_ON(ret != end);
 
        /* copy digest to out */
        memcpy(out, sctx->state, SHA256_DIGEST_SIZE);
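
Both SHA glue files now keep sctx->count as a byte count and convert to a bit count only when the final padding block is written, instead of tracking bits throughout. Below is a stand-alone sketch of that buffering and padding scheme; the names (blk_ctx, blk_update, process_blocks) are invented for illustration, process_blocks() merely stands in for the crypt_s390_kimd() call, and the length is appended in native byte order just to show the layout (real SHA padding is big-endian, which the s390 memcpy of a u64 naturally produces).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64

struct blk_ctx {
	uint64_t count;			/* message length in bytes */
	uint8_t buf[2 * BLOCK_SIZE];	/* room for the final padded block(s) */
};

/* stand-in for crypt_s390_kimd(): just report what would be hashed */
static void process_blocks(const uint8_t *data, size_t len)
{
	printf("hash %zu byte(s)\n", len);
}

static void blk_update(struct blk_ctx *ctx, const uint8_t *data, size_t len)
{
	size_t index = ctx->count & (BLOCK_SIZE - 1);	/* bytes already buffered */

	ctx->count += len;
	if (index + len < BLOCK_SIZE) {			/* still no full block */
		memcpy(ctx->buf + index, data, len);
		return;
	}
	if (index) {					/* complete the stored block */
		memcpy(ctx->buf + index, data, BLOCK_SIZE - index);
		process_blocks(ctx->buf, BLOCK_SIZE);
		data += BLOCK_SIZE - index;
		len -= BLOCK_SIZE - index;
	}
	if (len >= BLOCK_SIZE) {			/* hash whole blocks in place */
		process_blocks(data, len & ~(size_t)(BLOCK_SIZE - 1));
		data += len & ~(size_t)(BLOCK_SIZE - 1);
		len &= BLOCK_SIZE - 1;
	}
	if (len)					/* keep the tail for later */
		memcpy(ctx->buf, data, len);
}

static void blk_final(struct blk_ctx *ctx)
{
	size_t index = ctx->count & (BLOCK_SIZE - 1);
	size_t end = index < 56 ? BLOCK_SIZE : 2 * BLOCK_SIZE;
	uint64_t bits = ctx->count * 8;		/* bit count computed only here */

	ctx->buf[index++] = 0x80;		/* a 1 bit, then zeros */
	memset(ctx->buf + index, 0, end - index - 8);
	memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
	process_blocks(ctx->buf, end);
}

int main(void)
{
	struct blk_ctx ctx = { 0 };
	uint8_t msg[100] = { 0 };

	blk_update(&ctx, msg, sizeof(msg));	/* one full block + 36-byte tail */
	blk_final(&ctx);			/* pads the tail to one 64-byte block */
	return 0;
}
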
index 741d2bb..0e4da8a 100644 (file)
@@ -12,6 +12,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_BUG=y
 CONFIG_NO_IOMEM=y
 CONFIG_S390=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -166,6 +167,7 @@ CONFIG_NO_IDLE_HZ=y
 CONFIG_NO_IDLE_HZ_INIT=y
 CONFIG_S390_HYPFS_FS=y
 CONFIG_KEXEC=y
+# CONFIG_ZFCPDUMP is not set
 
 #
 # Networking
@@ -705,6 +707,7 @@ CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
 # CONFIG_DEBUG_LIST is not set
index 5492d25..3195d37 100644 (file)
@@ -6,7 +6,7 @@ EXTRA_AFLAGS    := -traditional
 
 obj-y  :=  bitmap.o traps.o time.o process.o base.o early.o \
             setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-           semaphore.o s390_ext.o debug.o irq.o ipl.o
+           semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o
 
 obj-y  += $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y  += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
index 664c669..5236fdb 100644 (file)
@@ -495,29 +495,34 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
  * sys32_execve() executes a new program after the asm stub has set
  * things up for us.  This should basically do what I want it to.
  */
-asmlinkage long
-sys32_execve(struct pt_regs regs)
+asmlinkage long sys32_execve(void)
 {
-        int error;
-        char * filename;
+       struct pt_regs *regs = task_pt_regs(current);
+       char *filename;
+       unsigned long result;
+       int rc;
 
-        filename = getname(compat_ptr(regs.orig_gpr2));
-        error = PTR_ERR(filename);
-        if (IS_ERR(filename))
+       filename = getname(compat_ptr(regs->orig_gpr2));
+       if (IS_ERR(filename)) {
+               result = PTR_ERR(filename);
                 goto out;
-        error = compat_do_execve(filename, compat_ptr(regs.gprs[3]),
-                                compat_ptr(regs.gprs[4]), &regs);
-       if (error == 0)
-       {
-               task_lock(current);
-               current->ptrace &= ~PT_DTRACE;
-               task_unlock(current);
-               current->thread.fp_regs.fpc=0;
-               asm volatile("sfpc %0,0" : : "d" (0));
        }
+       rc = compat_do_execve(filename, compat_ptr(regs->gprs[3]),
+                             compat_ptr(regs->gprs[4]), regs);
+       if (rc) {
+               result = rc;
+               goto out_putname;
+       }
+       task_lock(current);
+       current->ptrace &= ~PT_DTRACE;
+       task_unlock(current);
+       current->thread.fp_regs.fpc=0;
+       asm volatile("sfpc %0,0" : : "d" (0));
+       result = regs->gprs[2];
+out_putname:
         putname(filename);
 out:
-        return error;
+       return result;
 }
 
 
@@ -918,19 +923,20 @@ asmlinkage long sys32_write(unsigned int fd, char __user * buf, size_t count)
        return sys_write(fd, buf, count);
 }
 
-asmlinkage long sys32_clone(struct pt_regs regs)
+asmlinkage long sys32_clone(void)
 {
-        unsigned long clone_flags;
-        unsigned long newsp;
+       struct pt_regs *regs = task_pt_regs(current);
+       unsigned long clone_flags;
+       unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;
 
-        clone_flags = regs.gprs[3] & 0xffffffffUL;
-        newsp = regs.orig_gpr2 & 0x7fffffffUL;
-       parent_tidptr = compat_ptr(regs.gprs[4]);
-       child_tidptr = compat_ptr(regs.gprs[5]);
-        if (!newsp)
-                newsp = regs.gprs[15];
-        return do_fork(clone_flags, newsp, &regs, 0,
+       clone_flags = regs->gprs[3] & 0xffffffffUL;
+       newsp = regs->orig_gpr2 & 0x7fffffffUL;
+       parent_tidptr = compat_ptr(regs->gprs[4]);
+       child_tidptr = compat_ptr(regs->gprs[5]);
+       if (!newsp)
+               newsp = regs->gprs[15];
+       return do_fork(clone_flags, newsp, regs, 0,
                       parent_tidptr, child_tidptr);
 }
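
The compat wrappers above no longer receive a pt_regs argument; they fetch the saved user register frame with task_pt_regs(current) and read their syscall arguments from it. A hypothetical wrapper showing just that pattern (the function name and the way the two arguments are combined are made up for illustration; the register fields match the s390 code above):

/*
 * Hypothetical example, not from this patch: the task_pt_regs()
 * pattern the converted sys32_* wrappers use to reach their arguments.
 */
#include <linux/sched.h>
#include <linux/linkage.h>
#include <asm/ptrace.h>

asmlinkage long sys32_example(void)
{
	struct pt_regs *regs = task_pt_regs(current);

	/* 31-bit syscall arguments live in the saved GPRs of current */
	unsigned long a = regs->orig_gpr2 & 0x7fffffffUL;
	unsigned long b = regs->gprs[3] & 0xffffffffUL;

	return (long)(a + b);	/* placeholder result */
}
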
 
index 887a988..80a54a0 100644 (file)
@@ -255,9 +255,9 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
 }
 
 asmlinkage long
-sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
-                                                       struct pt_regs *regs)
+sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss)
 {
+       struct pt_regs *regs = task_pt_regs(current);
        stack_t kss, koss;
        unsigned long ss_sp;
        int ret, err = 0;
@@ -344,8 +344,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
        return 0;
 }
 
-asmlinkage long sys32_sigreturn(struct pt_regs *regs)
+asmlinkage long sys32_sigreturn(void)
 {
+       struct pt_regs *regs = task_pt_regs(current);
        sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
        sigset_t set;
 
@@ -370,8 +371,9 @@ badframe:
        return 0;
 }
 
-asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
+asmlinkage long sys32_rt_sigreturn(void)
 {
+       struct pt_regs *regs = task_pt_regs(current);
        rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
        sigset_t set;
        stack_t st;
@@ -407,8 +409,8 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
        return regs->gprs[2];
 
 badframe:
-        force_sig(SIGSEGV, current);
-        return 0;
+       force_sig(SIGSEGV, current);
+       return 0;
 }      
 
 /*
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
new file mode 100644 (file)
index 0000000..dabaf98
--- /dev/null
@@ -0,0 +1,1278 @@
+/*
+ * arch/s390/kernel/dis.c
+ *
+ * Disassemble s390 instructions.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/reboot.h>
+#include <linux/kprobes.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/mathemu.h>
+#include <asm/cpcmd.h>
+#include <asm/s390_ext.h>
+#include <asm/lowcore.h>
+#include <asm/debug.h>
+#include <asm/kdebug.h>
+
+#ifndef CONFIG_64BIT
+#define ONELONG "%08lx: "
+#else /* CONFIG_64BIT */
+#define ONELONG "%016lx: "
+#endif /* CONFIG_64BIT */
+
+#define OPERAND_GPR    0x1     /* Operand printed as %rx */
+#define OPERAND_FPR    0x2     /* Operand printed as %fx */
+#define OPERAND_AR     0x4     /* Operand printed as %ax */
+#define OPERAND_CR     0x8     /* Operand printed as %cx */
+#define OPERAND_DISP   0x10    /* Operand printed as displacement */
+#define OPERAND_BASE   0x20    /* Operand printed as base register */
+#define OPERAND_INDEX  0x40    /* Operand printed as index register */
+#define OPERAND_PCREL  0x80    /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x100   /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x200   /* Operand printed as length (+1) */
+
+enum {
+       UNUSED, /* Indicates the end of the operand list */
+       R_8,    /* GPR starting at position 8 */
+       R_12,   /* GPR starting at position 12 */
+       R_16,   /* GPR starting at position 16 */
+       R_20,   /* GPR starting at position 20 */
+       R_24,   /* GPR starting at position 24 */
+       R_28,   /* GPR starting at position 28 */
+       R_32,   /* GPR starting at position 32 */
+       F_8,    /* FPR starting at position 8 */
+       F_12,   /* FPR starting at position 12 */
+       F_16,   /* FPR starting at position 16 */
+       F_20,   /* FPR starting at position 16 */
+       F_24,   /* FPR starting at position 24 */
+       F_28,   /* FPR starting at position 28 */
+       F_32,   /* FPR starting at position 32 */
+       A_8,    /* Access reg. starting at position 8 */
+       A_12,   /* Access reg. starting at position 12 */
+       A_24,   /* Access reg. starting at position 24 */
+       A_28,   /* Access reg. starting at position 28 */
+       C_8,    /* Control reg. starting at position 8 */
+       C_12,   /* Control reg. starting at position 12 */
+       B_16,   /* Base register starting at position 16 */
+       B_32,   /* Base register starting at position 32 */
+       X_12,   /* Index register starting at position 12 */
+       D_20,   /* Displacement starting at position 20 */
+       D_36,   /* Displacement starting at position 36 */
+       D20_20, /* 20 bit displacement starting at 20 */
+       L4_8,   /* 4 bit length starting at position 8 */
+       L4_12,  /* 4 bit length starting at position 12 */
+       L8_8,   /* 8 bit length starting at position 8 */
+       U4_8,   /* 4 bit unsigned value starting at 8 */
+       U4_12,  /* 4 bit unsigned value starting at 12 */
+       U4_16,  /* 4 bit unsigned value starting at 16 */
+       U4_20,  /* 4 bit unsigned value starting at 20 */
+       U8_8,   /* 8 bit unsigned value starting at 8 */
+       U8_16,  /* 8 bit unsigned value starting at 16 */
+       I16_16, /* 16 bit signed value starting at 16 */
+       U16_16, /* 16 bit unsigned value starting at 16 */
+       J16_16, /* PC relative jump offset at 16 */
+       J32_16, /* PC relative long offset at 16 */
+       I32_16, /* 32 bit signed value starting at 16 */
+       U32_16, /* 32 bit unsigned value starting at 16 */
+       M_16,   /* 4 bit optional mask starting at 16 */
+       RO_28,  /* optional GPR starting at position 28 */
+};
+
+/*
+ * Enumeration of the different instruction formats.
+ * For details consult the principles of operation.
+ */
+enum {
+       INSTR_INVALID,
+       INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU,
+       INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
+       INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0,
+       INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR,
+       INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR,
+       INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF,
+       INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
+       INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP,
+       INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD,
+       INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD,
+       INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD,
+       INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD,
+       INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD,
+       INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD,
+       INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
+       INSTR_S_00, INSTR_S_RD,
+};
+
+struct operand {
+       int bits;               /* The number of bits in the operand. */
+       int shift;              /* The number of bits to shift. */
+       int flags;              /* One bit syntax flags. */
+};
+
+struct insn {
+       const char name[5];
+       unsigned char opfrag;
+       unsigned char format;
+};
+
+static const struct operand operands[] =
+{
+       [UNUSED]  = { 0, 0, 0 },
+       [R_8]    = {  4,  8, OPERAND_GPR },
+       [R_12]   = {  4, 12, OPERAND_GPR },
+       [R_16]   = {  4, 16, OPERAND_GPR },
+       [R_20]   = {  4, 20, OPERAND_GPR },
+       [R_24]   = {  4, 24, OPERAND_GPR },
+       [R_28]   = {  4, 28, OPERAND_GPR },
+       [R_32]   = {  4, 32, OPERAND_GPR },
+       [F_8]    = {  4,  8, OPERAND_FPR },
+       [F_12]   = {  4, 12, OPERAND_FPR },
+       [F_16]   = {  4, 16, OPERAND_FPR },
+       [F_20]   = {  4, 16, OPERAND_FPR },
+       [F_24]   = {  4, 24, OPERAND_FPR },
+       [F_28]   = {  4, 28, OPERAND_FPR },
+       [F_32]   = {  4, 32, OPERAND_FPR },
+       [A_8]    = {  4,  8, OPERAND_AR },
+       [A_12]   = {  4, 12, OPERAND_AR },
+       [A_24]   = {  4, 24, OPERAND_AR },
+       [A_28]   = {  4, 28, OPERAND_AR },
+       [C_8]    = {  4,  8, OPERAND_CR },
+       [C_12]   = {  4, 12, OPERAND_CR },
+       [B_16]   = {  4, 16, OPERAND_BASE | OPERAND_GPR },
+       [B_32]   = {  4, 32, OPERAND_BASE | OPERAND_GPR },
+       [X_12]   = {  4, 12, OPERAND_INDEX | OPERAND_GPR },
+       [D_20]   = { 12, 20, OPERAND_DISP },
+       [D_36]   = { 12, 36, OPERAND_DISP },
+       [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED },
+       [L4_8]   = {  4,  8, OPERAND_LENGTH },
+       [L4_12]  = {  4, 12, OPERAND_LENGTH },
+       [L8_8]   = {  8,  8, OPERAND_LENGTH },
+       [U4_8]   = {  4,  8, 0 },
+       [U4_12]  = {  4, 12, 0 },
+       [U4_16]  = {  4, 16, 0 },
+       [U4_20]  = {  4, 20, 0 },
+       [U8_8]   = {  8,  8, 0 },
+       [U8_16]  = {  8, 16, 0 },
+       [I16_16] = { 16, 16, OPERAND_SIGNED },
+       [U16_16] = { 16, 16, 0 },
+       [J16_16] = { 16, 16, OPERAND_PCREL },
+       [J32_16] = { 32, 16, OPERAND_PCREL },
+       [I32_16] = { 32, 16, OPERAND_SIGNED },
+       [U32_16] = { 32, 16, 0 },
+       [M_16]   = {  4, 16, 0 },
+       [RO_28]  = {  4, 28, OPERAND_GPR }
+};
+
+static const unsigned char formats[][7] = {
+       [INSTR_E]         = { 0xff, 0,0,0,0,0,0 },             /* e.g. pr    */
+       [INSTR_RIE_RRP]   = { 0xff, R_8,R_12,J16_16,0,0,0 },   /* e.g. brxhg */
+       [INSTR_RIL_RP]    = { 0x0f, R_8,J32_16,0,0,0,0 },      /* e.g. brasl */
+       [INSTR_RIL_UP]    = { 0x0f, U4_8,J32_16,0,0,0,0 },     /* e.g. brcl  */
+       [INSTR_RIL_RI]    = { 0x0f, R_8,I32_16,0,0,0,0 },      /* e.g. afi   */
+       [INSTR_RIL_RU]    = { 0x0f, R_8,U32_16,0,0,0,0 },      /* e.g. alfi  */
+       [INSTR_RI_RI]     = { 0x0f, R_8,I16_16,0,0,0,0 },      /* e.g. ahi   */
+       [INSTR_RI_RP]     = { 0x0f, R_8,J16_16,0,0,0,0 },      /* e.g. brct  */
+       [INSTR_RI_RU]     = { 0x0f, R_8,U16_16,0,0,0,0 },      /* e.g. tml   */
+       [INSTR_RI_UP]     = { 0x0f, U4_8,J16_16,0,0,0,0 },     /* e.g. brc   */
+       [INSTR_RRE_00]    = { 0xff, 0,0,0,0,0,0 },             /* e.g. palb  */
+       [INSTR_RRE_0R]    = { 0xff, R_28,0,0,0,0,0 },          /* e.g. tb    */
+       [INSTR_RRE_AA]    = { 0xff, A_24,A_28,0,0,0,0 },       /* e.g. cpya  */
+       [INSTR_RRE_AR]    = { 0xff, A_24,R_28,0,0,0,0 },       /* e.g. sar   */
+       [INSTR_RRE_F0]    = { 0xff, F_24,0,0,0,0,0 },          /* e.g. sqer  */
+       [INSTR_RRE_FF]    = { 0xff, F_24,F_28,0,0,0,0 },       /* e.g. debr  */
+       [INSTR_RRE_R0]    = { 0xff, R_24,0,0,0,0,0 },          /* e.g. ipm   */
+       [INSTR_RRE_RA]    = { 0xff, R_24,A_28,0,0,0,0 },       /* e.g. ear   */
+       [INSTR_RRE_RF]    = { 0xff, R_24,F_28,0,0,0,0 },       /* e.g. cefbr */
+       [INSTR_RRE_RR]    = { 0xff, R_24,R_28,0,0,0,0 },       /* e.g. lura  */
+       [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 },      /* efpc, sfpc */
+       [INSTR_RRF_F0FF]  = { 0xff, F_16,F_24,F_28,0,0,0 },    /* e.g. madbr */
+       [INSTR_RRF_FUFF]  = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */
+       [INSTR_RRF_RURR]  = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */
+       [INSTR_RRF_R0RR]  = { 0xff, R_24,R_28,R_16,0,0,0 },    /* e.g. idte  */
+       [INSTR_RRF_U0FF]  = { 0xff, F_24,U4_16,F_28,0,0,0 },   /* e.g. fixr  */
+       [INSTR_RRF_U0RF]  = { 0xff, R_24,U4_16,F_28,0,0,0 },   /* e.g. cfebr */
+       [INSTR_RRF_M0RR]  = { 0xff, R_24,R_28,M_16,0,0,0 },    /* e.g. sske  */
+       [INSTR_RR_FF]     = { 0xff, F_8,F_12,0,0,0,0 },        /* e.g. adr   */
+       [INSTR_RR_R0]     = { 0xff, R_8, 0,0,0,0,0 },          /* e.g. spm   */
+       [INSTR_RR_RR]     = { 0xff, R_8,R_12,0,0,0,0 },        /* e.g. lr    */
+       [INSTR_RR_U0]     = { 0xff, U8_8, 0,0,0,0,0 },         /* e.g. svc   */
+       [INSTR_RR_UR]     = { 0xff, U4_8,R_12,0,0,0,0 },       /* e.g. bcr   */
+       [INSTR_RSE_RRRD]  = { 0xff, R_8,R_12,D_20,B_16,0,0 },  /* e.g. lmh   */
+       [INSTR_RSE_CCRD]  = { 0xff, C_8,C_12,D_20,B_16,0,0 },  /* e.g. lmh   */
+       [INSTR_RSE_RURD]  = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh  */
+       [INSTR_RSL_R0RD]  = { 0xff, R_8,D_20,B_16,0,0,0 },     /* e.g. tp    */
+       [INSTR_RSI_RRP]   = { 0xff, R_8,R_12,J16_16,0,0,0 },   /* e.g. brxh  */
+       [INSTR_RSY_RRRD]  = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy  */
+       [INSTR_RSY_RURD]  = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
+                                                              /* e.g. icmh  */
+       [INSTR_RSY_AARD]  = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy  */
+       [INSTR_RSY_CCRD]  = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy  */
+       [INSTR_RS_AARD]   = { 0xff, A_8,A_12,D_20,B_16,0,0 },  /* e.g. lam   */
+       [INSTR_RS_CCRD]   = { 0xff, C_8,C_12,D_20,B_16,0,0 },  /* e.g. lctl  */
+       [INSTR_RS_R0RD]   = { 0xff, R_8,D_20,B_16,0,0,0 },     /* e.g. sll   */
+       [INSTR_RS_RRRD]   = { 0xff, R_8,R_12,D_20,B_16,0,0 },  /* e.g. cs    */
+       [INSTR_RS_RURD]   = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm   */
+       [INSTR_RXE_FRRD]  = { 0xff, F_8,D_20,X_12,B_16,0,0 },  /* e.g. axbr  */
+       [INSTR_RXE_RRRD]  = { 0xff, R_8,D_20,X_12,B_16,0,0 },  /* e.g. lg    */
+       [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
+                                                              /* e.g. madb  */
+       [INSTR_RXY_RRRD]  = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly    */
+       [INSTR_RXY_FRRD]  = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley   */
+       [INSTR_RX_FRRD]   = { 0xff, F_8,D_20,X_12,B_16,0,0 },  /* e.g. ae    */
+       [INSTR_RX_RRRD]   = { 0xff, R_8,D_20,X_12,B_16,0,0 },  /* e.g. l     */
+       [INSTR_RX_URRD]   = { 0x00, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc    */
+       [INSTR_SI_URD]    = { 0x00, D_20,B_16,U8_8,0,0,0 },    /* e.g. cli   */
+       [INSTR_SIY_URD]   = { 0xff, D20_20,B_16,U8_8,0,0,0 },  /* e.g. tmy   */
+       [INSTR_SSE_RDRD]  = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */
+       [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
+                                                              /* e.g. mvc   */
+       [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
+                                                              /* e.g. srp   */
+       [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
+                                                              /* e.g. pack  */
+       [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
+                                                              /* e.g. mvck  */
+       [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
+                                                              /* e.g. plo   */
+       [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
+                                                              /* e.g. lmd   */
+       [INSTR_S_00]      = { 0xff, 0,0,0,0,0,0 },             /* e.g. hsch  */
+       [INSTR_S_RD]      = { 0xff, D_20,B_16,0,0,0,0 },       /* e.g. lpsw  */
+       [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
+                                                              /* e.g. mvcos */
+};
+
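+/*
+ * Opcode tables: one table for the single-byte opcodes and one table per
+ * extended opcode prefix. Each table ends with an INSTR_INVALID entry
+ * that terminates the search in find_insn.
+ */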
+static struct insn opcode[] = {
+#ifdef CONFIG_64BIT
+       { "lmd", 0xef, INSTR_SS_RRRDRD3 },
+#endif
+       { "spm", 0x04, INSTR_RR_R0 },
+       { "balr", 0x05, INSTR_RR_RR },
+       { "bctr", 0x06, INSTR_RR_RR },
+       { "bcr", 0x07, INSTR_RR_UR },
+       { "svc", 0x0a, INSTR_RR_U0 },
+       { "bsm", 0x0b, INSTR_RR_RR },
+       { "bassm", 0x0c, INSTR_RR_RR },
+       { "basr", 0x0d, INSTR_RR_RR },
+       { "mvcl", 0x0e, INSTR_RR_RR },
+       { "clcl", 0x0f, INSTR_RR_RR },
+       { "lpr", 0x10, INSTR_RR_RR },
+       { "lnr", 0x11, INSTR_RR_RR },
+       { "ltr", 0x12, INSTR_RR_RR },
+       { "lcr", 0x13, INSTR_RR_RR },
+       { "nr", 0x14, INSTR_RR_RR },
+       { "clr", 0x15, INSTR_RR_RR },
+       { "or", 0x16, INSTR_RR_RR },
+       { "xr", 0x17, INSTR_RR_RR },
+       { "lr", 0x18, INSTR_RR_RR },
+       { "cr", 0x19, INSTR_RR_RR },
+       { "ar", 0x1a, INSTR_RR_RR },
+       { "sr", 0x1b, INSTR_RR_RR },
+       { "mr", 0x1c, INSTR_RR_RR },
+       { "dr", 0x1d, INSTR_RR_RR },
+       { "alr", 0x1e, INSTR_RR_RR },
+       { "slr", 0x1f, INSTR_RR_RR },
+       { "lpdr", 0x20, INSTR_RR_FF },
+       { "lndr", 0x21, INSTR_RR_FF },
+       { "ltdr", 0x22, INSTR_RR_FF },
+       { "lcdr", 0x23, INSTR_RR_FF },
+       { "hdr", 0x24, INSTR_RR_FF },
+       { "ldxr", 0x25, INSTR_RR_FF },
+       { "lrdr", 0x25, INSTR_RR_FF },
+       { "mxr", 0x26, INSTR_RR_FF },
+       { "mxdr", 0x27, INSTR_RR_FF },
+       { "ldr", 0x28, INSTR_RR_FF },
+       { "cdr", 0x29, INSTR_RR_FF },
+       { "adr", 0x2a, INSTR_RR_FF },
+       { "sdr", 0x2b, INSTR_RR_FF },
+       { "mdr", 0x2c, INSTR_RR_FF },
+       { "ddr", 0x2d, INSTR_RR_FF },
+       { "awr", 0x2e, INSTR_RR_FF },
+       { "swr", 0x2f, INSTR_RR_FF },
+       { "lper", 0x30, INSTR_RR_FF },
+       { "lner", 0x31, INSTR_RR_FF },
+       { "lter", 0x32, INSTR_RR_FF },
+       { "lcer", 0x33, INSTR_RR_FF },
+       { "her", 0x34, INSTR_RR_FF },
+       { "ledr", 0x35, INSTR_RR_FF },
+       { "lrer", 0x35, INSTR_RR_FF },
+       { "axr", 0x36, INSTR_RR_FF },
+       { "sxr", 0x37, INSTR_RR_FF },
+       { "ler", 0x38, INSTR_RR_FF },
+       { "cer", 0x39, INSTR_RR_FF },
+       { "aer", 0x3a, INSTR_RR_FF },
+       { "ser", 0x3b, INSTR_RR_FF },
+       { "mder", 0x3c, INSTR_RR_FF },
+       { "mer", 0x3c, INSTR_RR_FF },
+       { "der", 0x3d, INSTR_RR_FF },
+       { "aur", 0x3e, INSTR_RR_FF },
+       { "sur", 0x3f, INSTR_RR_FF },
+       { "sth", 0x40, INSTR_RX_RRRD },
+       { "la", 0x41, INSTR_RX_RRRD },
+       { "stc", 0x42, INSTR_RX_RRRD },
+       { "ic", 0x43, INSTR_RX_RRRD },
+       { "ex", 0x44, INSTR_RX_RRRD },
+       { "bal", 0x45, INSTR_RX_RRRD },
+       { "bct", 0x46, INSTR_RX_RRRD },
+       { "bc", 0x47, INSTR_RX_URRD },
+       { "lh", 0x48, INSTR_RX_RRRD },
+       { "ch", 0x49, INSTR_RX_RRRD },
+       { "ah", 0x4a, INSTR_RX_RRRD },
+       { "sh", 0x4b, INSTR_RX_RRRD },
+       { "mh", 0x4c, INSTR_RX_RRRD },
+       { "bas", 0x4d, INSTR_RX_RRRD },
+       { "cvd", 0x4e, INSTR_RX_RRRD },
+       { "cvb", 0x4f, INSTR_RX_RRRD },
+       { "st", 0x50, INSTR_RX_RRRD },
+       { "lae", 0x51, INSTR_RX_RRRD },
+       { "n", 0x54, INSTR_RX_RRRD },
+       { "cl", 0x55, INSTR_RX_RRRD },
+       { "o", 0x56, INSTR_RX_RRRD },
+       { "x", 0x57, INSTR_RX_RRRD },
+       { "l", 0x58, INSTR_RX_RRRD },
+       { "c", 0x59, INSTR_RX_RRRD },
+       { "a", 0x5a, INSTR_RX_RRRD },
+       { "s", 0x5b, INSTR_RX_RRRD },
+       { "m", 0x5c, INSTR_RX_RRRD },
+       { "d", 0x5d, INSTR_RX_RRRD },
+       { "al", 0x5e, INSTR_RX_RRRD },
+       { "sl", 0x5f, INSTR_RX_RRRD },
+       { "std", 0x60, INSTR_RX_FRRD },
+       { "mxd", 0x67, INSTR_RX_FRRD },
+       { "ld", 0x68, INSTR_RX_FRRD },
+       { "cd", 0x69, INSTR_RX_FRRD },
+       { "ad", 0x6a, INSTR_RX_FRRD },
+       { "sd", 0x6b, INSTR_RX_FRRD },
+       { "md", 0x6c, INSTR_RX_FRRD },
+       { "dd", 0x6d, INSTR_RX_FRRD },
+       { "aw", 0x6e, INSTR_RX_FRRD },
+       { "sw", 0x6f, INSTR_RX_FRRD },
+       { "ste", 0x70, INSTR_RX_FRRD },
+       { "ms", 0x71, INSTR_RX_RRRD },
+       { "le", 0x78, INSTR_RX_FRRD },
+       { "ce", 0x79, INSTR_RX_FRRD },
+       { "ae", 0x7a, INSTR_RX_FRRD },
+       { "se", 0x7b, INSTR_RX_FRRD },
+       { "mde", 0x7c, INSTR_RX_FRRD },
+       { "me", 0x7c, INSTR_RX_FRRD },
+       { "de", 0x7d, INSTR_RX_FRRD },
+       { "au", 0x7e, INSTR_RX_FRRD },
+       { "su", 0x7f, INSTR_RX_FRRD },
+       { "ssm", 0x80, INSTR_S_RD },
+       { "lpsw", 0x82, INSTR_S_RD },
+       { "diag", 0x83, INSTR_RS_RRRD },
+       { "brxh", 0x84, INSTR_RSI_RRP },
+       { "brxle", 0x85, INSTR_RSI_RRP },
+       { "bxh", 0x86, INSTR_RS_RRRD },
+       { "bxle", 0x87, INSTR_RS_RRRD },
+       { "srl", 0x88, INSTR_RS_R0RD },
+       { "sll", 0x89, INSTR_RS_R0RD },
+       { "sra", 0x8a, INSTR_RS_R0RD },
+       { "sla", 0x8b, INSTR_RS_R0RD },
+       { "srdl", 0x8c, INSTR_RS_R0RD },
+       { "sldl", 0x8d, INSTR_RS_R0RD },
+       { "srda", 0x8e, INSTR_RS_R0RD },
+       { "slda", 0x8f, INSTR_RS_R0RD },
+       { "stm", 0x90, INSTR_RS_RRRD },
+       { "tm", 0x91, INSTR_SI_URD },
+       { "mvi", 0x92, INSTR_SI_URD },
+       { "ts", 0x93, INSTR_S_RD },
+       { "ni", 0x94, INSTR_SI_URD },
+       { "cli", 0x95, INSTR_SI_URD },
+       { "oi", 0x96, INSTR_SI_URD },
+       { "xi", 0x97, INSTR_SI_URD },
+       { "lm", 0x98, INSTR_RS_RRRD },
+       { "trace", 0x99, INSTR_RS_RRRD },
+       { "lam", 0x9a, INSTR_RS_AARD },
+       { "stam", 0x9b, INSTR_RS_AARD },
+       { "mvcle", 0xa8, INSTR_RS_RRRD },
+       { "clcle", 0xa9, INSTR_RS_RRRD },
+       { "stnsm", 0xac, INSTR_SI_URD },
+       { "stosm", 0xad, INSTR_SI_URD },
+       { "sigp", 0xae, INSTR_RS_RRRD },
+       { "mc", 0xaf, INSTR_SI_URD },
+       { "lra", 0xb1, INSTR_RX_RRRD },
+       { "stctl", 0xb6, INSTR_RS_CCRD },
+       { "lctl", 0xb7, INSTR_RS_CCRD },
+       { "cs", 0xba, INSTR_RS_RRRD },
+       { "cds", 0xbb, INSTR_RS_RRRD },
+       { "clm", 0xbd, INSTR_RS_RURD },
+       { "stcm", 0xbe, INSTR_RS_RURD },
+       { "icm", 0xbf, INSTR_RS_RURD },
+       { "mvn", 0xd1, INSTR_SS_L0RDRD },
+       { "mvc", 0xd2, INSTR_SS_L0RDRD },
+       { "mvz", 0xd3, INSTR_SS_L0RDRD },
+       { "nc", 0xd4, INSTR_SS_L0RDRD },
+       { "clc", 0xd5, INSTR_SS_L0RDRD },
+       { "oc", 0xd6, INSTR_SS_L0RDRD },
+       { "xc", 0xd7, INSTR_SS_L0RDRD },
+       { "mvck", 0xd9, INSTR_SS_RRRDRD },
+       { "mvcp", 0xda, INSTR_SS_RRRDRD },
+       { "mvcs", 0xdb, INSTR_SS_RRRDRD },
+       { "tr", 0xdc, INSTR_SS_L0RDRD },
+       { "trt", 0xdd, INSTR_SS_L0RDRD },
+       { "ed", 0xde, INSTR_SS_L0RDRD },
+       { "edmk", 0xdf, INSTR_SS_L0RDRD },
+       { "pku", 0xe1, INSTR_SS_L0RDRD },
+       { "unpku", 0xe2, INSTR_SS_L0RDRD },
+       { "mvcin", 0xe8, INSTR_SS_L0RDRD },
+       { "pka", 0xe9, INSTR_SS_L0RDRD },
+       { "unpka", 0xea, INSTR_SS_L0RDRD },
+       { "plo", 0xee, INSTR_SS_RRRDRD2 },
+       { "srp", 0xf0, INSTR_SS_LIRDRD },
+       { "mvo", 0xf1, INSTR_SS_LLRDRD },
+       { "pack", 0xf2, INSTR_SS_LLRDRD },
+       { "unpk", 0xf3, INSTR_SS_LLRDRD },
+       { "zap", 0xf8, INSTR_SS_LLRDRD },
+       { "cp", 0xf9, INSTR_SS_LLRDRD },
+       { "ap", 0xfa, INSTR_SS_LLRDRD },
+       { "sp", 0xfb, INSTR_SS_LLRDRD },
+       { "mp", 0xfc, INSTR_SS_LLRDRD },
+       { "dp", 0xfd, INSTR_SS_LLRDRD },
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_01[] = {
+#ifdef CONFIG_64BIT
+       { "sam64", 0x0e, INSTR_E },
+#endif
+       { "pr", 0x01, INSTR_E },
+       { "upt", 0x02, INSTR_E },
+       { "sckpf", 0x07, INSTR_E },
+       { "tam", 0x0b, INSTR_E },
+       { "sam24", 0x0c, INSTR_E },
+       { "sam31", 0x0d, INSTR_E },
+       { "trap2", 0xff, INSTR_E },
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_a5[] = {
+#ifdef CONFIG_64BIT
+       { "iihh", 0x00, INSTR_RI_RU },
+       { "iihl", 0x01, INSTR_RI_RU },
+       { "iilh", 0x02, INSTR_RI_RU },
+       { "iill", 0x03, INSTR_RI_RU },
+       { "nihh", 0x04, INSTR_RI_RU },
+       { "nihl", 0x05, INSTR_RI_RU },
+       { "nilh", 0x06, INSTR_RI_RU },
+       { "nill", 0x07, INSTR_RI_RU },
+       { "oihh", 0x08, INSTR_RI_RU },
+       { "oihl", 0x09, INSTR_RI_RU },
+       { "oilh", 0x0a, INSTR_RI_RU },
+       { "oill", 0x0b, INSTR_RI_RU },
+       { "llihh", 0x0c, INSTR_RI_RU },
+       { "llihl", 0x0d, INSTR_RI_RU },
+       { "llilh", 0x0e, INSTR_RI_RU },
+       { "llill", 0x0f, INSTR_RI_RU },
+#endif
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_a7[] = {
+#ifdef CONFIG_64BIT
+       { "tmhh", 0x02, INSTR_RI_RU },
+       { "tmhl", 0x03, INSTR_RI_RU },
+       { "brctg", 0x07, INSTR_RI_RP },
+       { "lghi", 0x09, INSTR_RI_RI },
+       { "aghi", 0x0b, INSTR_RI_RI },
+       { "mghi", 0x0d, INSTR_RI_RI },
+       { "cghi", 0x0f, INSTR_RI_RI },
+#endif
+       { "tmlh", 0x00, INSTR_RI_RU },
+       { "tmll", 0x01, INSTR_RI_RU },
+       { "brc", 0x04, INSTR_RI_UP },
+       { "bras", 0x05, INSTR_RI_RP },
+       { "brct", 0x06, INSTR_RI_RP },
+       { "lhi", 0x08, INSTR_RI_RI },
+       { "ahi", 0x0a, INSTR_RI_RI },
+       { "mhi", 0x0c, INSTR_RI_RI },
+       { "chi", 0x0e, INSTR_RI_RI },
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_b2[] = {
+#ifdef CONFIG_64BIT
+       { "sske", 0x2b, INSTR_RRF_M0RR },
+       { "stckf", 0x7c, INSTR_S_RD },
+       { "cu21", 0xa6, INSTR_RRF_M0RR },
+       { "cuutf", 0xa6, INSTR_RRF_M0RR },
+       { "cu12", 0xa7, INSTR_RRF_M0RR },
+       { "cutfu", 0xa7, INSTR_RRF_M0RR },
+       { "stfle", 0xb0, INSTR_S_RD },
+       { "lpswe", 0xb2, INSTR_S_RD },
+#endif
+       { "stidp", 0x02, INSTR_S_RD },
+       { "sck", 0x04, INSTR_S_RD },
+       { "stck", 0x05, INSTR_S_RD },
+       { "sckc", 0x06, INSTR_S_RD },
+       { "stckc", 0x07, INSTR_S_RD },
+       { "spt", 0x08, INSTR_S_RD },
+       { "stpt", 0x09, INSTR_S_RD },
+       { "spka", 0x0a, INSTR_S_RD },
+       { "ipk", 0x0b, INSTR_S_00 },
+       { "ptlb", 0x0d, INSTR_S_00 },
+       { "spx", 0x10, INSTR_S_RD },
+       { "stpx", 0x11, INSTR_S_RD },
+       { "stap", 0x12, INSTR_S_RD },
+       { "sie", 0x14, INSTR_S_RD },
+       { "pc", 0x18, INSTR_S_RD },
+       { "sac", 0x19, INSTR_S_RD },
+       { "cfc", 0x1a, INSTR_S_RD },
+       { "ipte", 0x21, INSTR_RRE_RR },
+       { "ipm", 0x22, INSTR_RRE_R0 },
+       { "ivsk", 0x23, INSTR_RRE_RR },
+       { "iac", 0x24, INSTR_RRE_R0 },
+       { "ssar", 0x25, INSTR_RRE_R0 },
+       { "epar", 0x26, INSTR_RRE_R0 },
+       { "esar", 0x27, INSTR_RRE_R0 },
+       { "pt", 0x28, INSTR_RRE_RR },
+       { "iske", 0x29, INSTR_RRE_RR },
+       { "rrbe", 0x2a, INSTR_RRE_RR },
+       { "sske", 0x2b, INSTR_RRE_RR },
+       { "tb", 0x2c, INSTR_RRE_0R },
+       { "dxr", 0x2d, INSTR_RRE_F0 },
+       { "pgin", 0x2e, INSTR_RRE_RR },
+       { "pgout", 0x2f, INSTR_RRE_RR },
+       { "csch", 0x30, INSTR_S_00 },
+       { "hsch", 0x31, INSTR_S_00 },
+       { "msch", 0x32, INSTR_S_RD },
+       { "ssch", 0x33, INSTR_S_RD },
+       { "stsch", 0x34, INSTR_S_RD },
+       { "tsch", 0x35, INSTR_S_RD },
+       { "tpi", 0x36, INSTR_S_RD },
+       { "sal", 0x37, INSTR_S_00 },
+       { "rsch", 0x38, INSTR_S_00 },
+       { "stcrw", 0x39, INSTR_S_RD },
+       { "stcps", 0x3a, INSTR_S_RD },
+       { "rchp", 0x3b, INSTR_S_00 },
+       { "schm", 0x3c, INSTR_S_00 },
+       { "bakr", 0x40, INSTR_RRE_RR },
+       { "cksm", 0x41, INSTR_RRE_RR },
+       { "sqdr", 0x44, INSTR_RRE_F0 },
+       { "sqer", 0x45, INSTR_RRE_F0 },
+       { "stura", 0x46, INSTR_RRE_RR },
+       { "msta", 0x47, INSTR_RRE_R0 },
+       { "palb", 0x48, INSTR_RRE_00 },
+       { "ereg", 0x49, INSTR_RRE_RR },
+       { "esta", 0x4a, INSTR_RRE_RR },
+       { "lura", 0x4b, INSTR_RRE_RR },
+       { "tar", 0x4c, INSTR_RRE_AR },
+       { "cpya", 0x4d, INSTR_RRE_AA },
+       { "sar", 0x4e, INSTR_RRE_AR },
+       { "ear", 0x4f, INSTR_RRE_RA },
+       { "csp", 0x50, INSTR_RRE_RR },
+       { "msr", 0x52, INSTR_RRE_RR },
+       { "mvpg", 0x54, INSTR_RRE_RR },
+       { "mvst", 0x55, INSTR_RRE_RR },
+       { "cuse", 0x57, INSTR_RRE_RR },
+       { "bsg", 0x58, INSTR_RRE_RR },
+       { "bsa", 0x5a, INSTR_RRE_RR },
+       { "clst", 0x5d, INSTR_RRE_RR },
+       { "srst", 0x5e, INSTR_RRE_RR },
+       { "cmpsc", 0x63, INSTR_RRE_RR },
+       { "siga", 0x74, INSTR_S_RD },
+       { "xsch", 0x76, INSTR_S_00 },
+       { "rp", 0x77, INSTR_S_RD },
+       { "stcke", 0x78, INSTR_S_RD },
+       { "sacf", 0x79, INSTR_S_RD },
+       { "stsi", 0x7d, INSTR_S_RD },
+       { "srnm", 0x99, INSTR_S_RD },
+       { "stfpc", 0x9c, INSTR_S_RD },
+       { "lfpc", 0x9d, INSTR_S_RD },
+       { "tre", 0xa5, INSTR_RRE_RR },
+       { "cuutf", 0xa6, INSTR_RRE_RR },
+       { "cutfu", 0xa7, INSTR_RRE_RR },
+       { "stfl", 0xb1, INSTR_S_RD },
+       { "trap4", 0xff, INSTR_S_RD },
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_b3[] = {
+#ifdef CONFIG_64BIT
+       { "maylr", 0x38, INSTR_RRF_F0FF },
+       { "mylr", 0x39, INSTR_RRF_F0FF },
+       { "mayr", 0x3a, INSTR_RRF_F0FF },
+       { "myr", 0x3b, INSTR_RRF_F0FF },
+       { "mayhr", 0x3c, INSTR_RRF_F0FF },
+       { "myhr", 0x3d, INSTR_RRF_F0FF },
+       { "cegbr", 0xa4, INSTR_RRE_RR },
+       { "cdgbr", 0xa5, INSTR_RRE_RR },
+       { "cxgbr", 0xa6, INSTR_RRE_RR },
+       { "cgebr", 0xa8, INSTR_RRF_U0RF },
+       { "cgdbr", 0xa9, INSTR_RRF_U0RF },
+       { "cgxbr", 0xaa, INSTR_RRF_U0RF },
+       { "cfer", 0xb8, INSTR_RRF_U0RF },
+       { "cfdr", 0xb9, INSTR_RRF_U0RF },
+       { "cfxr", 0xba, INSTR_RRF_U0RF },
+       { "cegr", 0xc4, INSTR_RRE_RR },
+       { "cdgr", 0xc5, INSTR_RRE_RR },
+       { "cxgr", 0xc6, INSTR_RRE_RR },
+       { "cger", 0xc8, INSTR_RRF_U0RF },
+       { "cgdr", 0xc9, INSTR_RRF_U0RF },
+       { "cgxr", 0xca, INSTR_RRF_U0RF },
+#endif
+       { "lpebr", 0x00, INSTR_RRE_FF },
+       { "lnebr", 0x01, INSTR_RRE_FF },
+       { "ltebr", 0x02, INSTR_RRE_FF },
+       { "lcebr", 0x03, INSTR_RRE_FF },
+       { "ldebr", 0x04, INSTR_RRE_FF },
+       { "lxdbr", 0x05, INSTR_RRE_FF },
+       { "lxebr", 0x06, INSTR_RRE_FF },
+       { "mxdbr", 0x07, INSTR_RRE_FF },
+       { "kebr", 0x08, INSTR_RRE_FF },
+       { "cebr", 0x09, INSTR_RRE_FF },
+       { "aebr", 0x0a, INSTR_RRE_FF },
+       { "sebr", 0x0b, INSTR_RRE_FF },
+       { "mdebr", 0x0c, INSTR_RRE_FF },
+       { "debr", 0x0d, INSTR_RRE_FF },
+       { "maebr", 0x0e, INSTR_RRF_F0FF },
+       { "msebr", 0x0f, INSTR_RRF_F0FF },
+       { "lpdbr", 0x10, INSTR_RRE_FF },
+       { "lndbr", 0x11, INSTR_RRE_FF },
+       { "ltdbr", 0x12, INSTR_RRE_FF },
+       { "lcdbr", 0x13, INSTR_RRE_FF },
+       { "sqebr", 0x14, INSTR_RRE_FF },
+       { "sqdbr", 0x15, INSTR_RRE_FF },
+       { "sqxbr", 0x16, INSTR_RRE_FF },
+       { "meebr", 0x17, INSTR_RRE_FF },
+       { "kdbr", 0x18, INSTR_RRE_FF },
+       { "cdbr", 0x19, INSTR_RRE_FF },
+       { "adbr", 0x1a, INSTR_RRE_FF },
+       { "sdbr", 0x1b, INSTR_RRE_FF },
+       { "mdbr", 0x1c, INSTR_RRE_FF },
+       { "ddbr", 0x1d, INSTR_RRE_FF },
+       { "madbr", 0x1e, INSTR_RRF_F0FF },
+       { "msdbr", 0x1f, INSTR_RRF_F0FF },
+       { "lder", 0x24, INSTR_RRE_FF },
+       { "lxdr", 0x25, INSTR_RRE_FF },
+       { "lxer", 0x26, INSTR_RRE_FF },
+       { "maer", 0x2e, INSTR_RRF_F0FF },
+       { "mser", 0x2f, INSTR_RRF_F0FF },
+       { "sqxr", 0x36, INSTR_RRE_FF },
+       { "meer", 0x37, INSTR_RRE_FF },
+       { "madr", 0x3e, INSTR_RRF_F0FF },
+       { "msdr", 0x3f, INSTR_RRF_F0FF },
+       { "lpxbr", 0x40, INSTR_RRE_FF },
+       { "lnxbr", 0x41, INSTR_RRE_FF },
+       { "ltxbr", 0x42, INSTR_RRE_FF },
+       { "lcxbr", 0x43, INSTR_RRE_FF },
+       { "ledbr", 0x44, INSTR_RRE_FF },
+       { "ldxbr", 0x45, INSTR_RRE_FF },
+       { "lexbr", 0x46, INSTR_RRE_FF },
+       { "fixbr", 0x47, INSTR_RRF_U0FF },
+       { "kxbr", 0x48, INSTR_RRE_FF },
+       { "cxbr", 0x49, INSTR_RRE_FF },
+       { "axbr", 0x4a, INSTR_RRE_FF },
+       { "sxbr", 0x4b, INSTR_RRE_FF },
+       { "mxbr", 0x4c, INSTR_RRE_FF },
+       { "dxbr", 0x4d, INSTR_RRE_FF },
+       { "tbedr", 0x50, INSTR_RRF_U0FF },
+       { "tbdr", 0x51, INSTR_RRF_U0FF },
+       { "diebr", 0x53, INSTR_RRF_FUFF },
+       { "fiebr", 0x57, INSTR_RRF_U0FF },
+       { "thder", 0x58, INSTR_RRE_RR },
+       { "thdr", 0x59, INSTR_RRE_RR },
+       { "didbr", 0x5b, INSTR_RRF_FUFF },
+       { "fidbr", 0x5f, INSTR_RRF_U0FF },
+       { "lpxr", 0x60, INSTR_RRE_FF },
+       { "lnxr", 0x61, INSTR_RRE_FF },
+       { "ltxr", 0x62, INSTR_RRE_FF },
+       { "lcxr", 0x63, INSTR_RRE_FF },
+       { "lxr", 0x65, INSTR_RRE_RR },
+       { "lexr", 0x66, INSTR_RRE_FF },
+       { "fixr", 0x67, INSTR_RRF_U0FF },
+       { "cxr", 0x69, INSTR_RRE_FF },
+       { "lzer", 0x74, INSTR_RRE_R0 },
+       { "lzdr", 0x75, INSTR_RRE_R0 },
+       { "lzxr", 0x76, INSTR_RRE_R0 },
+       { "fier", 0x77, INSTR_RRF_U0FF },
+       { "fidr", 0x7f, INSTR_RRF_U0FF },
+       { "sfpc", 0x84, INSTR_RRE_RR_OPT },
+       { "efpc", 0x8c, INSTR_RRE_RR_OPT },
+       { "cefbr", 0x94, INSTR_RRE_RF },
+       { "cdfbr", 0x95, INSTR_RRE_RF },
+       { "cxfbr", 0x96, INSTR_RRE_RF },
+       { "cfebr", 0x98, INSTR_RRF_U0RF },
+       { "cfdbr", 0x99, INSTR_RRF_U0RF },
+       { "cfxbr", 0x9a, INSTR_RRF_U0RF },
+       { "cefr", 0xb4, INSTR_RRE_RF },
+       { "cdfr", 0xb5, INSTR_RRE_RF },
+       { "cxfr", 0xb6, INSTR_RRE_RF },
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_b9[] = {
+#ifdef CONFIG_64BIT
+       { "lpgr", 0x00, INSTR_RRE_RR },
+       { "lngr", 0x01, INSTR_RRE_RR },
+       { "ltgr", 0x02, INSTR_RRE_RR },
+       { "lcgr", 0x03, INSTR_RRE_RR },
+       { "lgr", 0x04, INSTR_RRE_RR },
+       { "lurag", 0x05, INSTR_RRE_RR },
+       { "lgbr", 0x06, INSTR_RRE_RR },
+       { "lghr", 0x07, INSTR_RRE_RR },
+       { "agr", 0x08, INSTR_RRE_RR },
+       { "sgr", 0x09, INSTR_RRE_RR },
+       { "algr", 0x0a, INSTR_RRE_RR },
+       { "slgr", 0x0b, INSTR_RRE_RR },
+       { "msgr", 0x0c, INSTR_RRE_RR },
+       { "dsgr", 0x0d, INSTR_RRE_RR },
+       { "eregg", 0x0e, INSTR_RRE_RR },
+       { "lrvgr", 0x0f, INSTR_RRE_RR },
+       { "lpgfr", 0x10, INSTR_RRE_RR },
+       { "lngfr", 0x11, INSTR_RRE_RR },
+       { "ltgfr", 0x12, INSTR_RRE_RR },
+       { "lcgfr", 0x13, INSTR_RRE_RR },
+       { "lgfr", 0x14, INSTR_RRE_RR },
+       { "llgfr", 0x16, INSTR_RRE_RR },
+       { "llgtr", 0x17, INSTR_RRE_RR },
+       { "agfr", 0x18, INSTR_RRE_RR },
+       { "sgfr", 0x19, INSTR_RRE_RR },
+       { "algfr", 0x1a, INSTR_RRE_RR },
+       { "slgfr", 0x1b, INSTR_RRE_RR },
+       { "msgfr", 0x1c, INSTR_RRE_RR },
+       { "dsgfr", 0x1d, INSTR_RRE_RR },
+       { "cgr", 0x20, INSTR_RRE_RR },
+       { "clgr", 0x21, INSTR_RRE_RR },
+       { "sturg", 0x25, INSTR_RRE_RR },
+       { "lbr", 0x26, INSTR_RRE_RR },
+       { "lhr", 0x27, INSTR_RRE_RR },
+       { "cgfr", 0x30, INSTR_RRE_RR },
+       { "clgfr", 0x31, INSTR_RRE_RR },
+       { "bctgr", 0x46, INSTR_RRE_RR },
+       { "ngr", 0x80, INSTR_RRE_RR },
+       { "ogr", 0x81, INSTR_RRE_RR },
+       { "xgr", 0x82, INSTR_RRE_RR },
+       { "flogr", 0x83, INSTR_RRE_RR },
+       { "llgcr", 0x84, INSTR_RRE_RR },
+       { "llghr", 0x85, INSTR_RRE_RR },
+       { "mlgr", 0x86, INSTR_RRE_RR },
+       { "dlgr", 0x87, INSTR_RRE_RR },
+       { "alcgr", 0x88, INSTR_RRE_RR },
+       { "slbgr", 0x89, INSTR_RRE_RR },
+       { "cspg", 0x8a, INSTR_RRE_RR },
+       { "idte", 0x8e, INSTR_RRF_R0RR },
+       { "llcr", 0x94, INSTR_RRE_RR },
+       { "llhr", 0x95, INSTR_RRE_RR },
+       { "esea", 0x9d, INSTR_RRE_R0 },
+       { "lptea", 0xaa, INSTR_RRF_RURR },
+       { "cu14", 0xb0, INSTR_RRF_M0RR },
+       { "cu24", 0xb1, INSTR_RRF_M0RR },
+       { "cu41", 0xb2, INSTR_RRF_M0RR },
+       { "cu42", 0xb3, INSTR_RRF_M0RR },
+#endif
+       { "kmac", 0x1e, INSTR_RRE_RR },
+       { "lrvr", 0x1f, INSTR_RRE_RR },
+       { "km", 0x2e, INSTR_RRE_RR },
+       { "kmc", 0x2f, INSTR_RRE_RR },
+       { "kimd", 0x3e, INSTR_RRE_RR },
+       { "klmd", 0x3f, INSTR_RRE_RR },
+       { "epsw", 0x8d, INSTR_RRE_RR },
+       { "trtt", 0x90, INSTR_RRE_RR },
+       { "trtt", 0x90, INSTR_RRF_M0RR },
+       { "trto", 0x91, INSTR_RRE_RR },
+       { "trto", 0x91, INSTR_RRF_M0RR },
+       { "trot", 0x92, INSTR_RRE_RR },
+       { "trot", 0x92, INSTR_RRF_M0RR },
+       { "troo", 0x93, INSTR_RRE_RR },
+       { "troo", 0x93, INSTR_RRF_M0RR },
+       { "mlr", 0x96, INSTR_RRE_RR },
+       { "dlr", 0x97, INSTR_RRE_RR },
+       { "alcr", 0x98, INSTR_RRE_RR },
+       { "slbr", 0x99, INSTR_RRE_RR },
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_c0[] = {
+#ifdef CONFIG_64BIT
+       { "lgfi", 0x01, INSTR_RIL_RI },
+       { "xihf", 0x06, INSTR_RIL_RU },
+       { "xilf", 0x07, INSTR_RIL_RU },
+       { "iihf", 0x08, INSTR_RIL_RU },
+       { "iilf", 0x09, INSTR_RIL_RU },
+       { "nihf", 0x0a, INSTR_RIL_RU },
+       { "nilf", 0x0b, INSTR_RIL_RU },
+       { "oihf", 0x0c, INSTR_RIL_RU },
+       { "oilf", 0x0d, INSTR_RIL_RU },
+       { "llihf", 0x0e, INSTR_RIL_RU },
+       { "llilf", 0x0f, INSTR_RIL_RU },
+#endif
+       { "larl", 0x00, INSTR_RIL_RP },
+       { "brcl", 0x04, INSTR_RIL_UP },
+       { "brasl", 0x05, INSTR_RIL_RP },
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_c2[] = {
+#ifdef CONFIG_64BIT
+       { "slgfi", 0x04, INSTR_RIL_RU },
+       { "slfi", 0x05, INSTR_RIL_RU },
+       { "agfi", 0x08, INSTR_RIL_RI },
+       { "afi", 0x09, INSTR_RIL_RI },
+       { "algfi", 0x0a, INSTR_RIL_RU },
+       { "alfi", 0x0b, INSTR_RIL_RU },
+       { "cgfi", 0x0c, INSTR_RIL_RI },
+       { "cfi", 0x0d, INSTR_RIL_RI },
+       { "clgfi", 0x0e, INSTR_RIL_RU },
+       { "clfi", 0x0f, INSTR_RIL_RU },
+#endif
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_c8[] = {
+#ifdef CONFIG_64BIT
+       { "mvcos", 0x00, INSTR_SSF_RRDRD },
+#endif
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_e3[] = {
+#ifdef CONFIG_64BIT
+       { "ltg", 0x02, INSTR_RXY_RRRD },
+       { "lrag", 0x03, INSTR_RXY_RRRD },
+       { "lg", 0x04, INSTR_RXY_RRRD },
+       { "cvby", 0x06, INSTR_RXY_RRRD },
+       { "ag", 0x08, INSTR_RXY_RRRD },
+       { "sg", 0x09, INSTR_RXY_RRRD },
+       { "alg", 0x0a, INSTR_RXY_RRRD },
+       { "slg", 0x0b, INSTR_RXY_RRRD },
+       { "msg", 0x0c, INSTR_RXY_RRRD },
+       { "dsg", 0x0d, INSTR_RXY_RRRD },
+       { "cvbg", 0x0e, INSTR_RXY_RRRD },
+       { "lrvg", 0x0f, INSTR_RXY_RRRD },
+       { "lt", 0x12, INSTR_RXY_RRRD },
+       { "lray", 0x13, INSTR_RXY_RRRD },
+       { "lgf", 0x14, INSTR_RXY_RRRD },
+       { "lgh", 0x15, INSTR_RXY_RRRD },
+       { "llgf", 0x16, INSTR_RXY_RRRD },
+       { "llgt", 0x17, INSTR_RXY_RRRD },
+       { "agf", 0x18, INSTR_RXY_RRRD },
+       { "sgf", 0x19, INSTR_RXY_RRRD },
+       { "algf", 0x1a, INSTR_RXY_RRRD },
+       { "slgf", 0x1b, INSTR_RXY_RRRD },
+       { "msgf", 0x1c, INSTR_RXY_RRRD },
+       { "dsgf", 0x1d, INSTR_RXY_RRRD },
+       { "cg", 0x20, INSTR_RXY_RRRD },
+       { "clg", 0x21, INSTR_RXY_RRRD },
+       { "stg", 0x24, INSTR_RXY_RRRD },
+       { "cvdy", 0x26, INSTR_RXY_RRRD },
+       { "cvdg", 0x2e, INSTR_RXY_RRRD },
+       { "strvg", 0x2f, INSTR_RXY_RRRD },
+       { "cgf", 0x30, INSTR_RXY_RRRD },
+       { "clgf", 0x31, INSTR_RXY_RRRD },
+       { "strvh", 0x3f, INSTR_RXY_RRRD },
+       { "bctg", 0x46, INSTR_RXY_RRRD },
+       { "sty", 0x50, INSTR_RXY_RRRD },
+       { "msy", 0x51, INSTR_RXY_RRRD },
+       { "ny", 0x54, INSTR_RXY_RRRD },
+       { "cly", 0x55, INSTR_RXY_RRRD },
+       { "oy", 0x56, INSTR_RXY_RRRD },
+       { "xy", 0x57, INSTR_RXY_RRRD },
+       { "ly", 0x58, INSTR_RXY_RRRD },
+       { "cy", 0x59, INSTR_RXY_RRRD },
+       { "ay", 0x5a, INSTR_RXY_RRRD },
+       { "sy", 0x5b, INSTR_RXY_RRRD },
+       { "aly", 0x5e, INSTR_RXY_RRRD },
+       { "sly", 0x5f, INSTR_RXY_RRRD },
+       { "sthy", 0x70, INSTR_RXY_RRRD },
+       { "lay", 0x71, INSTR_RXY_RRRD },
+       { "stcy", 0x72, INSTR_RXY_RRRD },
+       { "icy", 0x73, INSTR_RXY_RRRD },
+       { "lb", 0x76, INSTR_RXY_RRRD },
+       { "lgb", 0x77, INSTR_RXY_RRRD },
+       { "lhy", 0x78, INSTR_RXY_RRRD },
+       { "chy", 0x79, INSTR_RXY_RRRD },
+       { "ahy", 0x7a, INSTR_RXY_RRRD },
+       { "shy", 0x7b, INSTR_RXY_RRRD },
+       { "ng", 0x80, INSTR_RXY_RRRD },
+       { "og", 0x81, INSTR_RXY_RRRD },
+       { "xg", 0x82, INSTR_RXY_RRRD },
+       { "mlg", 0x86, INSTR_RXY_RRRD },
+       { "dlg", 0x87, INSTR_RXY_RRRD },
+       { "alcg", 0x88, INSTR_RXY_RRRD },
+       { "slbg", 0x89, INSTR_RXY_RRRD },
+       { "stpq", 0x8e, INSTR_RXY_RRRD },
+       { "lpq", 0x8f, INSTR_RXY_RRRD },
+       { "llgc", 0x90, INSTR_RXY_RRRD },
+       { "llgh", 0x91, INSTR_RXY_RRRD },
+       { "llc", 0x94, INSTR_RXY_RRRD },
+       { "llh", 0x95, INSTR_RXY_RRRD },
+#endif
+       { "lrv", 0x1e, INSTR_RXY_RRRD },
+       { "lrvh", 0x1f, INSTR_RXY_RRRD },
+       { "strv", 0x3e, INSTR_RXY_RRRD },
+       { "ml", 0x96, INSTR_RXY_RRRD },
+       { "dl", 0x97, INSTR_RXY_RRRD },
+       { "alc", 0x98, INSTR_RXY_RRRD },
+       { "slb", 0x99, INSTR_RXY_RRRD },
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_e5[] = {
+#ifdef CONFIG_64BIT
+       { "strag", 0x02, INSTR_SSE_RDRD },
+#endif
+       { "lasp", 0x00, INSTR_SSE_RDRD },
+       { "tprot", 0x01, INSTR_SSE_RDRD },
+       { "mvcsk", 0x0e, INSTR_SSE_RDRD },
+       { "mvcdk", 0x0f, INSTR_SSE_RDRD },
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_eb[] = {
+#ifdef CONFIG_64BIT
+       { "lmg", 0x04, INSTR_RSY_RRRD },
+       { "srag", 0x0a, INSTR_RSY_RRRD },
+       { "slag", 0x0b, INSTR_RSY_RRRD },
+       { "srlg", 0x0c, INSTR_RSY_RRRD },
+       { "sllg", 0x0d, INSTR_RSY_RRRD },
+       { "tracg", 0x0f, INSTR_RSY_RRRD },
+       { "csy", 0x14, INSTR_RSY_RRRD },
+       { "rllg", 0x1c, INSTR_RSY_RRRD },
+       { "clmh", 0x20, INSTR_RSY_RURD },
+       { "clmy", 0x21, INSTR_RSY_RURD },
+       { "stmg", 0x24, INSTR_RSY_RRRD },
+       { "stctg", 0x25, INSTR_RSY_CCRD },
+       { "stmh", 0x26, INSTR_RSY_RRRD },
+       { "stcmh", 0x2c, INSTR_RSY_RURD },
+       { "stcmy", 0x2d, INSTR_RSY_RURD },
+       { "lctlg", 0x2f, INSTR_RSY_CCRD },
+       { "csg", 0x30, INSTR_RSY_RRRD },
+       { "cdsy", 0x31, INSTR_RSY_RRRD },
+       { "cdsg", 0x3e, INSTR_RSY_RRRD },
+       { "bxhg", 0x44, INSTR_RSY_RRRD },
+       { "bxleg", 0x45, INSTR_RSY_RRRD },
+       { "tmy", 0x51, INSTR_SIY_URD },
+       { "mviy", 0x52, INSTR_SIY_URD },
+       { "niy", 0x54, INSTR_SIY_URD },
+       { "cliy", 0x55, INSTR_SIY_URD },
+       { "oiy", 0x56, INSTR_SIY_URD },
+       { "xiy", 0x57, INSTR_SIY_URD },
+       { "icmh", 0x80, INSTR_RSE_RURD },
+       { "icmh", 0x80, INSTR_RSY_RURD },
+       { "icmy", 0x81, INSTR_RSY_RURD },
+       { "clclu", 0x8f, INSTR_RSY_RRRD },
+       { "stmy", 0x90, INSTR_RSY_RRRD },
+       { "lmh", 0x96, INSTR_RSY_RRRD },
+       { "lmy", 0x98, INSTR_RSY_RRRD },
+       { "lamy", 0x9a, INSTR_RSY_AARD },
+       { "stamy", 0x9b, INSTR_RSY_AARD },
+#endif
+       { "rll", 0x1d, INSTR_RSY_RRRD },
+       { "mvclu", 0x8e, INSTR_RSY_RRRD },
+       { "tp", 0xc0, INSTR_RSL_R0RD },
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_ec[] = {
+#ifdef CONFIG_64BIT
+       { "brxhg", 0x44, INSTR_RIE_RRP },
+       { "brxlg", 0x45, INSTR_RIE_RRP },
+#endif
+       { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_ed[] = {
+#ifdef CONFIG_64BIT
+       { "mayl", 0x38, INSTR_RXF_FRRDF },
+       { "myl", 0x39, INSTR_RXF_FRRDF },
+       { "may", 0x3a, INSTR_RXF_FRRDF },
+       { "my", 0x3b, INSTR_RXF_FRRDF },
+       { "mayh", 0x3c, INSTR_RXF_FRRDF },
+       { "myh", 0x3d, INSTR_RXF_FRRDF },
+       { "ley", 0x64, INSTR_RXY_FRRD },
+       { "ldy", 0x65, INSTR_RXY_FRRD },
+       { "stey", 0x66, INSTR_RXY_FRRD },
+       { "stdy", 0x67, INSTR_RXY_FRRD },
+#endif
+       { "ldeb", 0x04, INSTR_RXE_FRRD },
+       { "lxdb", 0x05, INSTR_RXE_FRRD },
+       { "lxeb", 0x06, INSTR_RXE_FRRD },
+       { "mxdb", 0x07, INSTR_RXE_FRRD },
+       { "keb", 0x08, INSTR_RXE_FRRD },
+       { "ceb", 0x09, INSTR_RXE_FRRD },
+       { "aeb", 0x0a, INSTR_RXE_FRRD },
+       { "seb", 0x0b, INSTR_RXE_FRRD },
+       { "mdeb", 0x0c, INSTR_RXE_FRRD },
+       { "deb", 0x0d, INSTR_RXE_FRRD },
+       { "maeb", 0x0e, INSTR_RXF_FRRDF },
+       { "mseb", 0x0f, INSTR_RXF_FRRDF },
+       { "tceb", 0x10, INSTR_RXE_FRRD },
+       { "tcdb", 0x11, INSTR_RXE_FRRD },
+       { "tcxb", 0x12, INSTR_RXE_FRRD },
+       { "sqeb", 0x14, INSTR_RXE_FRRD },
+       { "sqdb", 0x15, INSTR_RXE_FRRD },
+       { "meeb", 0x17, INSTR_RXE_FRRD },
+       { "kdb", 0x18, INSTR_RXE_FRRD },
+       { "cdb", 0x19, INSTR_RXE_FRRD },
+       { "adb", 0x1a, INSTR_RXE_FRRD },
+       { "sdb", 0x1b, INSTR_RXE_FRRD },
+       { "mdb", 0x1c, INSTR_RXE_FRRD },
+       { "ddb", 0x1d, INSTR_RXE_FRRD },
+       { "madb", 0x1e, INSTR_RXF_FRRDF },
+       { "msdb", 0x1f, INSTR_RXF_FRRDF },
+       { "lde", 0x24, INSTR_RXE_FRRD },
+       { "lxd", 0x25, INSTR_RXE_FRRD },
+       { "lxe", 0x26, INSTR_RXE_FRRD },
+       { "mae", 0x2e, INSTR_RXF_FRRDF },
+       { "mse", 0x2f, INSTR_RXF_FRRDF },
+       { "sqe", 0x34, INSTR_RXE_FRRD },
+       { "mee", 0x37, INSTR_RXE_FRRD },
+       { "mad", 0x3e, INSTR_RXF_FRRDF },
+       { "msd", 0x3f, INSTR_RXF_FRRDF },
+       { "", 0, INSTR_INVALID }
+};
+
+/* Extracts an operand value from an instruction.  */
+static unsigned int extract_operand(unsigned char *code,
+                                   const struct operand *operand)
+{
+       unsigned int val;
+       int bits;
+
+       /* Extract fragments of the operand byte by byte.  */
+       code += operand->shift / 8;
+       bits = (operand->shift & 7) + operand->bits;
+       val = 0;
+       do {
+               val <<= 8;
+               val |= (unsigned int) *code++;
+               bits -= 8;
+       } while (bits > 0);
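+       /*
+        * bits is now <= 0: shift out the low-order bits that were read
+        * past the end of the operand and mask the value to the operand
+        * width (the two-step mask also works for 32-bit operands).
+        */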
+       val >>= -bits;
+       val &= ((1U << (operand->bits - 1)) << 1) - 1;
+
+       /* Check for special long displacement case.  */
+       if (operand->bits == 20 && operand->shift == 20)
+               val = (val & 0xff) << 12 | (val & 0xfff00) >> 8;
+
+       /* Sign extend value if the operand is signed or pc relative.  */
+       if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) &&
+           (val & (1U << (operand->bits - 1))))
+               val |= (-1U << (operand->bits - 1)) << 1;
+
+       /* Double value if the operand is pc relative.  */
+       if (operand->flags & OPERAND_PCREL)
+               val <<= 1;
+
+       /* Length x in an instruction has real length x + 1.  */
+       if (operand->flags & OPERAND_LENGTH)
+               val++;
+       return val;
+}
+
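+/*
+ * The instruction length is encoded in the two most significant bits of
+ * the first opcode byte: 00 -> 2 bytes, 01 and 10 -> 4 bytes, 11 -> 6 bytes.
+ */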
+static inline int insn_length(unsigned char code)
+{
+       return ((((int) code + 64) >> 7) + 1) << 1;
+}
+
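+/*
+ * Select the opcode table from the first opcode byte. The fragment that
+ * identifies the instruction within the table is code[1] for most
+ * extended opcodes, code[5] for the e3/eb/ec/ed formats and code[0]
+ * itself for single-byte opcodes.
+ */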
+static struct insn *find_insn(unsigned char *code)
+{
+       unsigned char opfrag = code[1];
+       unsigned char opmask;
+       struct insn *table;
+
+       switch (code[0]) {
+       case 0x01:
+               table = opcode_01;
+               break;
+       case 0xa5:
+               table = opcode_a5;
+               break;
+       case 0xa7:
+               table = opcode_a7;
+               break;
+       case 0xb2:
+               table = opcode_b2;
+               break;
+       case 0xb3:
+               table = opcode_b3;
+               break;
+       case 0xb9:
+               table = opcode_b9;
+               break;
+       case 0xc0:
+               table = opcode_c0;
+               break;
+       case 0xc2:
+               table = opcode_c2;
+               break;
+       case 0xc8:
+               table = opcode_c8;
+               break;
+       case 0xe3:
+               table = opcode_e3;
+               opfrag = code[5];
+               break;
+       case 0xe5:
+               table = opcode_e5;
+               break;
+       case 0xeb:
+               table = opcode_eb;
+               opfrag = code[5];
+               break;
+       case 0xec:
+               table = opcode_ec;
+               opfrag = code[5];
+               break;
+       case 0xed:
+               table = opcode_ed;
+               opfrag = code[5];
+               break;
+       default:
+               table = opcode;
+               opfrag = code[0];
+               break;
+       }
+       while (table->format != INSTR_INVALID) {
+               opmask = formats[table->format][0];
+               if (table->opfrag == (opfrag & opmask))
+                       return table;
+               table++;
+       }
+       return NULL;
+}
+
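+/*
+ * Decode one instruction into the buffer: print the mnemonic and the
+ * operands, grouping a displacement with its index and base register as
+ * d(x,b). Returns the number of characters written.
+ */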
+static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
+{
+       struct insn *insn;
+       const unsigned char *ops;
+       const struct operand *operand;
+       unsigned int value;
+       char separator;
+       char *ptr;
+
+       ptr = buffer;
+       insn = find_insn(code);
+       if (insn) {
+               ptr += sprintf(ptr, "%.5s\t", insn->name);
+               /* Extract the operands. */
+               separator = 0;
+               for (ops = formats[insn->format] + 1; *ops != 0; ops++) {
+                       operand = operands + *ops;
+                       value = extract_operand(code, operand);
+                       if ((operand->flags & OPERAND_INDEX)  && value == 0)
+                               continue;
+                       if ((operand->flags & OPERAND_BASE) &&
+                           value == 0 && separator == '(') {
+                               separator = ',';
+                               continue;
+                       }
+                       if (separator)
+                               ptr += sprintf(ptr, "%c", separator);
+                       if (operand->flags & OPERAND_GPR)
+                               ptr += sprintf(ptr, "%%r%i", value);
+                       else if (operand->flags & OPERAND_FPR)
+                               ptr += sprintf(ptr, "%%f%i", value);
+                       else if (operand->flags & OPERAND_AR)
+                               ptr += sprintf(ptr, "%%a%i", value);
+                       else if (operand->flags & OPERAND_CR)
+                               ptr += sprintf(ptr, "%%c%i", value);
+                       else if (operand->flags & OPERAND_PCREL)
+                               ptr += sprintf(ptr, "%lx", value + addr);
+                       else if (operand->flags & OPERAND_SIGNED)
+                               ptr += sprintf(ptr, "%i", value);
+                       else
+                               ptr += sprintf(ptr, "%u", value);
+                       if (operand->flags & OPERAND_DISP)
+                               separator = '(';
+                       else if (operand->flags & OPERAND_BASE) {
+                               ptr += sprintf(ptr, ")");
+                               separator = ',';
+                       } else
+                               separator = ',';
+               }
+       } else
+               ptr += sprintf(ptr, "unknown");
+       return (int) (ptr - buffer);
+}
+
+void show_code(struct pt_regs *regs)
+{
+       char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
+       unsigned char code[64];
+       char buffer[64], *ptr;
+       mm_segment_t old_fs;
+       unsigned long addr;
+       int start, end, opsize, hops, i;
+
+       /* Get a snapshot of the 64 bytes surrounding the fault address. */
+       old_fs = get_fs();
+       set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS);
+       for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
+               addr = regs->psw.addr - 34 + start;
+               if (__copy_from_user(code + start - 2,
+                                    (char __user *) addr, 2))
+                       break;
+       }
+       for (end = 32; end < 64; end += 2) {
+               addr = regs->psw.addr + end - 32;
+               if (__copy_from_user(code + end,
+                                    (char __user *) addr, 2))
+                       break;
+       }
+       set_fs(old_fs);
+       /* Is the code snapshot usable? */
+       if ((regs->psw.addr & 1) || start >= end) {
+               printk("%s Code: Bad PSW.\n", mode);
+               return;
+       }
+       /* Find a starting point for the disassembly. */
+       while (start < 32) {
+               hops = 0;
+               for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) {
+                       if (!find_insn(code + start + i))
+                               break;
+                       i += insn_length(code[start + i]);
+               }
+               if (start + i == 32)
+                       /* Looks good, sequence ends at PSW. */
+                       break;
+               start += 2;
+       }
+       /* Decode the instructions. */
+       ptr = buffer;
+       ptr += sprintf(ptr, "%s Code:", mode);
+       hops = 0;
+       while (start < end && hops < 8) {
+               *ptr++ = (start == 32) ? '>' : ' ';
+               addr = regs->psw.addr + start - 32;
+               ptr += sprintf(ptr, ONELONG, addr);
+               opsize = insn_length(code[start]);
+               if (start + opsize >= end)
+                       break;
+               for (i = 0; i < opsize; i++)
+                       ptr += sprintf(ptr, "%02x", code[start + i]);
+               *ptr++ = '\t';
+               if (i < 6)
+                       *ptr++ = '\t';
+               ptr += print_insn(ptr, code + start, addr);
+               start += opsize;
+               printk(buffer);
+               ptr = buffer;
+               ptr += sprintf(ptr, "\n          ");
+               hops++;
+       }
+       printk("\n");
+}
index 5e47936..50538e5 100644 (file)
@@ -253,11 +253,10 @@ static noinline __init void find_memory_chunks(unsigned long memsize)
                        break;
 #endif
                /*
-                * Finish memory detection at the first hole, unless
-                * - we reached the hsa -> skip it.
-                * - we know there must be more.
+                * Finish memory detection at the first hole
+                * if storage size is unknown.
                 */
-               if (cc == -1UL && !memsize && old_addr != ADDR2G)
+               if (cc == -1UL && !memsize)
                        break;
                if (memsize && addr >= memsize)
                        break;
index dddc3de..c8a2212 100644 (file)
@@ -249,8 +249,6 @@ sysc_do_restart:
        bnz     BASED(sysc_tracesys)
        basr    %r14,%r8          # call sys_xxxx
        st      %r2,SP_R2(%r15)   # store return value (change R2 on stack)
-                                 # ATTENTION: check sys_execve_glue before
-                                 # changing anything here !!
 
 sysc_return:
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
@@ -381,50 +379,37 @@ ret_from_fork:
        b       BASED(sysc_return)
 
 #
-# clone, fork, vfork, exec and sigreturn need glue,
-# because they all expect pt_regs as parameter,
-# but are called with different parameter.
-# return-address is set up above
+# The kernel_execve function needs to deal with a pt_regs structure
+# that is not at the usual place
 #
-sys_clone_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       l       %r1,BASED(.Lclone)
-       br      %r1                     # branch to sys_clone
-
-sys_fork_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       l       %r1,BASED(.Lfork)
-       br      %r1                     # branch to sys_fork
-
-sys_vfork_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       l       %r1,BASED(.Lvfork)
-       br      %r1                     # branch to sys_vfork
-
-sys_execve_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       l       %r1,BASED(.Lexecve)
-       lr      %r12,%r14               # save return address
-       basr    %r14,%r1                # call sys_execve
-       ltr     %r2,%r2                 # check if execve failed
-       bnz     0(%r12)                 # it did fail -> store result in gpr2
-       b       4(%r12)                 # SKIP ST 2,SP_R2(15) after BASR 14,8
-                                       # in system_call/sysc_tracesys
-
-sys_sigreturn_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
-       l       %r1,BASED(.Lsigreturn)
-       br      %r1                     # branch to sys_sigreturn
-
-sys_rt_sigreturn_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
-       l       %r1,BASED(.Lrt_sigreturn)
-       br      %r1                     # branch to sys_sigreturn
-
-sys_sigaltstack_glue:
-       la      %r4,SP_PTREGS(%r15)     # load pt_regs as parameter
-       l       %r1,BASED(.Lsigaltstack)
-       br      %r1                     # branch to sys_sigreturn
+       .globl  kernel_execve
+kernel_execve:
+       stm     %r12,%r15,48(%r15)      # save registers
+       lr      %r14,%r15               # remember caller's stack pointer
+       l       %r13,__LC_SVC_NEW_PSW+4 # load base register
+       s       %r15,BASED(.Lc_spsize)  # allocate a stack frame with pt_regs
+       st      %r14,__SF_BACKCHAIN(%r15) # set up the backchain
+       la      %r12,SP_PTREGS(%r15)    # %r12 points to the new pt_regs
+       xc      0(__PT_SIZE,%r12),0(%r12) # clear pt_regs
+       l       %r1,BASED(.Ldo_execve)
+       lr      %r5,%r12                # pass pt_regs as fourth argument
+       basr    %r14,%r1                # call do_execve
+       ltr     %r2,%r2                 # check the return value
+       be      BASED(0f)               # zero -> execve succeeded
+       a       %r15,BASED(.Lc_spsize)  # execve failed: release stack frame,
+       lm      %r12,%r15,48(%r15)      # restore registers and return
+       br      %r14
+       # execve succeeded.
+0:     stnsm   __SF_EMPTY(%r15),0xfc   # disable interrupts
+       l       %r15,__LC_KERNEL_STACK  # load ksp
+       s       %r15,BASED(.Lc_spsize)  # make room for registers & psw
+       l       %r9,__LC_THREAD_INFO
+       mvc     SP_PTREGS(__PT_SIZE,%r15),0(%r12)       # copy pt_regs
+       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       l       %r1,BASED(.Lexecve_tail)
+       basr    %r14,%r1
+       b       BASED(sysc_return)
 
 /*
  * Program check handler routine
@@ -1031,19 +1016,11 @@ cleanup_io_leave_insn:
 .Ldo_extint:   .long   do_extint
 .Ldo_signal:   .long   do_signal
 .Lhandle_per:  .long   do_single_step
+.Ldo_execve:   .long   do_execve
+.Lexecve_tail: .long   execve_tail
 .Ljump_table:  .long   pgm_check_table
 .Lschedule:    .long   schedule
-.Lclone:       .long   sys_clone
-.Lexecve:      .long   sys_execve
-.Lfork:        .long   sys_fork
-.Lrt_sigreturn: .long  sys_rt_sigreturn
-.Lrt_sigsuspend:
-               .long   sys_rt_sigsuspend
-.Lsigreturn:   .long   sys_sigreturn
-.Lsigsuspend:  .long   sys_sigsuspend
-.Lsigaltstack: .long   sys_sigaltstack
 .Ltrace:       .long   syscall_trace
-.Lvfork:       .long   sys_vfork
 .Lschedtail:   .long   schedule_tail
 .Lsysc_table:  .long   sys_call_table
 #ifdef CONFIG_TRACE_IRQFLAGS
index 0f758c3..93745fd 100644 (file)
@@ -244,8 +244,6 @@ sysc_noemu:
        jnz     sysc_tracesys
        basr    %r14,%r8        # call sys_xxxx
        stg     %r2,SP_R2(%r15) # store return value (change R2 on stack)
-                               # ATTENTION: check sys_execve_glue before
-                               # changing anything here !!
 
 sysc_return:
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
@@ -371,77 +369,35 @@ ret_from_fork:
        j       sysc_return
 
 #
-# clone, fork, vfork, exec and sigreturn need glue,
-# because they all expect pt_regs as parameter,
-# but are called with different parameter.
-# return-address is set up above
+# The kernel_execve function needs to deal with a pt_regs structure
+# that is not at the usual place
 #
-sys_clone_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       jg      sys_clone               # branch to sys_clone
-
-#ifdef CONFIG_COMPAT
-sys32_clone_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       jg      sys32_clone             # branch to sys32_clone
-#endif
-
-sys_fork_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       jg      sys_fork                # branch to sys_fork
-
-sys_vfork_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       jg      sys_vfork               # branch to sys_vfork
-
-sys_execve_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       lgr     %r12,%r14               # save return address
-       brasl   %r14,sys_execve         # call sys_execve
-       ltgr    %r2,%r2                 # check if execve failed
-       bnz     0(%r12)                 # it did fail -> store result in gpr2
-       b       6(%r12)                 # SKIP STG 2,SP_R2(15) in
-                                       # system_call/sysc_tracesys
-#ifdef CONFIG_COMPAT
-sys32_execve_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       lgr     %r12,%r14               # save return address
-       brasl   %r14,sys32_execve       # call sys32_execve
-       ltgr    %r2,%r2                 # check if execve failed
-       bnz     0(%r12)                 # it did fail -> store result in gpr2
-       b       6(%r12)                 # SKIP STG 2,SP_R2(15) in
-                                       # system_call/sysc_tracesys
-#endif
-
-sys_sigreturn_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
-       jg      sys_sigreturn           # branch to sys_sigreturn
-
-#ifdef CONFIG_COMPAT
-sys32_sigreturn_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
-       jg      sys32_sigreturn         # branch to sys32_sigreturn
-#endif
-
-sys_rt_sigreturn_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
-       jg      sys_rt_sigreturn        # branch to sys_sigreturn
-
-#ifdef CONFIG_COMPAT
-sys32_rt_sigreturn_glue:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
-       jg      sys32_rt_sigreturn      # branch to sys32_sigreturn
-#endif
-
-sys_sigaltstack_glue:
-       la      %r4,SP_PTREGS(%r15)     # load pt_regs as parameter
-       jg      sys_sigaltstack         # branch to sys_sigreturn
-
-#ifdef CONFIG_COMPAT
-sys32_sigaltstack_glue:
-       la      %r4,SP_PTREGS(%r15)     # load pt_regs as parameter
-       jg      sys32_sigaltstack_wrapper # branch to sys_sigreturn
-#endif
+       .globl  kernel_execve
+kernel_execve:
+       stmg    %r12,%r15,96(%r15)      # save registers
+       lgr     %r14,%r15               # remember caller's stack pointer
+       aghi    %r15,-SP_SIZE           # allocate a stack frame with pt_regs
+       stg     %r14,__SF_BACKCHAIN(%r15) # set up the backchain
+       la      %r12,SP_PTREGS(%r15)    # %r12 points to the new pt_regs
+       xc      0(__PT_SIZE,%r12),0(%r12) # clear pt_regs
+       lgr     %r5,%r12                # pass pt_regs as fourth argument
+       brasl   %r14,do_execve          # call do_execve
+       ltgfr   %r2,%r2                 # check the return value
+       je      0f                      # zero -> execve succeeded
+       aghi    %r15,SP_SIZE            # execve failed: release stack frame,
+       lmg     %r12,%r15,96(%r15)      # restore registers and return
+       br      %r14
+       # execve succeeded.
+0:     stnsm   __SF_EMPTY(%r15),0xfc   # disable interrupts
+       lg      %r15,__LC_KERNEL_STACK  # load ksp
+       aghi    %r15,-SP_SIZE           # make room for registers & psw
+       lg      %r13,__LC_SVC_NEW_PSW+8
+       lg      %r9,__LC_THREAD_INFO
+       mvc     SP_PTREGS(__PT_SIZE,%r15),0(%r12)       # copy pt_regs
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       brasl   %r14,execve_tail
+       j       sysc_return
 
 /*
  * Program check handler routine
index 3701070..a87b197 100644 (file)
@@ -39,7 +39,69 @@ startup_continue:
        basr    %r13,0                  # get base
 .LPG1: sll     %r13,1                  # remove high order bit
        srl     %r13,1
-       lhi     %r1,1                   # mode 1 = esame
+
+#ifdef CONFIG_ZFCPDUMP
+
+       # check if we have been IPLed using zfcp dump:
+
+       tm      0xb9,0x01               # test if subchannel is enabled
+       jno     .nodump                 # subchannel disabled
+       l       %r1,0xb8
+       la      %r5,.Lipl_schib-.LPG1(%r13)
+       stsch   0(%r5)                  # get schib of subchannel
+       jne     .nodump                 # schib not available
+       tm      5(%r5),0x01             # devno valid?
+       jno     .nodump
+       tm      4(%r5),0x80             # qdio capable device?
+       jno     .nodump
+       l       %r2,20(%r0)             # address of ipl parameter block
+       lhi     %r3,0
+       ic      %r3,0x148(%r2)          # get opt field
+       chi     %r3,0x20                # load with dump?
+       jne     .nodump
+
+       # store all prefix registers in case of load with dump:
+
+       la      %r7,0                   # base register for 0 page
+       la      %r8,0                   # first cpu
+       l       %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array
+       ahi     %r11,4                  # skip boot cpu
+       lr      %r12,%r11
+       ahi     %r12,(CONFIG_NR_CPUS*4) # end of prefix array
+       stap    .Lcurrent_cpu+2-.LPG1(%r13)     # store current cpu addr
+1:
+       cl      %r8,.Lcurrent_cpu-.LPG1(%r13)   # is ipl cpu ?
+       je      4f                              # if yes get next cpu
+2:
+       lr      %r9,%r7
+       sigp    %r9,%r8,0x9             # stop & store status of cpu
+       brc     8,3f                    # accepted
+       brc     4,4f                    # status stored: next cpu
+       brc     2,2b                    # busy:          try again
+       brc     1,4f                    # not op:        next cpu
+3:
+       mvc     0(4,%r11),264(%r7)      # copy prefix register to prefix array
+       ahi     %r11,4                  # next element in prefix array
+       clr     %r11,%r12
+       je      5f                      # no more space in prefix array
+4:
+       ahi     %r8,1                           # next cpu (r8 += 1)
+       cl      %r8,.Llast_cpu-.LPG1(%r13)      # is last possible cpu ?
+       jl      1b                              # jump if not last cpu
+5:
+       lhi     %r1,2                   # mode 2 = esame (dump)
+       j       6f
+       .align 4
+.Lipl_schib:
+       .rept 13
+       .long 0
+       .endr
+.nodump:
+       lhi     %r1,1                   # mode 1 = esame (normal ipl)
+6:
+#else
+       lhi     %r1,1                   # mode 1 = esame (normal ipl)
+#endif /* CONFIG_ZFCPDUMP */
        mvi     __LC_AR_MODE_ID,1       # set esame flag
        slr     %r0,%r0                 # set cpuid to zero
        sigp    %r1,%r0,0x12            # switch to esame mode
@@ -149,6 +211,14 @@ startup_continue:
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad 0x80000000 + 0x20000 - 8        # 2GB + 128K - 8
 .Lnop: .long   0x07000700
+#ifdef CONFIG_ZFCPDUMP
+.Lcurrent_cpu:
+       .long 0x0
+.Llast_cpu:
+       .long 0x0000ffff
+.Lpref_arr_ptr:
+       .long zfcpdump_prefix_array
+#endif /* CONFIG_ZFCPDUMP */
 .Lparmaddr:
        .quad   PARMAREA
        .align  64
index f731185..06833ac 100644 (file)
 #define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm)
 #define SCCB_FLAG (s390_readinfo_sccb.flags)
 
-enum ipl_type {
-       IPL_TYPE_NONE    = 1,
-       IPL_TYPE_UNKNOWN = 2,
-       IPL_TYPE_CCW     = 4,
-       IPL_TYPE_FCP     = 8,
-       IPL_TYPE_NSS     = 16,
-};
-
-#define IPL_NONE_STR    "none"
-#define IPL_UNKNOWN_STR  "unknown"
-#define IPL_CCW_STR     "ccw"
-#define IPL_FCP_STR     "fcp"
-#define IPL_NSS_STR     "nss"
-
-/*
- * Must be in data section since the bss section
- * is not cleared when these are accessed.
- */
-u16 ipl_devno __attribute__((__section__(".data"))) = 0;
-u32 ipl_flags __attribute__((__section__(".data"))) = 0;
+#define IPL_UNKNOWN_STR                "unknown"
+#define IPL_CCW_STR            "ccw"
+#define IPL_FCP_STR            "fcp"
+#define IPL_FCP_DUMP_STR       "fcp_dump"
+#define IPL_NSS_STR            "nss"
 
 static char *ipl_type_str(enum ipl_type type)
 {
        switch (type) {
-       case IPL_TYPE_NONE:
-               return IPL_NONE_STR;
        case IPL_TYPE_CCW:
                return IPL_CCW_STR;
        case IPL_TYPE_FCP:
                return IPL_FCP_STR;
+       case IPL_TYPE_FCP_DUMP:
+               return IPL_FCP_DUMP_STR;
        case IPL_TYPE_NSS:
                return IPL_NSS_STR;
        case IPL_TYPE_UNKNOWN:
@@ -67,15 +52,55 @@ static char *ipl_type_str(enum ipl_type type)
        }
 }
 
+enum dump_type {
+       DUMP_TYPE_NONE  = 1,
+       DUMP_TYPE_CCW   = 2,
+       DUMP_TYPE_FCP   = 4,
+};
+
+#define DUMP_NONE_STR   "none"
+#define DUMP_CCW_STR    "ccw"
+#define DUMP_FCP_STR    "fcp"
+
+static char *dump_type_str(enum dump_type type)
+{
+       switch (type) {
+       case DUMP_TYPE_NONE:
+               return DUMP_NONE_STR;
+       case DUMP_TYPE_CCW:
+               return DUMP_CCW_STR;
+       case DUMP_TYPE_FCP:
+               return DUMP_FCP_STR;
+       default:
+               return NULL;
+       }
+}
+
+/*
+ * Must be in data section since the bss section
+ * is not cleared when these are accessed.
+ */
+static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
+u32 ipl_flags __attribute__((__section__(".data"))) = 0;
+
 enum ipl_method {
-       IPL_METHOD_NONE,
-       IPL_METHOD_CCW_CIO,
-       IPL_METHOD_CCW_DIAG,
-       IPL_METHOD_CCW_VM,
-       IPL_METHOD_FCP_RO_DIAG,
-       IPL_METHOD_FCP_RW_DIAG,
-       IPL_METHOD_FCP_RO_VM,
-       IPL_METHOD_NSS,
+       REIPL_METHOD_CCW_CIO,
+       REIPL_METHOD_CCW_DIAG,
+       REIPL_METHOD_CCW_VM,
+       REIPL_METHOD_FCP_RO_DIAG,
+       REIPL_METHOD_FCP_RW_DIAG,
+       REIPL_METHOD_FCP_RO_VM,
+       REIPL_METHOD_FCP_DUMP,
+       REIPL_METHOD_NSS,
+       REIPL_METHOD_DEFAULT,
+};
+
+enum dump_method {
+       DUMP_METHOD_NONE,
+       DUMP_METHOD_CCW_CIO,
+       DUMP_METHOD_CCW_DIAG,
+       DUMP_METHOD_CCW_VM,
+       DUMP_METHOD_FCP_DIAG,
 };
 
 enum shutdown_action {
@@ -107,15 +132,15 @@ static int diag308_set_works = 0;
 static int reipl_capabilities = IPL_TYPE_UNKNOWN;
 
 static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
-static enum ipl_method reipl_method = IPL_METHOD_NONE;
+static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
 static struct ipl_parameter_block *reipl_block_fcp;
 static struct ipl_parameter_block *reipl_block_ccw;
 
 static char reipl_nss_name[NSS_NAME_SIZE + 1];
 
-static int dump_capabilities = IPL_TYPE_NONE;
-static enum ipl_type dump_type = IPL_TYPE_NONE;
-static enum ipl_method dump_method = IPL_METHOD_NONE;
+static int dump_capabilities = DUMP_TYPE_NONE;
+static enum dump_type dump_type = DUMP_TYPE_NONE;
+static enum dump_method dump_method = DUMP_METHOD_NONE;
 static struct ipl_parameter_block *dump_block_fcp;
 static struct ipl_parameter_block *dump_block_ccw;
 
@@ -134,6 +159,7 @@ int diag308(unsigned long subcode, void *addr)
                : "d" (subcode) : "cc", "memory");
        return _rc;
 }
+EXPORT_SYMBOL_GPL(diag308);
 
 /* SYSFS */
 
@@ -197,7 +223,7 @@ static void make_attrs_ro(struct attribute **attrs)
  * ipl section
  */
 
-static enum ipl_type ipl_get_type(void)
+static __init enum ipl_type get_ipl_type(void)
 {
        struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
 
@@ -211,12 +237,44 @@ static enum ipl_type ipl_get_type(void)
                return IPL_TYPE_UNKNOWN;
        if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP)
                return IPL_TYPE_UNKNOWN;
+       if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP)
+               return IPL_TYPE_FCP_DUMP;
        return IPL_TYPE_FCP;
 }
 
+void __init setup_ipl_info(void)
+{
+       ipl_info.type = get_ipl_type();
+       switch (ipl_info.type) {
+       case IPL_TYPE_CCW:
+               ipl_info.data.ccw.dev_id.devno = ipl_devno;
+               ipl_info.data.ccw.dev_id.ssid = 0;
+               break;
+       case IPL_TYPE_FCP:
+       case IPL_TYPE_FCP_DUMP:
+               ipl_info.data.fcp.dev_id.devno =
+                       IPL_PARMBLOCK_START->ipl_info.fcp.devno;
+               ipl_info.data.fcp.dev_id.ssid = 0;
+               ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
+               ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
+               break;
+       case IPL_TYPE_NSS:
+               strncpy(ipl_info.data.nss.name, kernel_nss_name,
+                       sizeof(ipl_info.data.nss.name));
+               break;
+       case IPL_TYPE_UNKNOWN:
+       default:
+               /* We have no info to copy */
+               break;
+       }
+}
+
+struct ipl_info ipl_info;
+EXPORT_SYMBOL_GPL(ipl_info);
+
 static ssize_t ipl_type_show(struct subsystem *subsys, char *page)
 {
-       return sprintf(page, "%s\n", ipl_type_str(ipl_get_type()));
+       return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
 }
 
 static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
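
setup_ipl_info() above fills the global ipl_info once at boot; only the union member matching ipl_info.type is meaningful, so readers are expected to switch on the type before touching the data, as sys_ipl_device_show() below does. A minimal stand-alone sketch of that tagged-union pattern, using simplified stand-in types rather than the real struct ipl_info from asm/ipl.h:

    #include <stdio.h>

    enum ipl_type { IPL_TYPE_UNKNOWN, IPL_TYPE_CCW, IPL_TYPE_FCP,
                    IPL_TYPE_FCP_DUMP, IPL_TYPE_NSS };

    struct my_ipl_info {                        /* simplified stand-in */
        enum ipl_type type;
        union {
            struct { unsigned short devno; } ccw;
            struct { unsigned short devno;
                     unsigned long long wwpn, lun; } fcp;
            struct { char name[9]; } nss;
        } data;
    };

    static void print_ipl_device(const struct my_ipl_info *info)
    {
        switch (info->type) {
        case IPL_TYPE_CCW:
            printf("0.0.%04x\n", info->data.ccw.devno);
            break;
        case IPL_TYPE_FCP:
        case IPL_TYPE_FCP_DUMP:
            printf("0.0.%04x wwpn=%016llx lun=%016llx\n",
                   info->data.fcp.devno, info->data.fcp.wwpn,
                   info->data.fcp.lun);
            break;
        case IPL_TYPE_NSS:
            printf("%s\n", info->data.nss.name);
            break;
        default:
            printf("unknown\n");
        }
    }

    int main(void)
    {
        struct my_ipl_info info = { .type = IPL_TYPE_CCW };

        info.data.ccw.devno = 0x1234;           /* invented device number */
        print_ipl_device(&info);                /* prints 0.0.1234 */
        return 0;
    }
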
@@ -225,10 +283,11 @@ static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page)
 {
        struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
 
-       switch (ipl_get_type()) {
+       switch (ipl_info.type) {
        case IPL_TYPE_CCW:
                return sprintf(page, "0.0.%04x\n", ipl_devno);
        case IPL_TYPE_FCP:
+       case IPL_TYPE_FCP_DUMP:
                return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
        default:
                return 0;
@@ -485,23 +544,29 @@ static int reipl_set_type(enum ipl_type type)
        switch(type) {
        case IPL_TYPE_CCW:
                if (MACHINE_IS_VM)
-                       reipl_method = IPL_METHOD_CCW_VM;
+                       reipl_method = REIPL_METHOD_CCW_VM;
                else
-                       reipl_method = IPL_METHOD_CCW_CIO;
+                       reipl_method = REIPL_METHOD_CCW_CIO;
                break;
        case IPL_TYPE_FCP:
                if (diag308_set_works)
-                       reipl_method = IPL_METHOD_FCP_RW_DIAG;
+                       reipl_method = REIPL_METHOD_FCP_RW_DIAG;
                else if (MACHINE_IS_VM)
-                       reipl_method = IPL_METHOD_FCP_RO_VM;
+                       reipl_method = REIPL_METHOD_FCP_RO_VM;
                else
-                       reipl_method = IPL_METHOD_FCP_RO_DIAG;
+                       reipl_method = REIPL_METHOD_FCP_RO_DIAG;
+               break;
+       case IPL_TYPE_FCP_DUMP:
+               reipl_method = REIPL_METHOD_FCP_DUMP;
                break;
        case IPL_TYPE_NSS:
-               reipl_method = IPL_METHOD_NSS;
+               reipl_method = REIPL_METHOD_NSS;
+               break;
+       case IPL_TYPE_UNKNOWN:
+               reipl_method = REIPL_METHOD_DEFAULT;
                break;
        default:
-               reipl_method = IPL_METHOD_NONE;
+               BUG();
        }
        reipl_type = type;
        return 0;
@@ -579,22 +644,22 @@ static struct attribute_group dump_ccw_attr_group = {
 
 /* dump type */
 
-static int dump_set_type(enum ipl_type type)
+static int dump_set_type(enum dump_type type)
 {
        if (!(dump_capabilities & type))
                return -EINVAL;
        switch(type) {
-       case IPL_TYPE_CCW:
+       case DUMP_TYPE_CCW:
                if (MACHINE_IS_VM)
-                       dump_method = IPL_METHOD_CCW_VM;
+                       dump_method = DUMP_METHOD_CCW_VM;
                else
-                       dump_method = IPL_METHOD_CCW_CIO;
+                       dump_method = DUMP_METHOD_CCW_CIO;
                break;
-       case IPL_TYPE_FCP:
-               dump_method = IPL_METHOD_FCP_RW_DIAG;
+       case DUMP_TYPE_FCP:
+               dump_method = DUMP_METHOD_FCP_DIAG;
                break;
        default:
-               dump_method = IPL_METHOD_NONE;
+               dump_method = DUMP_METHOD_NONE;
        }
        dump_type = type;
        return 0;
@@ -602,7 +667,7 @@ static int dump_set_type(enum ipl_type type)
 
 static ssize_t dump_type_show(struct subsystem *subsys, char *page)
 {
-       return sprintf(page, "%s\n", ipl_type_str(dump_type));
+       return sprintf(page, "%s\n", dump_type_str(dump_type));
 }
 
 static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
@@ -610,12 +675,12 @@ static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
 {
        int rc = -EINVAL;
 
-       if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0)
-               rc = dump_set_type(IPL_TYPE_NONE);
-       else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0)
-               rc = dump_set_type(IPL_TYPE_CCW);
-       else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
-               rc = dump_set_type(IPL_TYPE_FCP);
+       if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
+               rc = dump_set_type(DUMP_TYPE_NONE);
+       else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
+               rc = dump_set_type(DUMP_TYPE_CCW);
+       else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
+               rc = dump_set_type(DUMP_TYPE_FCP);
        return (rc != 0) ? rc : len;
 }
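
The dump_type store handler above matches the written string by prefix against "none", "ccw" and "fcp", which conveniently tolerates the newline that echo appends (e.g. echo ccw > /sys/firmware/dump/dump_type, assuming the usual s390 location of the dump attributes in sysfs). A minimal libc-only sketch of that prefix matching:

    #include <stdio.h>
    #include <string.h>

    enum dump_type { DUMP_TYPE_NONE = 1, DUMP_TYPE_CCW = 2, DUMP_TYPE_FCP = 4 };

    static int parse_dump_type(const char *buf)
    {
        if (strncmp(buf, "none", strlen("none")) == 0)
            return DUMP_TYPE_NONE;
        if (strncmp(buf, "ccw", strlen("ccw")) == 0)
            return DUMP_TYPE_CCW;
        if (strncmp(buf, "fcp", strlen("fcp")) == 0)
            return DUMP_TYPE_FCP;
        return -1;                              /* -EINVAL in the kernel version */
    }

    int main(void)
    {
        /* sysfs writes usually carry a trailing newline; prefix match accepts it */
        printf("%d %d %d\n", parse_dump_type("ccw\n"),
               parse_dump_type("fcp\n"), parse_dump_type("bogus\n"));
        return 0;
    }
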
 
@@ -664,14 +729,14 @@ void do_reipl(void)
        char loadparm[LOADPARM_LEN + 1];
 
        switch (reipl_method) {
-       case IPL_METHOD_CCW_CIO:
+       case REIPL_METHOD_CCW_CIO:
                devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
-               if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno)
+               if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno)
                        diag308(DIAG308_IPL, NULL);
                devid.ssid  = 0;
                reipl_ccw_dev(&devid);
                break;
-       case IPL_METHOD_CCW_VM:
+       case REIPL_METHOD_CCW_VM:
                reipl_get_ascii_loadparm(loadparm);
                if (strlen(loadparm) == 0)
                        sprintf(buf, "IPL %X",
@@ -681,30 +746,32 @@ void do_reipl(void)
                                reipl_block_ccw->ipl_info.ccw.devno, loadparm);
                __cpcmd(buf, NULL, 0, NULL);
                break;
-       case IPL_METHOD_CCW_DIAG:
+       case REIPL_METHOD_CCW_DIAG:
                diag308(DIAG308_SET, reipl_block_ccw);
                diag308(DIAG308_IPL, NULL);
                break;
-       case IPL_METHOD_FCP_RW_DIAG:
+       case REIPL_METHOD_FCP_RW_DIAG:
                diag308(DIAG308_SET, reipl_block_fcp);
                diag308(DIAG308_IPL, NULL);
                break;
-       case IPL_METHOD_FCP_RO_DIAG:
+       case REIPL_METHOD_FCP_RO_DIAG:
                diag308(DIAG308_IPL, NULL);
                break;
-       case IPL_METHOD_FCP_RO_VM:
+       case REIPL_METHOD_FCP_RO_VM:
                __cpcmd("IPL", NULL, 0, NULL);
                break;
-       case IPL_METHOD_NSS:
+       case REIPL_METHOD_NSS:
                sprintf(buf, "IPL %s", reipl_nss_name);
                __cpcmd(buf, NULL, 0, NULL);
                break;
-       case IPL_METHOD_NONE:
-       default:
+       case REIPL_METHOD_DEFAULT:
                if (MACHINE_IS_VM)
                        __cpcmd("IPL", NULL, 0, NULL);
                diag308(DIAG308_IPL, NULL);
                break;
+       case REIPL_METHOD_FCP_DUMP:
+       default:
+               break;
        }
        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
 }
@@ -715,28 +782,28 @@ static void do_dump(void)
        static char buf[100];
 
        switch (dump_method) {
-       case IPL_METHOD_CCW_CIO:
+       case DUMP_METHOD_CCW_CIO:
                smp_send_stop();
                devid.devno = dump_block_ccw->ipl_info.ccw.devno;
                devid.ssid  = 0;
                reipl_ccw_dev(&devid);
                break;
-       case IPL_METHOD_CCW_VM:
+       case DUMP_METHOD_CCW_VM:
                smp_send_stop();
                sprintf(buf, "STORE STATUS");
                __cpcmd(buf, NULL, 0, NULL);
                sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
                __cpcmd(buf, NULL, 0, NULL);
                break;
-       case IPL_METHOD_CCW_DIAG:
+       case DUMP_METHOD_CCW_DIAG:
                diag308(DIAG308_SET, dump_block_ccw);
                diag308(DIAG308_DUMP, NULL);
                break;
-       case IPL_METHOD_FCP_RW_DIAG:
+       case DUMP_METHOD_FCP_DIAG:
                diag308(DIAG308_SET, dump_block_fcp);
                diag308(DIAG308_DUMP, NULL);
                break;
-       case IPL_METHOD_NONE:
+       case DUMP_METHOD_NONE:
        default:
                return;
        }
@@ -777,12 +844,13 @@ static int __init ipl_init(void)
        rc = firmware_register(&ipl_subsys);
        if (rc)
                return rc;
-       switch (ipl_get_type()) {
+       switch (ipl_info.type) {
        case IPL_TYPE_CCW:
                rc = sysfs_create_group(&ipl_subsys.kset.kobj,
                                        &ipl_ccw_attr_group);
                break;
        case IPL_TYPE_FCP:
+       case IPL_TYPE_FCP_DUMP:
                rc = ipl_register_fcp_files();
                break;
        case IPL_TYPE_NSS:
@@ -852,7 +920,7 @@ static int __init reipl_ccw_init(void)
        /* FIXME: check for diag308_set_works when enabling diag ccw reipl */
        if (!MACHINE_IS_VM)
                sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
-       if (ipl_get_type() == IPL_TYPE_CCW)
+       if (ipl_info.type == IPL_TYPE_CCW)
                reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
        reipl_capabilities |= IPL_TYPE_CCW;
        return 0;
@@ -862,9 +930,9 @@ static int __init reipl_fcp_init(void)
 {
        int rc;
 
-       if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP))
+       if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP))
                return 0;
-       if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP))
+       if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP))
                make_attrs_ro(reipl_fcp_attrs);
 
        reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
@@ -875,7 +943,7 @@ static int __init reipl_fcp_init(void)
                free_page((unsigned long)reipl_block_fcp);
                return rc;
        }
-       if (ipl_get_type() == IPL_TYPE_FCP) {
+       if (ipl_info.type == IPL_TYPE_FCP) {
                memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
        } else {
                reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
@@ -909,7 +977,7 @@ static int __init reipl_init(void)
        rc = reipl_nss_init();
        if (rc)
                return rc;
-       rc = reipl_set_type(ipl_get_type());
+       rc = reipl_set_type(ipl_info.type);
        if (rc)
                return rc;
        return 0;
@@ -931,7 +999,7 @@ static int __init dump_ccw_init(void)
        dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
        dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
        dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
-       dump_capabilities |= IPL_TYPE_CCW;
+       dump_capabilities |= DUMP_TYPE_CCW;
        return 0;
 }
 
@@ -956,7 +1024,7 @@ static int __init dump_fcp_init(void)
        dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
        dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
        dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
-       dump_capabilities |= IPL_TYPE_FCP;
+       dump_capabilities |= DUMP_TYPE_FCP;
        return 0;
 }
 
@@ -995,7 +1063,7 @@ static int __init dump_init(void)
        rc = dump_fcp_init();
        if (rc)
                return rc;
-       dump_set_type(IPL_TYPE_NONE);
+       dump_set_type(DUMP_TYPE_NONE);
        return 0;
 }
 
@@ -1038,6 +1106,27 @@ static int __init s390_ipl_init(void)
 
 __initcall(s390_ipl_init);
 
+void __init ipl_save_parameters(void)
+{
+       struct cio_iplinfo iplinfo;
+       unsigned int *ipl_ptr;
+       void *src, *dst;
+
+       if (cio_get_iplinfo(&iplinfo))
+               return;
+
+       ipl_devno = iplinfo.devno;
+       ipl_flags |= IPL_DEVNO_VALID;
+       if (!iplinfo.is_qdio)
+               return;
+       ipl_flags |= IPL_PARMBLOCK_VALID;
+       ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
+       src = (void *)(unsigned long)*ipl_ptr;
+       dst = (void *)IPL_PARMBLOCK_ORIGIN;
+       memmove(dst, src, PAGE_SIZE);
+       *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
+}
+
 static LIST_HEAD(rcall);
 static DEFINE_MUTEX(rcall_mutex);
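
ipl_save_parameters() above rescues the IPL parameter block early in boot: it reads the block's address from the lowcore word at __LC_IPL_PARMBLOCK_PTR, copies the page to the fixed IPL_PARMBLOCK_ORIGIN with memmove (safe even if the ranges overlap), and rewrites the pointer so later code finds the relocated copy. A tiny stand-alone sketch of that relocate-and-repoint pattern, with invented offsets and sizes:

    #include <stdio.h>
    #include <string.h>

    #define BLOCK_SIZE   16                     /* stands in for PAGE_SIZE */
    #define BLOCK_ORIGIN 4                      /* stands in for IPL_PARMBLOCK_ORIGIN */

    int main(void)
    {
        char mem[64] = { 0 };                   /* pretend low memory */
        unsigned int ptr = 40;                  /* stands in for *__LC_IPL_PARMBLOCK_PTR */

        memcpy(&mem[ptr], "IPL parameters!", BLOCK_SIZE);

        memmove(&mem[BLOCK_ORIGIN], &mem[ptr], BLOCK_SIZE); /* overlap-safe copy */
        ptr = BLOCK_ORIGIN;                     /* pointer now names the relocated copy */

        printf("%s at offset %u\n", &mem[ptr], ptr);
        return 0;
    }
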
 
index 39d1dd7..59b4e79 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/moduleloader.h>
+#include <linux/bug.h>
 
 #if 0
 #define DEBUGP printk
@@ -398,9 +399,10 @@ int module_finalize(const Elf_Ehdr *hdr,
                    struct module *me)
 {
        vfree(me->arch.syminfo);
-       return 0;
+       return module_bug_finalize(hdr, sechdrs, me);
 }
 
 void module_arch_cleanup(struct module *mod)
 {
+       module_bug_cleanup(mod);
 }
index 5acfac6..11d9b01 100644 (file)
@@ -280,24 +280,26 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
         return 0;
 }
 
-asmlinkage long sys_fork(struct pt_regs regs)
+asmlinkage long sys_fork(void)
 {
-       return do_fork(SIGCHLD, regs.gprs[15], &regs, 0, NULL, NULL);
+       struct pt_regs *regs = task_pt_regs(current);
+       return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
 }
 
-asmlinkage long sys_clone(struct pt_regs regs)
+asmlinkage long sys_clone(void)
 {
-        unsigned long clone_flags;
-        unsigned long newsp;
+       struct pt_regs *regs = task_pt_regs(current);
+       unsigned long clone_flags;
+       unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;
 
-        clone_flags = regs.gprs[3];
-        newsp = regs.orig_gpr2;
-       parent_tidptr = (int __user *) regs.gprs[4];
-       child_tidptr = (int __user *) regs.gprs[5];
-        if (!newsp)
-                newsp = regs.gprs[15];
-        return do_fork(clone_flags, newsp, &regs, 0,
+       clone_flags = regs->gprs[3];
+       newsp = regs->orig_gpr2;
+       parent_tidptr = (int __user *) regs->gprs[4];
+       child_tidptr = (int __user *) regs->gprs[5];
+       if (!newsp)
+               newsp = regs->gprs[15];
+       return do_fork(clone_flags, newsp, regs, 0,
                       parent_tidptr, child_tidptr);
 }
 
@@ -311,40 +313,52 @@ asmlinkage long sys_clone(struct pt_regs regs)
  * do not have enough call-clobbered registers to hold all
  * the information you need.
  */
-asmlinkage long sys_vfork(struct pt_regs regs)
+asmlinkage long sys_vfork(void)
 {
+       struct pt_regs *regs = task_pt_regs(current);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
-                      regs.gprs[15], &regs, 0, NULL, NULL);
+                      regs->gprs[15], regs, 0, NULL, NULL);
+}
+
+asmlinkage void execve_tail(void)
+{
+       task_lock(current);
+       current->ptrace &= ~PT_DTRACE;
+       task_unlock(current);
+       current->thread.fp_regs.fpc = 0;
+       if (MACHINE_HAS_IEEE)
+               asm volatile("sfpc %0,%0" : : "d" (0));
 }
 
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage long sys_execve(struct pt_regs regs)
+asmlinkage long sys_execve(void)
 {
-        int error;
-        char * filename;
-
-        filename = getname((char __user *) regs.orig_gpr2);
-        error = PTR_ERR(filename);
-        if (IS_ERR(filename))
-                goto out;
-        error = do_execve(filename, (char __user * __user *) regs.gprs[3],
-                         (char __user * __user *) regs.gprs[4], &regs);
-       if (error == 0) {
-               task_lock(current);
-               current->ptrace &= ~PT_DTRACE;
-               task_unlock(current);
-               current->thread.fp_regs.fpc = 0;
-               if (MACHINE_HAS_IEEE)
-                       asm volatile("sfpc %0,%0" : : "d" (0));
+       struct pt_regs *regs = task_pt_regs(current);
+       char *filename;
+       unsigned long result;
+       int rc;
+
+       filename = getname((char __user *) regs->orig_gpr2);
+       if (IS_ERR(filename)) {
+               result = PTR_ERR(filename);
+               goto out;
        }
-        putname(filename);
+       rc = do_execve(filename, (char __user * __user *) regs->gprs[3],
+                      (char __user * __user *) regs->gprs[4], regs);
+       if (rc) {
+               result = rc;
+               goto out_putname;
+       }
+       execve_tail();
+       result = regs->gprs[2];
+out_putname:
+       putname(filename);
 out:
-        return error;
+       return result;
 }
 
-
 /*
  * fill in the FPU structure for a core dump.
  */
index 863c8d0..3dfd098 100644 (file)
@@ -285,6 +285,26 @@ static void __init conmode_default(void)
        }
 }
 
+#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
+static void __init setup_zfcpdump(unsigned int console_devno)
+{
+       static char str[64];
+
+       if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+               return;
+       if (console_devno != -1)
+               sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x",
+                       ipl_info.data.fcp.dev_id.devno, console_devno);
+       else
+               sprintf(str, "cio_ignore=all,!0.0.%04x",
+                       ipl_info.data.fcp.dev_id.devno);
+       strcat(COMMAND_LINE, str);
+       console_loglevel = 2;
+}
+#else
+static inline void setup_zfcpdump(unsigned int console_devno) {}
+#endif /* CONFIG_ZFCPDUMP */
+
 #ifdef CONFIG_SMP
 void (*_machine_restart)(char *command) = machine_restart_smp;
 void (*_machine_halt)(void) = machine_halt_smp;
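
setup_zfcpdump() above narrows the dump kernel's device view by appending a cio_ignore= expression to the boot command line, blacklisting every subchannel except the dump device and, when known, the console. A quick stand-alone sketch of the string it builds, with invented device numbers:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char cmdline[256] = "root=/dev/ram0 ";  /* pretend command line, trailing space */
        char str[64];
        int dump_devno = 0x1700;                /* invented dump device */
        int console_devno = 0x0009;             /* invented console; -1 if unknown */

        if (console_devno != -1)
            sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x",
                    dump_devno, console_devno);
        else
            sprintf(str, "cio_ignore=all,!0.0.%04x", dump_devno);
        strcat(cmdline, str);
        puts(cmdline);  /* root=/dev/ram0 cio_ignore=all,!0.0.1700,!0.0.0009 */
        return 0;
    }
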
@@ -586,13 +606,20 @@ setup_resources(void)
        }
 }
 
+unsigned long real_memory_size;
+EXPORT_SYMBOL_GPL(real_memory_size);
+
 static void __init setup_memory_end(void)
 {
-       unsigned long real_size, memory_size;
+       unsigned long memory_size;
        unsigned long max_mem, max_phys;
        int i;
 
-       memory_size = real_size = 0;
+#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
+       if (ipl_info.type == IPL_TYPE_FCP_DUMP)
+               memory_end = ZFCPDUMP_HSA_SIZE;
+#endif
+       memory_size = 0;
        max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
        memory_end &= PAGE_MASK;
 
@@ -601,7 +628,8 @@ static void __init setup_memory_end(void)
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                struct mem_chunk *chunk = &memory_chunk[i];
 
-               real_size = max(real_size, chunk->addr + chunk->size);
+               real_memory_size = max(real_memory_size,
+                                      chunk->addr + chunk->size);
                if (chunk->addr >= max_mem) {
                        memset(chunk, 0, sizeof(*chunk));
                        continue;
@@ -765,6 +793,7 @@ setup_arch(char **cmdline_p)
 
        parse_early_param();
 
+       setup_ipl_info();
        setup_memory_end();
        setup_addressing_mode();
        setup_memory();
@@ -782,6 +811,9 @@ setup_arch(char **cmdline_p)
 
         /* Setup default console */
        conmode_default();
+
+       /* Setup zfcpdump support */
+       setup_zfcpdump(console_devno);
 }
 
 void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
index 554f9cf..3c41907 100644 (file)
@@ -102,9 +102,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
 }
 
 asmlinkage long
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
-                                       struct pt_regs *regs)
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
 {
+       struct pt_regs *regs = task_pt_regs(current);
        return do_sigaltstack(uss, uoss, regs->gprs[15]);
 }
 
@@ -163,8 +163,9 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
        return 0;
 }
 
-asmlinkage long sys_sigreturn(struct pt_regs *regs)
+asmlinkage long sys_sigreturn(void)
 {
+       struct pt_regs *regs = task_pt_regs(current);
        sigframe __user *frame = (sigframe __user *)regs->gprs[15];
        sigset_t set;
 
@@ -189,8 +190,9 @@ badframe:
        return 0;
 }
 
-asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+asmlinkage long sys_rt_sigreturn(void)
 {
+       struct pt_regs *regs = task_pt_regs(current);
        rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
        sigset_t set;
 
index 97764f7..3754e20 100644 (file)
@@ -1,12 +1,12 @@
 /*
  *  arch/s390/kernel/smp.c
  *
- *    Copyright (C) IBM Corp. 1999,2006
+ *    Copyright IBM Corp. 1999,2007
  *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- *               Martin Schwidefsky (schwidefsky@de.ibm.com)
- *               Heiko Carstens (heiko.carstens@de.ibm.com)
+ *              Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *              Heiko Carstens (heiko.carstens@de.ibm.com)
  *
- *  based on other smp stuff by 
+ *  based on other smp stuff by
  *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
  *    (c) 1998 Ingo Molnar
  *
@@ -31,6 +31,7 @@
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
 #include <linux/timex.h>
+#include <linux/bootmem.h>
 #include <asm/ipl.h>
 #include <asm/setup.h>
 #include <asm/sigp.h>
 #include <asm/cpcmd.h>
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
-
-extern volatile int __cpu_logical_map[];
+#include <asm/lowcore.h>
 
 /*
  * An array with a pointer the lowcore of every CPU.
  */
-
 struct _lowcore *lowcore_ptr[NR_CPUS];
+EXPORT_SYMBOL(lowcore_ptr);
 
 cpumask_t cpu_online_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_online_map);
+
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_possible_map);
 
 static struct task_struct *current_set[NR_CPUS];
 
@@ -70,7 +73,7 @@ struct call_data_struct {
        int wait;
 };
 
-static struct call_data_struct * call_data;
+static struct call_data_struct *call_data;
 
 /*
  * 'Call function' interrupt callback
@@ -150,8 +153,8 @@ out:
  *
  * Run a function on all other CPUs.
  *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * You must not call this function with disabled interrupts, from a
+ * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
                      int wait)
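
The reworded comment above now also rules out calling smp_call_function() from a bottom half, leaving plain process context with interrupts enabled as the safe calling context. As a hedged illustration (not part of this patch, and assuming a kernel of this era), a module init routine would qualify:

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/smp.h>

    static void say_hello(void *info)
    {
        printk(KERN_INFO "hello from cpu %d\n", smp_processor_id());
    }

    static int __init hello_smp_init(void)
    {
        /* process context, interrupts enabled: allowed by the comment above */
        smp_call_function(say_hello, NULL, 0, 1);   /* nonatomic=0, wait=1 */
        return 0;
    }

    static void __exit hello_smp_exit(void)
    {
    }

    module_init(hello_smp_init);
    module_exit(hello_smp_exit);
    MODULE_LICENSE("GPL");
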
@@ -177,11 +180,11 @@ EXPORT_SYMBOL(smp_call_function);
  *
  * Run a function on one processor.
  *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * You must not call this function with disabled interrupts, from a
+ * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
-                         int wait, int cpu)
+                        int wait, int cpu)
 {
        cpumask_t map = CPU_MASK_NONE;
 
@@ -195,9 +198,9 @@ EXPORT_SYMBOL(smp_call_function_on);
 
 static void do_send_stop(void)
 {
-        int cpu, rc;
+       int cpu, rc;
 
-        /* stop all processors */
+       /* stop all processors */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
@@ -209,9 +212,9 @@ static void do_send_stop(void)
 
 static void do_store_status(void)
 {
-        int cpu, rc;
+       int cpu, rc;
 
-        /* store status of all processors in their lowcores (real 0) */
+       /* store status of all processors in their lowcores (real 0) */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
@@ -219,8 +222,8 @@ static void do_store_status(void)
                        rc = signal_processor_p(
                                (__u32)(unsigned long) lowcore_ptr[cpu], cpu,
                                sigp_store_status_at_address);
-               } while(rc == sigp_busy);
-        }
+               } while (rc == sigp_busy);
+       }
 }
 
 static void do_wait_for_stop(void)
@@ -231,7 +234,7 @@ static void do_wait_for_stop(void)
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
-               while(!smp_cpu_not_running(cpu))
+               while (!smp_cpu_not_running(cpu))
                        cpu_relax();
        }
 }
@@ -245,7 +248,7 @@ void smp_send_stop(void)
        /* Disable all interrupts/machine checks */
        __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
 
-        /* write magic number to zero page (absolute 0) */
+       /* write magic number to zero page (absolute 0) */
        lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
 
        /* stop other processors. */
@@ -261,8 +264,7 @@ void smp_send_stop(void)
 /*
  * Reboot, halt and power_off routines for SMP.
  */
-
-void machine_restart_smp(char * __unused) 
+void machine_restart_smp(char *__unused)
 {
        smp_send_stop();
        do_reipl();
@@ -293,17 +295,17 @@ void machine_power_off_smp(void)
 
 static void do_ext_call_interrupt(__u16 code)
 {
-        unsigned long bits;
+       unsigned long bits;
 
-        /*
-         * handle bit signal external calls
-         *
-         * For the ec_schedule signal we have to do nothing. All the work
-         * is done automatically when we return from the interrupt.
-         */
+       /*
+        * handle bit signal external calls
+        *
+        * For the ec_schedule signal we have to do nothing. All the work
+        * is done automatically when we return from the interrupt.
+        */
        bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
-       if (test_bit(ec_call_function, &bits)) 
+       if (test_bit(ec_call_function, &bits))
                do_call_function();
 }
 
@@ -313,11 +315,11 @@ static void do_ext_call_interrupt(__u16 code)
  */
 static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 {
-        /*
-         * Set signaling bit in lowcore of target cpu and kick it
-         */
+       /*
+        * Set signaling bit in lowcore of target cpu and kick it
+        */
        set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
-       while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
+       while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
                udelay(10);
 }
 
@@ -332,7 +334,7 @@ void smp_ptlb_callback(void *info)
 
 void smp_ptlb_all(void)
 {
-        on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
+       on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
 }
 EXPORT_SYMBOL(smp_ptlb_all);
 #endif /* ! CONFIG_64BIT */
@@ -344,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all);
  */
 void smp_send_reschedule(int cpu)
 {
-        smp_ext_bitcall(cpu, ec_schedule);
+       smp_ext_bitcall(cpu, ec_schedule);
 }
 
 /*
@@ -358,11 +360,12 @@ struct ec_creg_mask_parms {
 /*
  * callback for setting/clearing control bits
  */
-static void smp_ctl_bit_callback(void *info) {
+static void smp_ctl_bit_callback(void *info)
+{
        struct ec_creg_mask_parms *pp = info;
        unsigned long cregs[16];
        int i;
-       
+
        __ctl_store(cregs, 0, 15);
        for (i = 0; i <= 15; i++)
                cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
@@ -381,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit)
        parms.orvals[cr] = 1 << bit;
        on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
+EXPORT_SYMBOL(smp_ctl_set_bit);
 
 /*
  * Clear a bit in a control register of all cpus
@@ -394,13 +398,72 @@ void smp_ctl_clear_bit(int cr, int bit)
        parms.andvals[cr] = ~(1L << bit);
        on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
+EXPORT_SYMBOL(smp_ctl_clear_bit);
+
+#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
+
+/*
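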
+ * zfcpdump_prefix_array holds prefix registers for the following scenario:
+ * a 64 bit zfcpdump kernel dumping a 31 bit kernel. The 31 bit kernel's prefix
+ * registers have to be saved here, since they are lost when switching from
+ * 31 bit to 64 bit mode.
+ */
+unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
+       __attribute__((__section__(".data")));
+
+static void __init smp_get_save_areas(void)
+{
+       unsigned int cpu, cpu_num, rc;
+       __u16 boot_cpu_addr;
+
+       if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+               return;
+       boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
+       cpu_num = 1;
+       for (cpu = 0; cpu <= 65535; cpu++) {
+               if ((u16) cpu == boot_cpu_addr)
+                       continue;
+               __cpu_logical_map[1] = (__u16) cpu;
+               if (signal_processor(1, sigp_sense) == sigp_not_operational)
+                       continue;
+               if (cpu_num >= NR_CPUS) {
+                       printk("WARNING: Registers for cpu %i are not "
+                              "saved, since dump kernel was compiled with "
+                              "NR_CPUS=%i!\n", cpu_num, NR_CPUS);
+                       continue;
+               }
+               zfcpdump_save_areas[cpu_num] =
+                       alloc_bootmem(sizeof(union save_area));
+               while (1) {
+                       rc = signal_processor(1, sigp_stop_and_store_status);
+                       if (rc != sigp_busy)
+                               break;
+                       cpu_relax();
+               }
+               memcpy(zfcpdump_save_areas[cpu_num],
+                      (void *)(unsigned long) store_prefix() +
+                      SAVE_AREA_BASE, SAVE_AREA_SIZE);
+#ifdef __s390x__
+               /* copy original prefix register */
+               zfcpdump_save_areas[cpu_num]->s390x.pref_reg =
+                       zfcpdump_prefix_array[cpu_num];
+#endif
+               cpu_num++;
+       }
+}
+
+union save_area *zfcpdump_save_areas[NR_CPUS + 1];
+EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
+
+#else
+#define smp_get_save_areas() do { } while (0)
+#endif
 
 /*
  * Lets check how many CPUs we have.
  */
 
-static unsigned int
-__init smp_count_cpus(void)
+static unsigned int __init smp_count_cpus(void)
 {
        unsigned int cpu, num_cpus;
        __u16 boot_cpu_addr;
@@ -416,31 +479,30 @@ __init smp_count_cpus(void)
                if ((__u16) cpu == boot_cpu_addr)
                        continue;
                __cpu_logical_map[1] = (__u16) cpu;
-               if (signal_processor(1, sigp_sense) ==
-                   sigp_not_operational)
+               if (signal_processor(1, sigp_sense) == sigp_not_operational)
                        continue;
                num_cpus++;
        }
 
-       printk("Detected %d CPU's\n",(int) num_cpus);
+       printk("Detected %d CPU's\n", (int) num_cpus);
        printk("Boot cpu address %2X\n", boot_cpu_addr);
 
        return num_cpus;
 }
 
 /*
- *      Activate a secondary processor.
+ *     Activate a secondary processor.
  */
 int __devinit start_secondary(void *cpuvoid)
 {
-        /* Setup the cpu */
-        cpu_init();
+       /* Setup the cpu */
+       cpu_init();
        preempt_disable();
        /* Enable TOD clock interrupts on the secondary cpu. */
-        init_cpu_timer();
+       init_cpu_timer();
 #ifdef CONFIG_VIRT_TIMER
        /* Enable cpu timer interrupts on the secondary cpu. */
-        init_cpu_vtimer();
+       init_cpu_vtimer();
 #endif
        /* Enable pfault pseudo page faults on this cpu. */
        pfault_init();
@@ -449,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid)
        cpu_set(smp_processor_id(), cpu_online_map);
        /* Switch on interrupts */
        local_irq_enable();
-        /* Print info about this processor */
-        print_cpu_info(&S390_lowcore.cpu_data);
-        /* cpu_idle will call schedule for us */
-        cpu_idle();
-        return 0;
+       /* Print info about this processor */
+       print_cpu_info(&S390_lowcore.cpu_data);
+       /* cpu_idle will call schedule for us */
+       cpu_idle();
+       return 0;
 }
 
 static void __init smp_create_idle(unsigned int cpu)
@@ -470,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu)
        current_set[cpu] = p;
 }
 
-/* Reserving and releasing of CPUs */
-
-static DEFINE_SPINLOCK(smp_reserve_lock);
-static int smp_cpu_reserved[NR_CPUS];
-
-int
-smp_get_cpu(cpumask_t cpu_mask)
-{
-       unsigned long flags;
-       int cpu;
-
-       spin_lock_irqsave(&smp_reserve_lock, flags);
-       /* Try to find an already reserved cpu. */
-       for_each_cpu_mask(cpu, cpu_mask) {
-               if (smp_cpu_reserved[cpu] != 0) {
-                       smp_cpu_reserved[cpu]++;
-                       /* Found one. */
-                       goto out;
-               }
-       }
-       /* Reserve a new cpu from cpu_mask. */
-       for_each_cpu_mask(cpu, cpu_mask) {
-               if (cpu_online(cpu)) {
-                       smp_cpu_reserved[cpu]++;
-                       goto out;
-               }
-       }
-       cpu = -ENODEV;
-out:
-       spin_unlock_irqrestore(&smp_reserve_lock, flags);
-       return cpu;
-}
-
-void
-smp_put_cpu(int cpu)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&smp_reserve_lock, flags);
-       smp_cpu_reserved[cpu]--;
-       spin_unlock_irqrestore(&smp_reserve_lock, flags);
-}
-
-static int
-cpu_stopped(int cpu)
+static int cpu_stopped(int cpu)
 {
        __u32 status;
 
        /* Check for stopped state */
-       if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
+       if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+           sigp_status_stored) {
                if (status & 0x40)
                        return 1;
        }
@@ -528,14 +547,13 @@ cpu_stopped(int cpu)
 
 /* Upping and downing of CPUs */
 
-int
-__cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu)
 {
        struct task_struct *idle;
-        struct _lowcore    *cpu_lowcore;
+       struct _lowcore *cpu_lowcore;
        struct stack_frame *sf;
-        sigp_ccode          ccode;
-       int                 curr_cpu;
+       sigp_ccode ccode;
+       int curr_cpu;
 
        for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
                __cpu_logical_map[cpu] = (__u16) curr_cpu;
@@ -548,7 +566,7 @@ __cpu_up(unsigned int cpu)
 
        ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
                                   cpu, sigp_set_prefix);
-       if (ccode){
+       if (ccode) {
                printk("sigp_set_prefix failed for cpu %d "
                       "with condition code %d\n",
                       (int) cpu, (int) ccode);
@@ -556,9 +574,9 @@ __cpu_up(unsigned int cpu)
        }
 
        idle = current_set[cpu];
-        cpu_lowcore = lowcore_ptr[cpu];
+       cpu_lowcore = lowcore_ptr[cpu];
        cpu_lowcore->kernel_stack = (unsigned long)
-               task_stack_page(idle) + (THREAD_SIZE);
+               task_stack_page(idle) + THREAD_SIZE;
        sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
                                     - sizeof(struct pt_regs)
                                     - sizeof(struct stack_frame));
@@ -570,11 +588,11 @@ __cpu_up(unsigned int cpu)
                "       stam    0,15,0(%0)"
                : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
        cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
-        cpu_lowcore->current_task = (unsigned long) idle;
-        cpu_lowcore->cpu_data.cpu_nr = cpu;
+       cpu_lowcore->current_task = (unsigned long) idle;
+       cpu_lowcore->cpu_data.cpu_nr = cpu;
        eieio();
 
-       while (signal_processor(cpu,sigp_restart) == sigp_busy)
+       while (signal_processor(cpu, sigp_restart) == sigp_busy)
                udelay(10);
 
        while (!cpu_online(cpu))
@@ -589,6 +607,7 @@ void __init smp_setup_cpu_possible_map(void)
 {
        unsigned int phy_cpus, pos_cpus, cpu;
 
+       smp_get_save_areas();
        phy_cpus = smp_count_cpus();
        pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
 
@@ -620,18 +639,11 @@ static int __init setup_possible_cpus(char *s)
 }
 early_param("possible_cpus", setup_possible_cpus);
 
-int
-__cpu_disable(void)
+int __cpu_disable(void)
 {
-       unsigned long flags;
        struct ec_creg_mask_parms cr_parms;
        int cpu = smp_processor_id();
 
-       spin_lock_irqsave(&smp_reserve_lock, flags);
-       if (smp_cpu_reserved[cpu] != 0) {
-               spin_unlock_irqrestore(&smp_reserve_lock, flags);
-               return -EBUSY;
-       }
        cpu_clear(cpu, cpu_online_map);
 
        /* Disable pfault pseudo page faults on this cpu. */
@@ -642,24 +654,23 @@ __cpu_disable(void)
 
        /* disable all external interrupts */
        cr_parms.orvals[0] = 0;
-       cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
-                               1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+       cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
+                               1 << 11 | 1 << 10 | 1 <<  6 | 1 <<  4);
        /* disable all I/O interrupts */
        cr_parms.orvals[6] = 0;
-       cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
-                               1<<27 | 1<<26 | 1<<25 | 1<<24);
+       cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
+                               1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
        /* disable most machine checks */
        cr_parms.orvals[14] = 0;
-       cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+       cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
+                                1 << 25 | 1 << 24);
 
        smp_ctl_bit_callback(&cr_parms);
 
-       spin_unlock_irqrestore(&smp_reserve_lock, flags);
        return 0;
 }
 
-void
-__cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
        /* Wait until target cpu is down */
        while (!smp_cpu_not_running(cpu))
@@ -667,13 +678,12 @@ __cpu_die(unsigned int cpu)
        printk("Processor %d spun down\n", cpu);
 }
 
-void
-cpu_die(void)
+void cpu_die(void)
 {
        idle_task_exit();
        signal_processor(smp_processor_id(), sigp_stop);
        BUG();
-       for(;;);
+       for (;;);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -686,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        unsigned long stack;
        unsigned int cpu;
-        int i;
-
-        /* request the 0x1201 emergency signal external interrupt */
-        if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
-                panic("Couldn't request external interrupt 0x1201");
-        memset(lowcore_ptr,0,sizeof(lowcore_ptr));  
-        /*
-         *  Initialize prefix pages and stacks for all possible cpus
-         */
+       int i;
+
+       /* request the 0x1201 emergency signal external interrupt */
+       if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
+               panic("Couldn't request external interrupt 0x1201");
+       memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
+       /*
+        *  Initialize prefix pages and stacks for all possible cpus
+        */
        print_cpu_info(&S390_lowcore.cpu_data);
 
-        for_each_possible_cpu(i) {
+       for_each_possible_cpu(i) {
                lowcore_ptr[i] = (struct _lowcore *)
-                       __get_free_pages(GFP_KERNEL|GFP_DMA, 
-                                       sizeof(void*) == 8 ? 1 : 0);
-               stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
-               if (lowcore_ptr[i] == NULL || stack == 0ULL)
+                       __get_free_pages(GFP_KERNEL | GFP_DMA,
+                                        sizeof(void*) == 8 ? 1 : 0);
+               stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+               if (!lowcore_ptr[i] || !stack)
                        panic("smp_boot_cpus failed to allocate memory\n");
 
                *(lowcore_ptr[i]) = S390_lowcore;
-               lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
-               stack = __get_free_pages(GFP_KERNEL,0);
-               if (stack == 0ULL)
+               lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
+               stack = __get_free_pages(GFP_KERNEL, 0);
+               if (!stack)
                        panic("smp_boot_cpus failed to allocate memory\n");
-               lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
+               lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
 #ifndef CONFIG_64BIT
                if (MACHINE_HAS_IEEE) {
                        lowcore_ptr[i]->extended_save_area_addr =
-                               (__u32) __get_free_pages(GFP_KERNEL,0);
-                       if (lowcore_ptr[i]->extended_save_area_addr == 0)
+                               (__u32) __get_free_pages(GFP_KERNEL, 0);
+                       if (!lowcore_ptr[i]->extended_save_area_addr)
                                panic("smp_boot_cpus failed to "
                                      "allocate memory\n");
                }
@@ -754,34 +764,63 @@ void smp_cpus_done(unsigned int max_cpus)
  */
 int setup_profiling_timer(unsigned int multiplier)
 {
-        return 0;
+       return 0;
 }
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
+static ssize_t show_capability(struct sys_device *dev, char *buf)
+{
+       unsigned int capability;
+       int rc;
+
+       rc = get_cpu_capability(&capability);
+       if (rc)
+               return rc;
+       return sprintf(buf, "%u\n", capability);
+}
+static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
+
+static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned int)(long)hcpu;
+       struct cpu *c = &per_cpu(cpu_devices, cpu);
+       struct sys_device *s = &c->sysdev;
+
+       switch (action) {
+       case CPU_ONLINE:
+               if (sysdev_create_file(s, &attr_capability))
+                       return NOTIFY_BAD;
+               break;
+       case CPU_DEAD:
+               sysdev_remove_file(s, &attr_capability);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata smp_cpu_nb = {
+       .notifier_call = smp_cpu_notify,
+};
+
 static int __init topology_init(void)
 {
        int cpu;
-       int ret;
+
+       register_cpu_notifier(&smp_cpu_nb);
 
        for_each_possible_cpu(cpu) {
                struct cpu *c = &per_cpu(cpu_devices, cpu);
+               struct sys_device *s = &c->sysdev;
 
                c->hotpluggable = 1;
-               ret = register_cpu(c, cpu);
-               if (ret)
-                       printk(KERN_WARNING "topology_init: register_cpu %d "
-                              "failed (%d)\n", cpu, ret);
+               register_cpu(c, cpu);
+               if (!cpu_online(cpu))
+                       continue;
+               s = &c->sysdev;
+               sysdev_create_file(s, &attr_capability);
        }
        return 0;
 }
-
 subsys_initcall(topology_init);
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(smp_ctl_set_bit);
-EXPORT_SYMBOL(smp_ctl_clear_bit);
-EXPORT_SYMBOL(smp_get_cpu);
-EXPORT_SYMBOL(smp_put_cpu);
index 584ed95..3a77c22 100644 (file)
@@ -266,23 +266,3 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args)
                return -EFAULT;
        return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
 }
-
-/*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
- */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
-{
-       register const char *__arg1 asm("2") = filename;
-       register char *const*__arg2 asm("3") = argv;
-       register char *const*__arg3 asm("4") = envp;
-       register long __svcres asm("2");
-       asm volatile(
-               "svc %b1"
-               : "=d" (__svcres)
-               : "i" (__NR_execve),
-                 "0" (__arg1),
-                 "d" (__arg2),
-                 "d" (__arg3) : "memory");
-       return __svcres;
-}
index c774f10..cd8d321 100644 (file)
@@ -10,7 +10,7 @@
 
 NI_SYSCALL                                                     /* 0 */
 SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
-SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue)
+SYSCALL(sys_fork,sys_fork,sys_fork)
 SYSCALL(sys_read,sys_read,sys32_read_wrapper)
 SYSCALL(sys_write,sys_write,sys32_write_wrapper)
 SYSCALL(sys_open,sys_open,sys32_open_wrapper)                  /* 5 */
@@ -19,7 +19,7 @@ SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
 SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
 SYSCALL(sys_link,sys_link,sys32_link_wrapper)
 SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper)            /* 10 */
-SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue)
+SYSCALL(sys_execve,sys_execve,sys32_execve)
 SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
 SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper)            /* old time syscall */
 SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
@@ -127,8 +127,8 @@ SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper)              /* 115 */
 SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
 SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper)
 SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
-SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue)
-SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue)                /* 120 */
+SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
+SYSCALL(sys_clone,sys_clone,sys32_clone)                       /* 120 */
 SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
 SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
 NI_SYSCALL                                                     /* modify_ldt for i386 */
@@ -181,7 +181,7 @@ SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper)
 SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper)      /* 170 old setresgid16 syscall */
 SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper)      /* old getresgid16 syscall */
 SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
-SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue)
+SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn)
 SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper)
 SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper)    /* 175 */
 SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper)
@@ -194,11 +194,11 @@ SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall
 SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
 SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
 SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper)            /* 185 */
-SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue)
+SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack)
 SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
 NI_SYSCALL                                                     /* streams1 */
 NI_SYSCALL                                                     /* streams2 */
-SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue)          /* 190 */
+SYSCALL(sys_vfork,sys_vfork,sys_vfork)                         /* 190 */
 SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
 SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
 SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
index e1ad464..711dae8 100644 (file)
@@ -280,7 +280,6 @@ static void clock_comparator_interrupt(__u16 code)
 }
 
 static void etr_reset(void);
-static void etr_init(void);
 static void etr_ext_handler(__u16);
 
 /*
@@ -355,7 +354,6 @@ void __init time_init(void)
 #ifdef CONFIG_VIRT_TIMER
        vtime_init();
 #endif
-       etr_init();
 }
 
 /*
@@ -426,11 +424,11 @@ static struct etr_aib etr_port1;
 static int etr_port1_uptodate;
 static unsigned long etr_events;
 static struct timer_list etr_timer;
-static struct tasklet_struct etr_tasklet;
 static DEFINE_PER_CPU(atomic_t, etr_sync_word);
 
 static void etr_timeout(unsigned long dummy);
-static void etr_tasklet_fn(unsigned long dummy);
+static void etr_work_fn(struct work_struct *work);
+static DECLARE_WORK(etr_work, etr_work_fn);
 
 /*
  * The etr get_clock function. It will write the current clock value
@@ -507,29 +505,31 @@ static void etr_reset(void)
        }
 }
 
-static void etr_init(void)
+static int __init etr_init(void)
 {
        struct etr_aib aib;
 
        if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
-               return;
+               return 0;
        /* Check if this machine has the steai instruction. */
        if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
                set_bit(ETR_FLAG_STEAI, &etr_flags);
        setup_timer(&etr_timer, etr_timeout, 0UL);
-       tasklet_init(&etr_tasklet, etr_tasklet_fn, 0);
        if (!etr_port0_online && !etr_port1_online)
                set_bit(ETR_FLAG_EACCES, &etr_flags);
        if (etr_port0_online) {
                set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
-               tasklet_hi_schedule(&etr_tasklet);
+               schedule_work(&etr_work);
        }
        if (etr_port1_online) {
                set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
-               tasklet_hi_schedule(&etr_tasklet);
+               schedule_work(&etr_work);
        }
+       return 0;
 }
 
+arch_initcall(etr_init);
+
 /*
  * Two sorts of ETR machine checks. The architecture reads:
  * "When a machine-check interruption occurs and if a switch-to-local or
@@ -549,7 +549,7 @@ void etr_switch_to_local(void)
                return;
        etr_disable_sync_clock(NULL);
        set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
-       tasklet_hi_schedule(&etr_tasklet);
+       schedule_work(&etr_work);
 }
 
 /*
@@ -564,7 +564,7 @@ void etr_sync_check(void)
                return;
        etr_disable_sync_clock(NULL);
        set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
-       tasklet_hi_schedule(&etr_tasklet);
+       schedule_work(&etr_work);
 }
 
 /*
@@ -591,13 +591,13 @@ static void etr_ext_handler(__u16 code)
                 * Both ports are not up-to-date now.
                 */
                set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
-       tasklet_hi_schedule(&etr_tasklet);
+       schedule_work(&etr_work);
 }
 
 static void etr_timeout(unsigned long dummy)
 {
        set_bit(ETR_EVENT_UPDATE, &etr_events);
-       tasklet_hi_schedule(&etr_tasklet);
+       schedule_work(&etr_work);
 }
 
 /*
@@ -927,7 +927,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
        if (!eacr.e0 && !eacr.e1)
                return eacr;
 
-       /* Update port0 or port1 with aib stored in etr_tasklet_fn. */
+       /* Update port0 or port1 with aib stored in etr_work_fn. */
        if (aib->esw.q == 0) {
                /* Information for port 0 stored. */
                if (eacr.p0 && !etr_port0_uptodate) {
@@ -1007,7 +1007,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
  * particular this is the only function that calls etr_update_eacr(),
  * it "controls" the etr control register.
  */
-static void etr_tasklet_fn(unsigned long dummy)
+static void etr_work_fn(struct work_struct *work)
 {
        unsigned long long now;
        struct etr_eacr eacr;
@@ -1220,13 +1220,13 @@ static ssize_t etr_online_store(struct sys_device *dev,
                        return count;   /* Nothing to do. */
                etr_port0_online = value;
                set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
-               tasklet_hi_schedule(&etr_tasklet);
+               schedule_work(&etr_work);
        } else {
                if (etr_port1_online == value)
                        return count;   /* Nothing to do. */
                etr_port1_online = value;
                set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
-               tasklet_hi_schedule(&etr_tasklet);
+               schedule_work(&etr_work);
        }
        return count;
 }
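
The hunks above convert the ETR tasklet into a work item: the handler now takes a struct work_struct * and is queued with schedule_work(), so it runs in process context on the shared kernel workqueue instead of in softirq context. A hedged, minimal sketch of that DECLARE_WORK/schedule_work pattern (generic example, not code from this patch):

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    static void demo_work_fn(struct work_struct *work)
    {
        /* runs in process context, so it may sleep -- unlike a tasklet */
        printk(KERN_INFO "demo work executed\n");
    }

    static DECLARE_WORK(demo_work, demo_work_fn);

    static int __init demo_init(void)
    {
        schedule_work(&demo_work);      /* queue on the shared kernel workqueue */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        flush_scheduled_work();         /* make sure demo_work has finished */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
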
index f0e5a32..49dec83 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/kallsyms.h>
 #include <linux/reboot.h>
 #include <linux/kprobes.h>
-
+#include <linux/bug.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -188,18 +188,31 @@ void dump_stack(void)
 
 EXPORT_SYMBOL(dump_stack);
 
+static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
+{
+       return (regs->psw.mask & bits) / ((~bits + 1) & bits);
+}
+
 void show_registers(struct pt_regs *regs)
 {
-       mm_segment_t old_fs;
        char *mode;
-       int i;
 
        mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
        printk("%s PSW : %p %p",
               mode, (void *) regs->psw.mask,
               (void *) regs->psw.addr);
        print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
-       printk("%s GPRS: " FOURLONG, mode,
+       printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
+              "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
+              mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
+              mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
+              mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
+              mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
+              mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
+#ifdef CONFIG_64BIT
+       printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
+#endif
+       printk("\n%s GPRS: " FOURLONG, mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk("           " FOURLONG,
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
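
The mask_bits() helper added above right-shifts a masked PSW field without an explicit shift count: (~bits + 1) & bits is the two's-complement trick that isolates the lowest set bit of the mask, so dividing by it moves the field down to bit 0. A small stand-alone illustration with made-up values (not the real s390 PSW definitions):

#include <stdio.h>

/* Same arithmetic as mask_bits(): extract and right-align a bit field. */
static unsigned long extract_field(unsigned long word, unsigned long bits)
{
	return (word & bits) / ((~bits + 1) & bits);
}

int main(void)
{
	unsigned long key_mask = 0xf0UL;                /* field occupies bits 4..7     */
	unsigned long word = 0x05UL | (0x9UL << 4);     /* field value 9, plus low bits */

	printf("%lx\n", extract_field(word, key_mask)); /* prints 9 */
	return 0;
}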
@@ -208,41 +221,7 @@ void show_registers(struct pt_regs *regs)
        printk("           " FOURLONG,
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
 
-#if 0
-       /* FIXME: this isn't needed any more but it changes the ksymoops
-        * input. To remove or not to remove ... */
-       save_access_regs(regs->acrs);
-       printk("%s ACRS: %08x %08x %08x %08x\n", mode,
-              regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
-       printk("           %08x %08x %08x %08x\n",
-              regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
-       printk("           %08x %08x %08x %08x\n",
-              regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
-       printk("           %08x %08x %08x %08x\n",
-              regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
-#endif
-
-       /*
-        * Print the first 20 byte of the instruction stream at the
-        * time of the fault.
-        */
-       old_fs = get_fs();
-       if (regs->psw.mask & PSW_MASK_PSTATE)
-               set_fs(USER_DS);
-       else
-               set_fs(KERNEL_DS);
-       printk("%s Code: ", mode);
-       for (i = 0; i < 20; i++) {
-               unsigned char c;
-               if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
-                       printk(" Bad PSW.");
-                       break;
-               }
-               printk("%02x ", c);
-       }
-       set_fs(old_fs);
-
-       printk("\n");
+       show_code(regs);
 }      
 
 /* This is called from fs/proc/array.c */
@@ -318,6 +297,11 @@ report_user_fault(long interruption_code, struct pt_regs *regs)
 #endif
 }
 
+int is_valid_bugaddr(unsigned long addr)
+{
+       return 1;
+}
+
 static void __kprobes inline do_trap(long interruption_code, int signr,
                                        char *str, struct pt_regs *regs,
                                        siginfo_t *info)
@@ -344,8 +328,14 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
                 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                 if (fixup)
                         regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
-                else
-                        die(str, regs, interruption_code);
+               else {
+                       enum bug_trap_type btt;
+
+                       btt = report_bug(regs->psw.addr & PSW_ADDR_INSN);
+                       if (btt == BUG_TRAP_TYPE_WARN)
+                               return;
+                       die(str, regs, interruption_code);
+               }
         }
 }
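
For context on the do_trap() change: the single-argument report_bug() used above looks the faulting address up in __bug_table; a BUG_TRAP_TYPE_WARN result means a WARN_ON() fired and execution should continue, anything else falls through to die(). A hedged sketch of just that control flow, using only the calls visible in the hunk (die() is the arch's fatal path, declared elsewhere in this file):

#include <linux/bug.h>
#include <asm/ptrace.h>

extern void die(const char *str, struct pt_regs *regs, long err);

/* Sketch only: the fixup handling shown above is omitted. */
static void handle_illegal_op(struct pt_regs *regs, unsigned long addr,
			      const char *str, long interruption_code)
{
	if (report_bug(addr) == BUG_TRAP_TYPE_WARN)
		return;                         /* WARN_ON(): log and resume   */
	die(str, regs, interruption_code);      /* genuine BUG() or unknown trap */
}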
 
index c30716a..418f642 100644 (file)
@@ -45,6 +45,8 @@ SECTIONS
   __ex_table : { *(__ex_table) }
   __stop___ex_table = .;
 
+  BUG_TABLE
+
   .data : {                    /* Data */
        *(.data)
        CONSTRUCTORS
@@ -77,6 +79,12 @@ SECTIONS
        *(.init.text)
        _einittext = .;
   }
+  /*
+   * .exit.text is discarded at runtime, not link time,
+   * to deal with references from __bug_table
+   */
+  .exit.text :  { *(.exit.text) }
+
   .init.data : { *(.init.data) }
   . = ALIGN(256);
   __setup_start = .;
@@ -116,7 +124,7 @@ SECTIONS
 
   /* Sections to be discarded */
   /DISCARD/ : {
-       *(.exit.text) *(.exit.data) *(.exitcall.exit)
+       *(.exit.data) *(.exitcall.exit)
        }
 
   /* Stabs debugging sections.  */
index 9d5b028..1e1a6ee 100644 (file)
@@ -128,7 +128,7 @@ static inline void set_vtimer(__u64 expires)
        S390_lowcore.last_update_timer = expires;
 
        /* store expire time for this CPU timer */
-       per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+       __get_cpu_var(virt_cpu_timer).to_expire = expires;
 }
 #else
 static inline void set_vtimer(__u64 expires)
@@ -137,7 +137,7 @@ static inline void set_vtimer(__u64 expires)
        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
 
        /* store expire time for this CPU timer */
-       per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+       __get_cpu_var(virt_cpu_timer).to_expire = expires;
 }
 #endif
 
@@ -145,7 +145,7 @@ static void start_cpu_timer(void)
 {
        struct vtimer_queue *vt_list;
 
-       vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+       vt_list = &__get_cpu_var(virt_cpu_timer);
 
        /* CPU timer interrupt is pending, don't reprogram it */
        if (vt_list->idle & 1LL<<63)
@@ -159,7 +159,7 @@ static void stop_cpu_timer(void)
 {
        struct vtimer_queue *vt_list;
 
-       vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+       vt_list = &__get_cpu_var(virt_cpu_timer);
 
        /* nothing to do */
        if (list_empty(&vt_list->list)) {
@@ -219,7 +219,7 @@ static void do_callbacks(struct list_head *cb_list)
        if (list_empty(cb_list))
                return;
 
-       vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+       vt_list = &__get_cpu_var(virt_cpu_timer);
 
        list_for_each_entry_safe(event, tmp, cb_list, entry) {
                fn = event->function;
@@ -244,7 +244,6 @@ static void do_callbacks(struct list_head *cb_list)
  */
 static void do_cpu_timer_interrupt(__u16 error_code)
 {
-       int cpu;
        __u64 next, delta;
        struct vtimer_queue *vt_list;
        struct vtimer_list *event, *tmp;
@@ -253,8 +252,7 @@ static void do_cpu_timer_interrupt(__u16 error_code)
        struct list_head cb_list;
 
        INIT_LIST_HEAD(&cb_list);
-       cpu = smp_processor_id();
-       vt_list = &per_cpu(virt_cpu_timer, cpu);
+       vt_list = &__get_cpu_var(virt_cpu_timer);
 
        /* walk timer list, fire all expired events */
        spin_lock(&vt_list->lock);
@@ -534,7 +532,7 @@ void init_cpu_vtimer(void)
        /* enable cpu timer interrupts */
        __ctl_set_bit(0,10);
 
-       vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+       vt_list = &__get_cpu_var(virt_cpu_timer);
        INIT_LIST_HEAD(&vt_list->list);
        spin_lock_init(&vt_list->lock);
        vt_list->to_expire = 0;
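
The vtime.c hunks all make the same substitution: for the CPU the code is currently running on, __get_cpu_var(v) reaches the same per-CPU slot as per_cpu(v, smp_processor_id()) but without the separate processor-id lookup. A minimal sketch of the equivalence (variable and function names here are hypothetical):

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(u64, virt_timer_expires);         /* hypothetical per-CPU value */

static void set_local_expiry(u64 expires)
{
	/* old form:  per_cpu(virt_timer_expires, smp_processor_id()) = expires; */
	__get_cpu_var(virt_timer_expires) = expires;
}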
index 7a44fed..59aea65 100644 (file)
@@ -5,6 +5,6 @@
 EXTRA_AFLAGS := -traditional
 
 lib-y += delay.o string.o uaccess_std.o uaccess_pt.o qrnnd.o
-lib-$(CONFIG_32BIT) += div64.o
+obj-$(CONFIG_32BIT) += div64.o
 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
 lib-$(CONFIG_SMP) += spinlock.o
index 0481f34..a5f8300 100644 (file)
@@ -147,5 +147,3 @@ uint32_t __div64_32(uint64_t *n, uint32_t base)
 }
 
 #endif /* MARCH_G5 */
-
-EXPORT_SYMBOL(__div64_32);
index 7462aeb..2b76a87 100644 (file)
@@ -26,9 +26,9 @@
 #include <linux/module.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/kdebug.h>
 #include <asm/s390_ext.h>
@@ -63,21 +63,25 @@ int unregister_page_fault_notifier(struct notifier_block *nb)
        return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
 }
 
-static inline int notify_page_fault(enum die_val val, const char *str,
-                       struct pt_regs *regs, long err, int trap, int sig)
+static int __kprobes __notify_page_fault(struct pt_regs *regs, long err)
 {
-       struct die_args args = {
-               .regs = regs,
-               .str = str,
-               .err = err,
-               .trapnr = trap,
-               .signr = sig
-       };
-       return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+       struct die_args args = { .str = "page fault",
+                                .trapnr = 14,
+                                .signr = SIGSEGV };
+       args.regs = regs;
+       args.err = err;
+       return atomic_notifier_call_chain(&notify_page_fault_chain,
+                                         DIE_PAGE_FAULT, &args);
+}
+
+static inline int notify_page_fault(struct pt_regs *regs, long err)
+{
+       if (unlikely(kprobe_running()))
+               return __notify_page_fault(regs, err);
+       return NOTIFY_DONE;
 }
 #else
-static inline int notify_page_fault(enum die_val val, const char *str,
-                       struct pt_regs *regs, long err, int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs, long err)
 {
        return NOTIFY_DONE;
 }
@@ -170,74 +174,127 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
        force_sig_info(SIGSEGV, &si, current);
 }
 
+static void do_no_context(struct pt_regs *regs, unsigned long error_code,
+                         unsigned long address)
+{
+       const struct exception_table_entry *fixup;
+
+       /* Are we prepared to handle this kernel fault?  */
+       fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
+       if (fixup) {
+               regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
+               return;
+       }
+
+       /*
+        * Oops. The kernel tried to access some bad page. We'll have to
+        * terminate things with extreme prejudice.
+        */
+       if (check_space(current) == 0)
+               printk(KERN_ALERT "Unable to handle kernel pointer dereference"
+                      " at virtual kernel address %p\n", (void *)address);
+       else
+               printk(KERN_ALERT "Unable to handle kernel paging request"
+                      " at virtual user address %p\n", (void *)address);
+
+       die("Oops", regs, error_code);
+       do_exit(SIGKILL);
+}
+
+static void do_low_address(struct pt_regs *regs, unsigned long error_code)
+{
+       /* Low-address protection hit in kernel mode means
+          NULL pointer write access in kernel mode.  */
+       if (regs->psw.mask & PSW_MASK_PSTATE) {
+               /* Low-address protection hit in user mode 'cannot happen'. */
+               die ("Low-address protection", regs, error_code);
+               do_exit(SIGKILL);
+       }
+
+       do_no_context(regs, error_code, 0);
+}
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code,
+                           unsigned long address)
+{
+       struct task_struct *tsk = current;
+       struct mm_struct *mm = tsk->mm;
+
+       up_read(&mm->mmap_sem);
+       if (is_init(tsk)) {
+               yield();
+               down_read(&mm->mmap_sem);
+               return 1;
+       }
+       printk("VM: killing process %s\n", tsk->comm);
+       if (regs->psw.mask & PSW_MASK_PSTATE)
+               do_exit(SIGKILL);
+       do_no_context(regs, error_code, address);
+       return 0;
+}
+
+static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
+                     unsigned long address)
+{
+       struct task_struct *tsk = current;
+       struct mm_struct *mm = tsk->mm;
+
+       up_read(&mm->mmap_sem);
+       /*
+        * Send a sigbus, regardless of whether we were in kernel
+        * or user mode.
+        */
+       tsk->thread.prot_addr = address;
+       tsk->thread.trap_no = error_code;
+       force_sig(SIGBUS, tsk);
+
+       /* Kernel mode? Handle exceptions or die */
+       if (!(regs->psw.mask & PSW_MASK_PSTATE))
+               do_no_context(regs, error_code, address);
+}
+
 #ifdef CONFIG_S390_EXEC_PROTECT
 extern long sys_sigreturn(struct pt_regs *regs);
 extern long sys_rt_sigreturn(struct pt_regs *regs);
 extern long sys32_sigreturn(struct pt_regs *regs);
 extern long sys32_rt_sigreturn(struct pt_regs *regs);
 
-static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs,
-                               int rt)
+static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
+                        unsigned long address, unsigned long error_code)
 {
+       u16 instruction;
+       int rc, compat;
+
+       pagefault_disable();
+       rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
+       pagefault_enable();
+       if (rc)
+               return -EFAULT;
+
        up_read(&mm->mmap_sem);
        clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
 #ifdef CONFIG_COMPAT
-       if (test_tsk_thread_flag(current, TIF_31BIT)) {
-               if (rt)
-                       sys32_rt_sigreturn(regs);
-               else
-                       sys32_sigreturn(regs);
-               return;
-       }
-#endif /* CONFIG_COMPAT */
-       if (rt)
-               sys_rt_sigreturn(regs);
+       compat = test_tsk_thread_flag(current, TIF_31BIT);
+       if (compat && instruction == 0x0a77)
+               sys32_sigreturn(regs);
+       else if (compat && instruction == 0x0aad)
+               sys32_rt_sigreturn(regs);
        else
+#endif
+       if (instruction == 0x0a77)
                sys_sigreturn(regs);
-       return;
-}
-
-static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
-                        unsigned long address, unsigned long error_code)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *pte;
-       u16 *instruction;
-       unsigned long pfn, uaddr = regs->psw.addr;
-
-       spin_lock(&mm->page_table_lock);
-       pgd = pgd_offset(mm, uaddr);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               goto out_fault;
-       pmd = pmd_offset(pgd, uaddr);
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               goto out_fault;
-       pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr);
-       if (!pte || !pte_present(*pte))
-               goto out_fault;
-       pfn = pte_pfn(*pte);
-       if (!pfn_valid(pfn))
-               goto out_fault;
-       spin_unlock(&mm->page_table_lock);
-
-       instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1)));
-       if (*instruction == 0x0a77)
-               do_sigreturn(mm, regs, 0);
-       else if (*instruction == 0x0aad)
-               do_sigreturn(mm, regs, 1);
+       else if (instruction == 0x0aad)
+               sys_rt_sigreturn(regs);
        else {
-               printk("- XXX - do_exception: task = %s, primary, NO EXEC "
-                      "-> SIGSEGV\n", current->comm);
-               up_read(&mm->mmap_sem);
                current->thread.prot_addr = address;
                current->thread.trap_no = error_code;
                do_sigsegv(regs, error_code, SEGV_MAPERR, address);
        }
        return 0;
-out_fault:
-       spin_unlock(&mm->page_table_lock);
-       return -EFAULT;
 }
 #endif /* CONFIG_S390_EXEC_PROTECT */
 
@@ -253,49 +310,23 @@ out_fault:
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
 static inline void
-do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
+do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 {
-        struct task_struct *tsk;
-        struct mm_struct *mm;
-        struct vm_area_struct * vma;
-        unsigned long address;
-       const struct exception_table_entry *fixup;
-       int si_code;
+       struct task_struct *tsk;
+       struct mm_struct *mm;
+       struct vm_area_struct *vma;
+       unsigned long address;
        int space;
+       int si_code;
 
-        tsk = current;
-        mm = tsk->mm;
-       
-       if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-                                       SIGSEGV) == NOTIFY_STOP)
+       if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
                return;
 
-       /* 
-         * Check for low-address protection.  This needs to be treated
-        * as a special case because the translation exception code 
-        * field is not guaranteed to contain valid data in this case.
-        */
-       if (is_protection && !(S390_lowcore.trans_exc_code & 4)) {
-
-               /* Low-address protection hit in kernel mode means 
-                  NULL pointer write access in kernel mode.  */
-               if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
-                       address = 0;
-                       space = 0;
-                       goto no_context;
-               }
-
-               /* Low-address protection hit in user mode 'cannot happen'.  */
-               die ("Low-address protection", regs, error_code);
-               do_exit(SIGKILL);
-       }
+       tsk = current;
+       mm = tsk->mm;
 
-        /* 
-         * get the failing address 
-         * more specific the segment and page table portion of 
-         * the address 
-         */
-        address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
+       /* get the failing address and the affected space */
+       address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
        space = check_space(tsk);
 
        /*
@@ -313,7 +344,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
         */
        local_irq_enable();
 
-        down_read(&mm->mmap_sem);
+       down_read(&mm->mmap_sem);
 
        si_code = SEGV_MAPERR;
        vma = find_vma(mm, address);
@@ -330,19 +361,19 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
                        return;
 #endif
 
-        if (vma->vm_start <= address) 
-                goto good_area;
-        if (!(vma->vm_flags & VM_GROWSDOWN))
-                goto bad_area;
-        if (expand_stack(vma, address))
-                goto bad_area;
+       if (vma->vm_start <= address)
+               goto good_area;
+       if (!(vma->vm_flags & VM_GROWSDOWN))
+               goto bad_area;
+       if (expand_stack(vma, address))
+               goto bad_area;
 /*
  * Ok, we have a good vm_area for this memory access, so
  * we can handle it..
  */
 good_area:
        si_code = SEGV_ACCERR;
-       if (!is_protection) {
+       if (!write) {
                /* page not present, check vm flags */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
@@ -357,7 +388,7 @@ survive:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       switch (handle_mm_fault(mm, vma, address, is_protection)) {
+       switch (handle_mm_fault(mm, vma, address, write)) {
        case VM_FAULT_MINOR:
                tsk->min_flt++;
                break;
@@ -365,9 +396,12 @@ survive:
                tsk->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
-               goto do_sigbus;
+               do_sigbus(regs, error_code, address);
+               return;
        case VM_FAULT_OOM:
-               goto out_of_memory;
+               if (do_out_of_memory(regs, error_code, address))
+                       goto survive;
+               return;
        default:
                BUG();
        }
@@ -385,75 +419,34 @@ survive:
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
-        up_read(&mm->mmap_sem);
+       up_read(&mm->mmap_sem);
 
-        /* User mode accesses just cause a SIGSEGV */
-        if (regs->psw.mask & PSW_MASK_PSTATE) {
-                tsk->thread.prot_addr = address;
-                tsk->thread.trap_no = error_code;
+       /* User mode accesses just cause a SIGSEGV */
+       if (regs->psw.mask & PSW_MASK_PSTATE) {
+               tsk->thread.prot_addr = address;
+               tsk->thread.trap_no = error_code;
                do_sigsegv(regs, error_code, si_code, address);
-                return;
+               return;
        }
 
 no_context:
-        /* Are we prepared to handle this kernel fault?  */
-       fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
-       if (fixup) {
-               regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
-                return;
-        }
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-       if (space == 0)
-                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
-                      " at virtual kernel address %p\n", (void *)address);
-        else
-                printk(KERN_ALERT "Unable to handle kernel paging request"
-                      " at virtual user address %p\n", (void *)address);
-
-        die("Oops", regs, error_code);
-        do_exit(SIGKILL);
-
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
-*/
-out_of_memory:
-       up_read(&mm->mmap_sem);
-       if (is_init(tsk)) {
-               yield();
-               down_read(&mm->mmap_sem);
-               goto survive;
-       }
-       printk("VM: killing process %s\n", tsk->comm);
-       if (regs->psw.mask & PSW_MASK_PSTATE)
-               do_exit(SIGKILL);
-       goto no_context;
-
-do_sigbus:
-       up_read(&mm->mmap_sem);
-
-       /*
-        * Send a sigbus, regardless of whether we were in kernel
-        * or user mode.
-        */
-        tsk->thread.prot_addr = address;
-        tsk->thread.trap_no = error_code;
-       force_sig(SIGBUS, tsk);
-
-       /* Kernel mode? Handle exceptions or die */
-       if (!(regs->psw.mask & PSW_MASK_PSTATE))
-               goto no_context;
+       do_no_context(regs, error_code, address);
 }
 
 void __kprobes do_protection_exception(struct pt_regs *regs,
                                       unsigned long error_code)
 {
+       /* Protection exception is suppressing, decrement psw address. */
        regs->psw.addr -= (error_code >> 16);
+       /*
+        * Check for low-address protection.  This needs to be treated
+        * as a special case because the translation exception code
+        * field is not guaranteed to contain valid data in this case.
+        */
+       if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
+               do_low_address(regs, error_code);
+               return;
+       }
        do_exception(regs, 4, 1);
 }
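
The rewritten signal_return() above reads the trapping instruction straight from user space instead of walking the page tables by hand. A sketch of the safe-access pattern it relies on, under a hypothetical helper name:

#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Read one halfword from user space in a context that must not sleep on a
 * page fault; with page-fault handling disabled, __get_user() fails fast
 * instead of blocking.
 */
static int peek_user_halfword(const u16 __user *addr, u16 *value)
{
	int rc;

	pagefault_disable();
	rc = __get_user(*value, addr);
	pagefault_enable();
	return rc;              /* 0 on success, -EFAULT if the page is unmapped */
}

The svc opcodes it then compares against, 0x0a77 and 0x0aad, encode the sigreturn and rt_sigreturn system calls, which is why only those two are re-dispatched.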
 
index b5681e3..0b9cca5 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 lib-y  = delay.o memset.o memmove.o memchr.o \
-        checksum.o strcasecmp.o strlen.o div64.o udivdi3.o \
+        checksum.o strlen.o div64.o udivdi3.o \
         div64-generic.o
 
 memcpy-y                       := memcpy.o
diff --git a/arch/sh/lib/strcasecmp.c b/arch/sh/lib/strcasecmp.c
deleted file mode 100644 (file)
index 4e57a21..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- *  linux/arch/alpha/lib/strcasecmp.c
- */
-
-#include <linux/string.h>
-
-
-/* We handle nothing here except the C locale.  Since this is used in
-   only one place, on strings known to contain only 7 bit ASCII, this
-   is ok.  */
-
-int strcasecmp(const char *a, const char *b)
-{
-       int ca, cb;
-
-       do {
-               ca = *a++ & 0xff;
-               cb = *b++ & 0xff;
-               if (ca >= 'A' && ca <= 'Z')
-                       ca += 'a' - 'A';
-               if (cb >= 'A' && cb <= 'Z')
-                       cb += 'a' - 'A';
-       } while (ca == cb && ca != '\0');
-
-       return ca - cb;
-}
index ba58c3a..7bb86b9 100644 (file)
@@ -25,7 +25,7 @@
 struct linux_ebus *ebus_chain = NULL;
 
 /* We are together with pcic.c under CONFIG_PCI. */
-extern unsigned int pcic_pin_to_irq(unsigned int, char *name);
+extern unsigned int pcic_pin_to_irq(unsigned int, const char *name);
 
 /*
  * IRQ Blacklist
@@ -69,7 +69,7 @@ static inline unsigned long ebus_alloc(size_t size)
 
 /*
  */
-int __init ebus_blacklist_irq(char *name)
+int __init ebus_blacklist_irq(const char *name)
 {
        struct ebus_device_irq *dp;
 
@@ -86,8 +86,8 @@ int __init ebus_blacklist_irq(char *name)
 void __init fill_ebus_child(struct device_node *dp,
                            struct linux_ebus_child *dev)
 {
-       int *regs;
-       int *irqs;
+       const int *regs;
+       const int *irqs;
        int i, len;
 
        dev->prom_node = dp;
@@ -146,9 +146,9 @@ void __init fill_ebus_child(struct device_node *dp,
 
 void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *dev)
 {
-       struct linux_prom_registers *regs;
+       const struct linux_prom_registers *regs;
        struct linux_ebus_child *child;
-       int *irqs;
+       const int *irqs;
        int i, n, len;
        unsigned long baseaddr;
 
@@ -269,7 +269,7 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d
 
 void __init ebus_init(void)
 {
-       struct linux_prom_pci_registers *regs;
+       const struct linux_prom_pci_registers *regs;
        struct linux_pbm_info *pbm;
        struct linux_ebus_device *dev;
        struct linux_ebus *ebus;
index 48c24f7..fd7f8cb 100644 (file)
@@ -210,7 +210,7 @@ struct of_bus {
                                       int *addrc, int *sizec);
        int             (*map)(u32 *addr, const u32 *range,
                               int na, int ns, int pna);
-       unsigned int    (*get_flags)(u32 *addr);
+       unsigned int    (*get_flags)(const u32 *addr);
 };
 
 /*
@@ -270,7 +270,7 @@ static int of_bus_default_map(u32 *addr, const u32 *range,
        return 0;
 }
 
-static unsigned int of_bus_default_get_flags(u32 *addr)
+static unsigned int of_bus_default_get_flags(const u32 *addr)
 {
        return IORESOURCE_MEM;
 }
@@ -334,7 +334,7 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
        return 0;
 }
 
-static unsigned int of_bus_pci_get_flags(u32 *addr)
+static unsigned int of_bus_pci_get_flags(const u32 *addr)
 {
        unsigned int flags = 0;
        u32 w = addr[0];
@@ -375,7 +375,7 @@ static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna)
        return of_bus_default_map(addr, range, na, ns, pna);
 }
 
-static unsigned int of_bus_sbus_get_flags(u32 *addr)
+static unsigned int of_bus_sbus_get_flags(const u32 *addr)
 {
        return IORESOURCE_MEM;
 }
@@ -432,7 +432,7 @@ static int __init build_one_resource(struct device_node *parent,
                                     u32 *addr,
                                     int na, int ns, int pna)
 {
-       u32 *ranges;
+       const u32 *ranges;
        unsigned int rlen;
        int rone;
 
@@ -470,7 +470,7 @@ static void __init build_device_resources(struct of_device *op,
        struct of_bus *bus;
        int na, ns;
        int index, num_reg;
-       void *preg;
+       const void *preg;
 
        if (!parent)
                return;
@@ -492,7 +492,7 @@ static void __init build_device_resources(struct of_device *op,
        for (index = 0; index < num_reg; index++) {
                struct resource *r = &op->resource[index];
                u32 addr[OF_MAX_ADDR_CELLS];
-               u32 *reg = (preg + (index * ((na + ns) * 4)));
+               const u32 *reg = (preg + (index * ((na + ns) * 4)));
                struct device_node *dp = op->node;
                struct device_node *pp = p_op->node;
                struct of_bus *pbus, *dbus;
@@ -559,7 +559,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
                                                 struct device *parent)
 {
        struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
-       struct linux_prom_irqs *intr;
+       const struct linux_prom_irqs *intr;
        int len, i;
 
        if (!op)
@@ -579,7 +579,8 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
                for (i = 0; i < op->num_irqs; i++)
                        op->irqs[i] = intr[i].pri;
        } else {
-               unsigned int *irq = of_get_property(dp, "interrupts", &len);
+               const unsigned int *irq =
+                       of_get_property(dp, "interrupts", &len);
 
                if (irq) {
                        op->num_irqs = len / sizeof(unsigned int);
@@ -594,7 +595,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
                        0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
                };
                struct device_node *io_unit, *sbi = dp->parent;
-               struct linux_prom_registers *regs;
+               const struct linux_prom_registers *regs;
                int board, slot;
 
                while (sbi) {
index 1c927c5..5ca7e8f 100644 (file)
@@ -37,8 +37,6 @@
 #include <asm/irq_regs.h>
 
 
-unsigned int pcic_pin_to_irq(unsigned int pin, char *name);
-
 /*
  * I studied different documents and many live PROMs both from 2.30
  * family and 3.xx versions. I came to the amazing conclusion: there is
@@ -681,7 +679,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
  * pcic_pin_to_irq() is exported to ebus.c.
  */
 unsigned int
-pcic_pin_to_irq(unsigned int pin, char *name)
+pcic_pin_to_irq(unsigned int pin, const char *name)
 {
        struct linux_pcic *pcic = &pcic0;
        unsigned int irq;
index 2cc302b..eed140b 100644 (file)
@@ -32,12 +32,13 @@ static struct device_node *allnodes;
  */
 static DEFINE_RWLOCK(devtree_lock);
 
-int of_device_is_compatible(struct device_node *device, const char *compat)
+int of_device_is_compatible(const struct device_node *device,
+                           const char *compat)
 {
        const char* cp;
        int cplen, l;
 
-       cp = (char *) of_get_property(device, "compatible", &cplen);
+       cp = of_get_property(device, "compatible", &cplen);
        if (cp == NULL)
                return 0;
        while (cplen > 0) {
@@ -150,13 +151,14 @@ struct device_node *of_find_compatible_node(struct device_node *from,
 }
 EXPORT_SYMBOL(of_find_compatible_node);
 
-struct property *of_find_property(struct device_node *np, const char *name,
+struct property *of_find_property(const struct device_node *np,
+                                 const char *name,
                                  int *lenp)
 {
        struct property *pp;
 
        for (pp = np->properties; pp != 0; pp = pp->next) {
-               if (strcmp(pp->name, name) == 0) {
+               if (strcasecmp(pp->name, name) == 0) {
                        if (lenp != 0)
                                *lenp = pp->length;
                        break;
@@ -170,7 +172,8 @@ EXPORT_SYMBOL(of_find_property);
  * Find a property with a given name for a given node
  * and return the value.
  */
-void *of_get_property(struct device_node *np, const char *name, int *lenp)
+const void *of_get_property(const struct device_node *np, const char *name,
+                           int *lenp)
 {
        struct property *pp = of_find_property(np,name,lenp);
        return pp ? pp->value : NULL;
@@ -192,7 +195,7 @@ EXPORT_SYMBOL(of_getintprop_default);
 
 int of_n_addr_cells(struct device_node *np)
 {
-       int* ip;
+       const int* ip;
        do {
                if (np->parent)
                        np = np->parent;
@@ -207,7 +210,7 @@ EXPORT_SYMBOL(of_n_addr_cells);
 
 int of_n_size_cells(struct device_node *np)
 {
-       int* ip;
+       const int* ip;
        do {
                if (np->parent)
                        np = np->parent;
@@ -239,7 +242,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
        while (*prevp) {
                struct property *prop = *prevp;
 
-               if (!strcmp(prop->name, name)) {
+               if (!strcasecmp(prop->name, name)) {
                        void *old_val = prop->value;
                        int ret;
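
These sparc prom.c hunks (and the matching ebus/of_device ones above) constify the property accessors: of_get_property() and of_find_property() now hand back const pointers, so callers keep their locals const instead of casting the qualifier away. A minimal caller sketch under that convention; the function is hypothetical, the "model" property name also appears in the clock_probe hunk that follows:

#include <linux/string.h>
#include <asm/prom.h>

static int node_is_simba(const struct device_node *dp)
{
	const char *model = of_get_property(dp, "model", NULL);

	return model && !strcmp(model, "SUNW,simba");
}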
 
index 9bb1240..f1401b5 100644 (file)
@@ -301,7 +301,7 @@ static __inline__ void sun4_clock_probe(void)
 static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
 {
        struct device_node *dp = op->node;
-       char *model = of_get_property(dp, "model", NULL);
+       const char *model = of_get_property(dp, "model", NULL);
 
        if (!model)
                return -ENODEV;
index 1a6348b..590a41b 100644 (file)
@@ -19,6 +19,14 @@ config SPARC64
          SPARC64 ports; its web page is available at
          <http://www.ultralinux.org/>.
 
+config GENERIC_TIME
+       bool
+       default y
+
+config GENERIC_CLOCKEVENTS
+       bool
+       default y
+
 config 64BIT
        def_bool y
 
@@ -34,10 +42,6 @@ config LOCKDEP_SUPPORT
        bool
        default y
 
-config TIME_INTERPOLATION
-       bool
-       default y
-
 config ARCH_MAY_HAVE_PC_FDC
        bool
        default y
@@ -113,6 +117,8 @@ config GENERIC_HARDIRQS
 
 menu "General machine setup"
 
+source "kernel/time/Kconfig"
+
 config SMP
        bool "Symmetric multi-processing support"
        ---help---
@@ -214,6 +220,7 @@ config ARCH_SPARSEMEM_ENABLE
 
 config ARCH_SPARSEMEM_DEFAULT
        def_bool y
+       select SPARSEMEM_STATIC
 
 config LARGE_ALLOCS
        def_bool y
index e724c54..c65b2f9 100644 (file)
@@ -32,7 +32,7 @@ static void central_probe_failure(int line)
 static void central_ranges_init(struct linux_central *central)
 {
        struct device_node *dp = central->prom_node;
-       void *pval;
+       const void *pval;
        int len;
        
        central->num_central_ranges = 0;
@@ -47,7 +47,7 @@ static void central_ranges_init(struct linux_central *central)
 static void fhc_ranges_init(struct linux_fhc *fhc)
 {
        struct device_node *dp = fhc->prom_node;
-       void *pval;
+       const void *pval;
        int len;
        
        fhc->num_fhc_ranges = 0;
@@ -119,7 +119,7 @@ static unsigned long prom_reg_to_paddr(struct linux_prom_registers *r)
 static void probe_other_fhcs(void)
 {
        struct device_node *dp;
-       struct linux_prom64_registers *fpregs;
+       const struct linux_prom64_registers *fpregs;
 
        for_each_node_by_name(dp, "fhc") {
                struct linux_fhc *fhc;
@@ -190,7 +190,8 @@ static void probe_clock_board(struct linux_central *central,
                              struct device_node *fp)
 {
        struct device_node *dp;
-       struct linux_prom_registers cregs[3], *pr;
+       struct linux_prom_registers cregs[3];
+       const struct linux_prom_registers *pr;
        int nslots, tmp, nregs;
 
        dp = fp->child;
@@ -299,7 +300,8 @@ static void init_all_fhc_hw(void)
 
 void central_probe(void)
 {
-       struct linux_prom_registers fpregs[6], *pr;
+       struct linux_prom_registers fpregs[6];
+       const struct linux_prom_registers *pr;
        struct linux_fhc *fhc;
        struct device_node *dp, *fp;
        int err;
index 9699abe..777d345 100644 (file)
@@ -343,8 +343,8 @@ static int init_one_mctrl(struct device_node *dp)
 {
        struct mctrl_info *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
        int portid = of_getintprop_default(dp, "portid", -1);
-       struct linux_prom64_registers *regs;
-       void *pval;
+       const struct linux_prom64_registers *regs;
+       const void *pval;
        int len;
 
        if (!mp)
index 35bf895..0ace17b 100644 (file)
@@ -285,7 +285,7 @@ static void __init fill_ebus_child(struct device_node *dp,
                                   int non_standard_regs)
 {
        struct of_device *op;
-       int *regs;
+       const int *regs;
        int i, len;
 
        dev->prom_node = dp;
@@ -438,11 +438,9 @@ static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p)
 
 void __init ebus_init(void)
 {
-       struct pci_pbm_info *pbm;
        struct linux_ebus_device *dev;
        struct linux_ebus *ebus;
        struct pci_dev *pdev;
-       struct pcidev_cookie *cookie;
        struct device_node *dp;
        int is_rio;
        int num_ebus = 0;
@@ -453,8 +451,7 @@ void __init ebus_init(void)
                return;
        }
 
-       cookie = pdev->sysdata;
-       dp = cookie->prom_node;
+       dp = pci_device_to_OF_node(pdev);
 
        ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus));
        ebus->next = NULL;
@@ -480,8 +477,7 @@ void __init ebus_init(void)
                                break;
                        }
                        ebus->is_rio = is_rio;
-                       cookie = pdev->sysdata;
-                       dp = cookie->prom_node;
+                       dp = pci_device_to_OF_node(pdev);
                        continue;
                }
                printk("ebus%d:", num_ebus);
@@ -489,7 +485,6 @@ void __init ebus_init(void)
                ebus->index = num_ebus;
                ebus->prom_node = dp;
                ebus->self = pdev;
-               ebus->parent = pbm = cookie->pbm;
 
                ebus->ofdev.node = dp;
                ebus->ofdev.dev.parent = &pdev->dev;
@@ -531,8 +526,7 @@ void __init ebus_init(void)
                if (!pdev)
                        break;
 
-               cookie = pdev->sysdata;
-               dp = cookie->prom_node;
+               dp = pci_device_to_OF_node(pdev);
 
                ebus->next = ebus_alloc(sizeof(struct linux_ebus));
                ebus = ebus->next;
index c443db1..6241e3d 100644 (file)
@@ -589,32 +589,6 @@ void ack_bad_irq(unsigned int virt_irq)
               ino, virt_irq);
 }
 
-#ifndef CONFIG_SMP
-extern irqreturn_t timer_interrupt(int, void *);
-
-void timer_irq(int irq, struct pt_regs *regs)
-{
-       unsigned long clr_mask = 1 << irq;
-       unsigned long tick_mask = tick_ops->softint_mask;
-       struct pt_regs *old_regs;
-
-       if (get_softint() & tick_mask) {
-               irq = 0;
-               clr_mask = tick_mask;
-       }
-       clear_softint(clr_mask);
-
-       old_regs = set_irq_regs(regs);
-       irq_enter();
-
-       kstat_this_cpu.irqs[0]++;
-       timer_interrupt(irq, NULL);
-
-       irq_exit();
-       set_irq_regs(old_regs);
-}
-#endif
-
 void handler_irq(int irq, struct pt_regs *regs)
 {
        struct ino_bucket *bucket;
@@ -653,7 +627,7 @@ static u64 prom_limit0, prom_limit1;
 static void map_prom_timers(void)
 {
        struct device_node *dp;
-       unsigned int *addr;
+       const unsigned int *addr;
 
        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
index 98721a8..6a6882e 100644 (file)
@@ -24,27 +24,9 @@ static void __init report_dev(struct sparc_isa_device *isa_dev, int child)
 
 static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev)
 {
-       struct linux_prom_registers *pregs;
-       unsigned long base, len;
-       int prop_len;
-
-       pregs = of_get_property(isa_dev->prom_node, "reg", &prop_len);
-       if (!pregs)
-               return;
-
-       /* Only the first one is interesting. */
-       len = pregs[0].reg_size;
-       base = (((unsigned long)pregs[0].which_io << 32) |
-               (unsigned long)pregs[0].phys_addr);
-       base += isa_dev->bus->parent->io_space.start;
-
-       isa_dev->resource.start = base;
-       isa_dev->resource.end   = (base + len - 1UL);
-       isa_dev->resource.flags = IORESOURCE_IO;
-       isa_dev->resource.name  = isa_dev->prom_node->name;
+       struct of_device *op = of_find_device_by_node(isa_dev->prom_node);
 
-       request_resource(&isa_dev->bus->parent->io_space,
-                        &isa_dev->resource);
+       memcpy(&isa_dev->resource, &op->resource[0], sizeof(struct resource));
 }
 
 static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev)
@@ -158,19 +140,10 @@ void __init isa_init(void)
 
        pdev = NULL;
        while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
-               struct pcidev_cookie *pdev_cookie;
-               struct pci_pbm_info *pbm;
                struct sparc_isa_bridge *isa_br;
                struct device_node *dp;
 
-               pdev_cookie = pdev->sysdata;
-               if (!pdev_cookie) {
-                       printk("ISA: Warning, ISA bridge ignored due to "
-                              "lack of OBP data.\n");
-                       continue;
-               }
-               pbm = pdev_cookie->pbm;
-               dp = pdev_cookie->prom_node;
+               dp = pci_device_to_OF_node(pdev);
 
                isa_br = kzalloc(sizeof(*isa_br), GFP_KERNEL);
                if (!isa_br) {
@@ -195,10 +168,9 @@ void __init isa_init(void)
                isa_br->next = isa_chain;
                isa_chain = isa_br;
 
-               isa_br->parent = pbm;
                isa_br->self = pdev;
                isa_br->index = index++;
-               isa_br->prom_node = pdev_cookie->prom_node;
+               isa_br->prom_node = dp;
 
                printk("isa%d:", isa_br->index);
 
index fb9bf1e..9ac9a30 100644 (file)
@@ -245,7 +245,7 @@ struct of_bus {
                                       int *addrc, int *sizec);
        int             (*map)(u32 *addr, const u32 *range,
                               int na, int ns, int pna);
-       unsigned int    (*get_flags)(u32 *addr);
+       unsigned int    (*get_flags)(const u32 *addr);
 };
 
 /*
@@ -305,7 +305,7 @@ static int of_bus_default_map(u32 *addr, const u32 *range,
        return 0;
 }
 
-static unsigned int of_bus_default_get_flags(u32 *addr)
+static unsigned int of_bus_default_get_flags(const u32 *addr)
 {
        return IORESOURCE_MEM;
 }
@@ -317,6 +317,11 @@ static unsigned int of_bus_default_get_flags(u32 *addr)
 static int of_bus_pci_match(struct device_node *np)
 {
        if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
+               const char *model = of_get_property(np, "model", NULL);
+
+               if (model && !strcmp(model, "SUNW,simba"))
+                       return 0;
+
                /* Do not do PCI specific frobbing if the
                 * PCI bridge lacks a ranges property.  We
                 * want to pass it through up to the next
@@ -332,6 +337,21 @@ static int of_bus_pci_match(struct device_node *np)
        return 0;
 }
 
+static int of_bus_simba_match(struct device_node *np)
+{
+       const char *model = of_get_property(np, "model", NULL);
+
+       if (model && !strcmp(model, "SUNW,simba"))
+               return 1;
+       return 0;
+}
+
+static int of_bus_simba_map(u32 *addr, const u32 *range,
+                           int na, int ns, int pna)
+{
+       return 0;
+}
+
 static void of_bus_pci_count_cells(struct device_node *np,
                                   int *addrc, int *sizec)
 {
@@ -369,7 +389,7 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
        return 0;
 }
 
-static unsigned int of_bus_pci_get_flags(u32 *addr)
+static unsigned int of_bus_pci_get_flags(const u32 *addr)
 {
        unsigned int flags = 0;
        u32 w = addr[0];
@@ -436,6 +456,15 @@ static struct of_bus of_busses[] = {
                .map = of_bus_pci_map,
                .get_flags = of_bus_pci_get_flags,
        },
+       /* SIMBA */
+       {
+               .name = "simba",
+               .addr_prop_name = "assigned-addresses",
+               .match = of_bus_simba_match,
+               .count_cells = of_bus_pci_count_cells,
+               .map = of_bus_simba_map,
+               .get_flags = of_bus_pci_get_flags,
+       },
        /* SBUS */
        {
                .name = "sbus",
@@ -482,7 +511,7 @@ static int __init build_one_resource(struct device_node *parent,
                                     u32 *addr,
                                     int na, int ns, int pna)
 {
-       u32 *ranges;
+       const u32 *ranges;
        unsigned int rlen;
        int rone;
 
@@ -513,7 +542,7 @@ static int __init build_one_resource(struct device_node *parent,
 
 static int __init use_1to1_mapping(struct device_node *pp)
 {
-       char *model;
+       const char *model;
 
        /* If this is on the PMU bus, don't try to translate it even
         * if a ranges property exists.
@@ -548,7 +577,7 @@ static void __init build_device_resources(struct of_device *op,
        struct of_bus *bus;
        int na, ns;
        int index, num_reg;
-       void *preg;
+       const void *preg;
 
        if (!parent)
                return;
@@ -578,7 +607,7 @@ static void __init build_device_resources(struct of_device *op,
        for (index = 0; index < num_reg; index++) {
                struct resource *r = &op->resource[index];
                u32 addr[OF_MAX_ADDR_CELLS];
-               u32 *reg = (preg + (index * ((na + ns) * 4)));
+               const u32 *reg = (preg + (index * ((na + ns) * 4)));
                struct device_node *dp = op->node;
                struct device_node *pp = p_op->node;
                struct of_bus *pbus, *dbus;
@@ -643,14 +672,14 @@ static void __init build_device_resources(struct of_device *op,
 
 static struct device_node * __init
 apply_interrupt_map(struct device_node *dp, struct device_node *pp,
-                   u32 *imap, int imlen, u32 *imask,
+                   const u32 *imap, int imlen, const u32 *imask,
                    unsigned int *irq_p)
 {
        struct device_node *cp;
        unsigned int irq = *irq_p;
        struct of_bus *bus;
        phandle handle;
-       u32 *reg;
+       const u32 *reg;
        int na, num_reg, i;
 
        bus = of_match_bus(pp);
@@ -705,7 +734,7 @@ static unsigned int __init pci_irq_swizzle(struct device_node *dp,
                                           struct device_node *pp,
                                           unsigned int irq)
 {
-       struct linux_prom_pci_registers *regs;
+       const struct linux_prom_pci_registers *regs;
        unsigned int bus, devfn, slot, ret;
 
        if (irq < 1 || irq > 4)
@@ -730,12 +759,6 @@ static unsigned int __init pci_irq_swizzle(struct device_node *dp,
                 * D: 2-bit slot number, derived from PCI device number as
                 *    (dev - 1) for bus A, or (dev - 2) for bus B
                 * L: 2-bit line number
-                *
-                * Actually, more "portable" way to calculate the funky
-                * slot number is to subtract pbm->pci_first_slot from the
-                * device number, and that's exactly what the pre-OF
-                * sparc64 code did, but we're building this stuff generically
-                * using the OBP tree, not in the PCI controller layer.
                 */
                if (bus & 0x80) {
                        /* PBM-A */
@@ -794,7 +817,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
        pp = dp->parent;
        ip = NULL;
        while (pp) {
-               void *imap, *imsk;
+               const void *imap, *imsk;
                int imlen;
 
                imap = of_get_property(pp, "interrupt-map", &imlen);
@@ -859,7 +882,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
                                                 struct device *parent)
 {
        struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
-       unsigned int *irq;
+       const unsigned int *irq;
        int len, i;
 
        if (!op)
index 1210988..023af41 100644 (file)
@@ -1,9 +1,11 @@
-/* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $
- * pci.c: UltraSparc PCI controller support.
+/* pci.c: UltraSparc PCI controller support.
  *
  * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
  * Copyright (C) 1998, 1999 Eddie C. Dost   (ecd@skynet.be)
  * Copyright (C) 1999 Jakub Jelinek   (jj@ultra.linux.cz)
+ *
+ * OF tree based PCI bus probing taken from the PowerPC port
+ * with minor modifications, see there for credits.
  */
 
 #include <linux/module.h>
@@ -24,6 +26,9 @@
 #include <asm/ebus.h>
 #include <asm/isa.h>
 #include <asm/prom.h>
+#include <asm/apb.h>
+
+#include "pci_impl.h"
 
 unsigned long pci_memspace_mask = 0xffffffffUL;
 
@@ -277,10 +282,10 @@ int __init pcic_present(void)
        return pci_controller_scan(pci_is_controller);
 }
 
-struct pci_iommu_ops *pci_iommu_ops;
+const struct pci_iommu_ops *pci_iommu_ops;
 EXPORT_SYMBOL(pci_iommu_ops);
 
-extern struct pci_iommu_ops pci_sun4u_iommu_ops,
+extern const struct pci_iommu_ops pci_sun4u_iommu_ops,
        pci_sun4v_iommu_ops;
 
 /* Find each controller in the system, attach and initialize
@@ -300,6 +305,467 @@ static void __init pci_controller_probe(void)
        pci_controller_scan(pci_controller_init);
 }
 
+static unsigned long pci_parse_of_flags(u32 addr0)
+{
+       unsigned long flags = 0;
+
+       if (addr0 & 0x02000000) {
+               flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
+               flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
+               flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
+               if (addr0 & 0x40000000)
+                       flags |= IORESOURCE_PREFETCH
+                                | PCI_BASE_ADDRESS_MEM_PREFETCH;
+       } else if (addr0 & 0x01000000)
+               flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
+       return flags;
+}
+
+/* The of_device layer has translated all of the assigned-address properties
+ * into physical address resources, we only have to figure out the register
+ * mapping.
+ */
+static void pci_parse_of_addrs(struct of_device *op,
+                              struct device_node *node,
+                              struct pci_dev *dev)
+{
+       struct resource *op_res;
+       const u32 *addrs;
+       int proplen;
+
+       addrs = of_get_property(node, "assigned-addresses", &proplen);
+       if (!addrs)
+               return;
+       printk("    parse addresses (%d bytes) @ %p\n", proplen, addrs);
+       op_res = &op->resource[0];
+       for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
+               struct resource *res;
+               unsigned long flags;
+               int i;
+
+               flags = pci_parse_of_flags(addrs[0]);
+               if (!flags)
+                       continue;
+               i = addrs[0] & 0xff;
+               printk("  start: %lx, end: %lx, i: %x\n",
+                      op_res->start, op_res->end, i);
+
+               if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
+                       res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
+               } else if (i == dev->rom_base_reg) {
+                       res = &dev->resource[PCI_ROM_RESOURCE];
+                       flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
+               } else {
+                       printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
+                       continue;
+               }
+               res->start = op_res->start;
+               res->end = op_res->end;
+               res->flags = flags;
+               res->name = pci_name(dev);
+       }
+}
+
+struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
+                                 struct device_node *node,
+                                 struct pci_bus *bus, int devfn,
+                                 int host_controller)
+{
+       struct dev_archdata *sd;
+       struct pci_dev *dev;
+       const char *type;
+       u32 class;
+
+       dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
+       if (!dev)
+               return NULL;
+
+       sd = &dev->dev.archdata;
+       sd->iommu = pbm->iommu;
+       sd->stc = &pbm->stc;
+       sd->host_controller = pbm;
+       sd->prom_node = node;
+       sd->op = of_find_device_by_node(node);
+       sd->msi_num = 0xffffffff;
+
+       type = of_get_property(node, "device_type", NULL);
+       if (type == NULL)
+               type = "";
+
+       printk("    create device, devfn: %x, type: %s hostcontroller(%d)\n",
+              devfn, type, host_controller);
+
+       dev->bus = bus;
+       dev->sysdata = node;
+       dev->dev.parent = bus->bridge;
+       dev->dev.bus = &pci_bus_type;
+       dev->devfn = devfn;
+       dev->multifunction = 0;         /* maybe a lie? */
+
+       if (host_controller) {
+               dev->vendor = 0x108e;
+               dev->device = 0x8000;
+               dev->subsystem_vendor = 0x0000;
+               dev->subsystem_device = 0x0000;
+               dev->cfg_size = 256;
+               dev->class = PCI_CLASS_BRIDGE_HOST << 8;
+               sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
+                       0x00, PCI_SLOT(devfn), PCI_FUNC(devfn));
+       } else {
+               dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
+               dev->device = of_getintprop_default(node, "device-id", 0xffff);
+               dev->subsystem_vendor =
+                       of_getintprop_default(node, "subsystem-vendor-id", 0);
+               dev->subsystem_device =
+                       of_getintprop_default(node, "subsystem-id", 0);
+
+               dev->cfg_size = pci_cfg_space_size(dev);
+
+               /* We can't actually use the firmware value, we have
+                * to read what is in the register right now.  One
+                * reason is that in the case of IDE interfaces the
+                * firmware can sample the value before the IDE
+                * interface is programmed into native mode.
+                */
+               pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
+               dev->class = class >> 8;
+
+               sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
+                       dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
+       }
+       printk("    class: 0x%x device name: %s\n",
+              dev->class, pci_name(dev));
+
+       dev->current_state = 4;         /* unknown power state */
+       dev->error_state = pci_channel_io_normal;
+
+       if (host_controller) {
+               dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
+               dev->rom_base_reg = PCI_ROM_ADDRESS1;
+               dev->irq = PCI_IRQ_NONE;
+       } else {
+               if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
+                       /* a PCI-PCI bridge */
+                       dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
+                       dev->rom_base_reg = PCI_ROM_ADDRESS1;
+               } else if (!strcmp(type, "cardbus")) {
+                       dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
+               } else {
+                       dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
+                       dev->rom_base_reg = PCI_ROM_ADDRESS;
+
+                       dev->irq = sd->op->irqs[0];
+                       if (dev->irq == 0xffffffff)
+                               dev->irq = PCI_IRQ_NONE;
+               }
+       }
+       pci_parse_of_addrs(sd->op, node, dev);
+
+       printk("    adding to system ...\n");
+
+       pci_device_add(dev, bus);
+
+       return dev;
+}
+
+static void __init apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
+{
+       u32 idx, first, last;
+
+       first = 8;
+       last = 0;
+       for (idx = 0; idx < 8; idx++) {
+               if ((map & (1 << idx)) != 0) {
+                       if (first > idx)
+                               first = idx;
+                       if (last < idx)
+                               last = idx;
+               }
+       }
+
+       *first_p = first;
+       *last_p = last;
+}
+
+static void __init pci_resource_adjust(struct resource *res,
+                                      struct resource *root)
+{
+       res->start += root->start;
+       res->end += root->start;
+}
+
+/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
+ * a proper 'ranges' property.
+ */
+static void __init apb_fake_ranges(struct pci_dev *dev,
+                                  struct pci_bus *bus,
+                                  struct pci_pbm_info *pbm)
+{
+       struct resource *res;
+       u32 first, last;
+       u8 map;
+
+       pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
+       apb_calc_first_last(map, &first, &last);
+       res = bus->resource[0];
+       res->start = (first << 21);
+       res->end = (last << 21) + ((1 << 21) - 1);
+       res->flags = IORESOURCE_IO;
+       pci_resource_adjust(res, &pbm->io_space);
+
+       pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
+       apb_calc_first_last(map, &first, &last);
+       res = bus->resource[1];
+       res->start = (first << 21);
+       res->end = (last << 21) + ((1 << 21) - 1);
+       res->flags = IORESOURCE_MEM;
+       pci_resource_adjust(res, &pbm->mem_space);
+}
+
+static void __init pci_of_scan_bus(struct pci_pbm_info *pbm,
+                                  struct device_node *node,
+                                  struct pci_bus *bus);
+
+#define GET_64BIT(prop, i)     ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
+
+void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
+                                 struct device_node *node,
+                                 struct pci_dev *dev)
+{
+       struct pci_bus *bus;
+       const u32 *busrange, *ranges;
+       int len, i, simba;
+       struct resource *res;
+       unsigned int flags;
+       u64 size;
+
+       printk("of_scan_pci_bridge(%s)\n", node->full_name);
+
+       /* parse bus-range property */
+       busrange = of_get_property(node, "bus-range", &len);
+       if (busrange == NULL || len != 8) {
+               printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
+                      node->full_name);
+               return;
+       }
+       ranges = of_get_property(node, "ranges", &len);
+       simba = 0;
+       if (ranges == NULL) {
+               const char *model = of_get_property(node, "model", NULL);
+               if (model && !strcmp(model, "SUNW,simba")) {
+                       simba = 1;
+               } else {
+                       printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
+                              node->full_name);
+                       return;
+               }
+       }
+
+       bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
+       if (!bus) {
+               printk(KERN_ERR "Failed to create pci bus for %s\n",
+                      node->full_name);
+               return;
+       }
+
+       bus->primary = dev->bus->number;
+       bus->subordinate = busrange[1];
+       bus->bridge_ctl = 0;
+
+       /* parse ranges property, or cook one up by hand for Simba */
+       /* PCI #address-cells == 3 and #size-cells == 2 always */
+       res = &dev->resource[PCI_BRIDGE_RESOURCES];
+       for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
+               res->flags = 0;
+               bus->resource[i] = res;
+               ++res;
+       }
+       if (simba) {
+               apb_fake_ranges(dev, bus, pbm);
+               goto simba_cont;
+       }
+       i = 1;
+       for (; len >= 32; len -= 32, ranges += 8) {
+               struct resource *root;
+
+               flags = pci_parse_of_flags(ranges[0]);
+               size = GET_64BIT(ranges, 6);
+               if (flags == 0 || size == 0)
+                       continue;
+               if (flags & IORESOURCE_IO) {
+                       res = bus->resource[0];
+                       if (res->flags) {
+                               printk(KERN_ERR "PCI: ignoring extra I/O range"
+                                      " for bridge %s\n", node->full_name);
+                               continue;
+                       }
+                       root = &pbm->io_space;
+               } else {
+                       if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
+                               printk(KERN_ERR "PCI: too many memory ranges"
+                                      " for bridge %s\n", node->full_name);
+                               continue;
+                       }
+                       res = bus->resource[i];
+                       ++i;
+                       root = &pbm->mem_space;
+               }
+
+               res->start = GET_64BIT(ranges, 1);
+               res->end = res->start + size - 1;
+               res->flags = flags;
+
+               /* Another way to implement this would be to add an of_device
+                * layer routine that can calculate a resource for a given
+                * range property value in a PCI device.
+                */
+               pci_resource_adjust(res, root);
+       }
+simba_cont:
+       sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
+               bus->number);
+       printk("    bus name: %s\n", bus->name);
+
+       pci_of_scan_bus(pbm, node, bus);
+}
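
of_scan_pci_bridge() consumes the bridge "ranges" property eight 32-bit cells
(32 bytes) at a time: cell 0 is the child phys.hi word whose space bits
pci_parse_of_flags() maps to resource flags, cells 1-2 form the 64-bit child
(PCI bus) address, cells 3-5 carry the parent address (unused here, since
pci_resource_adjust() rebases against the PBM root instead), and cells 6-7 are
the 64-bit size read via GET_64BIT().  Below is a standalone sketch of
decoding one such entry; the entry contents are invented for illustration:

/* Sketch only, not kernel code: decode one made-up 8-cell "ranges"
 * entry the same way the loop above does.
 */
#include <stdio.h>
#include <stdint.h>

#define GET_64BIT(prop, i) ((((uint64_t)(prop)[(i)]) << 32) | (prop)[(i) + 1])

int main(void)
{
        /* 32-bit MEM window (space code 2 in bits 24-25 of cell 0),
         * 1 MB at PCI bus address 0x100000.
         */
        uint32_t ranges[8] = {
                0x82000000, 0x00000000, 0x00100000,     /* child address */
                0x00000000, 0x7fe00000, 0x00100000,     /* parent address */
                0x00000000, 0x00100000,                 /* size */
        };

        printf("space=%u start=0x%llx size=0x%llx\n",
               (ranges[0] >> 24) & 0x3,
               (unsigned long long)GET_64BIT(ranges, 1),
               (unsigned long long)GET_64BIT(ranges, 6));
        return 0;
}
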
+
+static void __init pci_of_scan_bus(struct pci_pbm_info *pbm,
+                                  struct device_node *node,
+                                  struct pci_bus *bus)
+{
+       struct device_node *child;
+       const u32 *reg;
+       int reglen, devfn;
+       struct pci_dev *dev;
+
+       printk("PCI: scan_bus[%s] bus no %d\n",
+              node->full_name, bus->number);
+
+       child = NULL;
+       while ((child = of_get_next_child(node, child)) != NULL) {
+               printk("  * %s\n", child->full_name);
+               reg = of_get_property(child, "reg", &reglen);
+               if (reg == NULL || reglen < 20)
+                       continue;
+               devfn = (reg[0] >> 8) & 0xff;
+
+               /* create a new pci_dev for this device */
+               dev = of_create_pci_dev(pbm, child, bus, devfn, 0);
+               if (!dev)
+                       continue;
+               printk("PCI: dev header type: %x\n", dev->hdr_type);
+
+               if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+                   dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+                       of_scan_pci_bridge(pbm, child, dev);
+       }
+}
+
+static ssize_t
+show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
+{
+       struct pci_dev *pdev;
+       struct device_node *dp;
+
+       pdev = to_pci_dev(dev);
+       dp = pdev->dev.archdata.prom_node;
+
+       return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
+}
+
+static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
+
+static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
+{
+       struct pci_dev *dev;
+       struct pci_bus *child_bus;
+       int err;
+
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               /* we don't really care if we can create this file or
+                * not, but we need to assign the result of the call
+                * or the world will fall under alien invasion and
+                * everybody will be frozen on a spaceship ready to be
+                * eaten on alpha centauri by some green and jelly
+                * humanoid.
+                */
+               err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
+       }
+       list_for_each_entry(child_bus, &bus->children, node)
+               pci_bus_register_of_sysfs(child_bus);
+}
+
+int pci_host_bridge_read_pci_cfg(struct pci_bus *bus_dev,
+                                unsigned int devfn,
+                                int where, int size,
+                                u32 *value)
+{
+       static u8 fake_pci_config[] = {
+               0x8e, 0x10, /* Vendor: 0x108e (Sun) */
+               0x00, 0x80, /* Device: 0x8000 (PBM) */
+               0x46, 0x01, /* Command: 0x0146 (SERR, PARITY, MASTER, MEM) */
+               0xa0, 0x22, /* Status: 0x02a0 (DEVSEL_MED, FB2B, 66MHZ) */
+               0x00, 0x00, 0x00, 0x06, /* Class: 0x06000000 host bridge */
+               0x00, /* Cacheline: 0x00 */
+               0x40, /* Latency: 0x40 */
+               0x00, /* Header-Type: 0x00 normal */
+       };
+
+       *value = 0;
+       if (where >= 0 && where < sizeof(fake_pci_config) &&
+           (where + size) >= 0 &&
+           (where + size) < sizeof(fake_pci_config) &&
+           size <= sizeof(u32)) {
+               while (size--) {
+                       *value <<= 8;
+                       *value |= fake_pci_config[where + size];
+               }
+       }
+
+       return PCIBIOS_SUCCESSFUL;
+}
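
The fake config table above stores the host bridge's registers as
little-endian bytes, and the read loop assembles the requested bytes from the
highest offset down, so a 16-bit read at offset 0 returns the vendor ID 0x108e
and a 32-bit read returns 0x8000108e (device 0x8000, vendor 0x108e).  Since
the table is only 15 bytes long, an aligned read that reaches past the
header-type byte (for example a 32-bit read at 0x0c, which would also cover
the BIST byte) fails the bounds check and simply returns 0.  A standalone
sketch of the byte-assembly loop follows; read_le() and its four-byte table
are invented for illustration:

/* Sketch only, not kernel code: assemble a little-endian byte table
 * into CPU-order values exactly as the while (size--) loop above does.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t read_le(const uint8_t *cfg, int where, int size)
{
        uint32_t value = 0;

        while (size--) {                /* highest byte first */
                value <<= 8;
                value |= cfg[where + size];
        }
        return value;
}

int main(void)
{
        static const uint8_t cfg[] = { 0x8e, 0x10, 0x00, 0x80 };

        printf("vendor=0x%04x vendor+device=0x%08x\n",
               read_le(cfg, 0, 2), read_le(cfg, 0, 4));
        /* prints vendor=0x108e vendor+device=0x8000108e */
        return 0;
}
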
+
+int pci_host_bridge_write_pci_cfg(struct pci_bus *bus_dev,
+                                 unsigned int devfn,
+                                 int where, int size,
+                                 u32 value)
+{
+       return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_bus * __init pci_scan_one_pbm(struct pci_pbm_info *pbm)
+{
+       struct pci_controller_info *p = pbm->parent;
+       struct device_node *node = pbm->prom_node;
+       struct pci_dev *host_pdev;
+       struct pci_bus *bus;
+
+       printk("PCI: Scanning PBM %s\n", node->full_name);
+
+       /* XXX parent device? XXX */
+       bus = pci_create_bus(NULL, pbm->pci_first_busno, p->pci_ops, pbm);
+       if (!bus) {
+               printk(KERN_ERR "Failed to create bus for %s\n",
+                      node->full_name);
+               return NULL;
+       }
+       bus->secondary = pbm->pci_first_busno;
+       bus->subordinate = pbm->pci_last_busno;
+
+       bus->resource[0] = &pbm->io_space;
+       bus->resource[1] = &pbm->mem_space;
+
+       /* Create the dummy host bridge and link it in.  */
+       host_pdev = of_create_pci_dev(pbm, node, bus, 0x00, 1);
+       bus->self = host_pdev;
+
+       pci_of_scan_bus(pbm, node, bus);
+       pci_bus_add_devices(bus);
+       pci_bus_register_of_sysfs(bus);
+
+       return bus;
+}
+
 static void __init pci_scan_each_controller_bus(void)
 {
        struct pci_controller_info *p;
@@ -360,8 +826,33 @@ void pcibios_align_resource(void *data, struct resource *res,
 {
 }
 
-int pcibios_enable_device(struct pci_dev *pdev, int mask)
+int pcibios_enable_device(struct pci_dev *dev, int mask)
 {
+       u16 cmd, oldcmd;
+       int i;
+
+       pci_read_config_word(dev, PCI_COMMAND, &cmd);
+       oldcmd = cmd;
+
+       for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+               struct resource *res = &dev->resource[i];
+
+               /* Only set up the requested stuff */
+               if (!(mask & (1<<i)))
+                       continue;
+
+               if (res->flags & IORESOURCE_IO)
+                       cmd |= PCI_COMMAND_IO;
+               if (res->flags & IORESOURCE_MEM)
+                       cmd |= PCI_COMMAND_MEMORY;
+       }
+
+       if (cmd != oldcmd) {
+               printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
+                      pci_name(dev), cmd);
+                /* Enable the appropriate bits in the PCI command register.  */
+               pci_write_config_word(dev, PCI_COMMAND, cmd);
+       }
        return 0;
 }
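
The rewritten pcibios_enable_device() enables I/O and memory decoding only for
the resources selected by the caller's BAR mask; the generic
pci_enable_device() path supplies a mask covering every resource, while
pci_enable_device_bars() can restrict it.  A standalone sketch of the
mask-to-PCI_COMMAND mapping follows; the three-entry flags array and the mask
are invented, and the #define values are copied from the kernel headers only
so the sketch builds on its own:

/* Sketch only, not kernel code: map a BAR-enable mask plus resource
 * flags to PCI_COMMAND bits, mirroring the loop above.
 */
#include <stdio.h>
#include <stdint.h>

#define IORESOURCE_IO           0x00000100
#define IORESOURCE_MEM          0x00000200
#define PCI_COMMAND_IO          0x1
#define PCI_COMMAND_MEMORY      0x2

int main(void)
{
        uint32_t flags[3] = { IORESOURCE_IO, IORESOURCE_MEM, IORESOURCE_MEM };
        int mask = (1 << 0) | (1 << 2);         /* enable BARs 0 and 2 only */
        uint16_t cmd = 0;
        int i;

        for (i = 0; i < 3; i++) {
                if (!(mask & (1 << i)))
                        continue;
                if (flags[i] & IORESOURCE_IO)
                        cmd |= PCI_COMMAND_IO;
                if (flags[i] & IORESOURCE_MEM)
                        cmd |= PCI_COMMAND_MEMORY;
        }
        printf("cmd=0x%x\n", cmd);              /* 0x3: both decodes on */
        return 0;
}
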
 
@@ -380,7 +871,7 @@ void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region
        else
                root = &pbm->mem_space;
 
-       pbm->parent->resource_adjust(pdev, &zero_res, root);
+       pci_resource_adjust(&zero_res, root);
 
        region->start = res->start - zero_res.start;
        region->end = res->end - zero_res.start;
@@ -401,7 +892,7 @@ void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
        else
                root = &pbm->mem_space;
 
-       pbm->parent->resource_adjust(pdev, res, root);
+       pci_resource_adjust(res, root);
 }
 EXPORT_SYMBOL(pcibios_bus_to_resource);
 
@@ -422,55 +913,17 @@ char * __devinit pcibios_setup(char *str)
 static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
                                      enum pci_mmap_state mmap_state)
 {
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       struct pci_pbm_info *pbm;
+       struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
        struct pci_controller_info *p;
        unsigned long space_size, user_offset, user_size;
 
-       if (!pcp)
-               return -ENXIO;
-       pbm = pcp->pbm;
-       if (!pbm)
-               return -ENXIO;
-
        p = pbm->parent;
-       if (p->pbms_same_domain) {
-               unsigned long lowest, highest;
-
-               lowest = ~0UL; highest = 0UL;
-               if (mmap_state == pci_mmap_io) {
-                       if (p->pbm_A.io_space.flags) {
-                               lowest = p->pbm_A.io_space.start;
-                               highest = p->pbm_A.io_space.end + 1;
-                       }
-                       if (p->pbm_B.io_space.flags) {
-                               if (lowest > p->pbm_B.io_space.start)
-                                       lowest = p->pbm_B.io_space.start;
-                               if (highest < p->pbm_B.io_space.end + 1)
-                                       highest = p->pbm_B.io_space.end + 1;
-                       }
-                       space_size = highest - lowest;
-               } else {
-                       if (p->pbm_A.mem_space.flags) {
-                               lowest = p->pbm_A.mem_space.start;
-                               highest = p->pbm_A.mem_space.end + 1;
-                       }
-                       if (p->pbm_B.mem_space.flags) {
-                               if (lowest > p->pbm_B.mem_space.start)
-                                       lowest = p->pbm_B.mem_space.start;
-                               if (highest < p->pbm_B.mem_space.end + 1)
-                                       highest = p->pbm_B.mem_space.end + 1;
-                       }
-                       space_size = highest - lowest;
-               }
+       if (mmap_state == pci_mmap_io) {
+               space_size = (pbm->io_space.end -
+                             pbm->io_space.start) + 1;
        } else {
-               if (mmap_state == pci_mmap_io) {
-                       space_size = (pbm->io_space.end -
-                                     pbm->io_space.start) + 1;
-               } else {
-                       space_size = (pbm->mem_space.end -
-                                     pbm->mem_space.start) + 1;
-               }
+               space_size = (pbm->mem_space.end -
+                             pbm->mem_space.start) + 1;
        }
 
        /* Make sure the request is in range. */
@@ -481,31 +934,12 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc
            (user_offset + user_size) > space_size)
                return -EINVAL;
 
-       if (p->pbms_same_domain) {
-               unsigned long lowest = ~0UL;
-
-               if (mmap_state == pci_mmap_io) {
-                       if (p->pbm_A.io_space.flags)
-                               lowest = p->pbm_A.io_space.start;
-                       if (p->pbm_B.io_space.flags &&
-                           lowest > p->pbm_B.io_space.start)
-                               lowest = p->pbm_B.io_space.start;
-               } else {
-                       if (p->pbm_A.mem_space.flags)
-                               lowest = p->pbm_A.mem_space.start;
-                       if (p->pbm_B.mem_space.flags &&
-                           lowest > p->pbm_B.mem_space.start)
-                               lowest = p->pbm_B.mem_space.start;
-               }
-               vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
+       if (mmap_state == pci_mmap_io) {
+               vma->vm_pgoff = (pbm->io_space.start +
+                                user_offset) >> PAGE_SHIFT;
        } else {
-               if (mmap_state == pci_mmap_io) {
-                       vma->vm_pgoff = (pbm->io_space.start +
-                                        user_offset) >> PAGE_SHIFT;
-               } else {
-                       vma->vm_pgoff = (pbm->mem_space.start +
-                                        user_offset) >> PAGE_SHIFT;
-               }
+               vma->vm_pgoff = (pbm->mem_space.start +
+                                user_offset) >> PAGE_SHIFT;
        }
 
        return 0;
@@ -639,9 +1073,8 @@ int pci_domain_nr(struct pci_bus *pbus)
                struct pci_controller_info *p = pbm->parent;
 
                ret = p->index;
-               if (p->pbms_same_domain == 0)
-                       ret = ((ret << 1) +
-                              ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
+               ret = ((ret << 1) +
+                      ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
        }
 
        return ret;
@@ -651,8 +1084,7 @@ EXPORT_SYMBOL(pci_domain_nr);
 #ifdef CONFIG_PCI_MSI
 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 {
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       struct pci_pbm_info *pbm = pcp->pbm;
+       struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
        struct pci_controller_info *p = pbm->parent;
        int virt_irq, err;
 
@@ -670,8 +1102,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq)
 {
        struct msi_desc *entry = get_irq_msi(virt_irq);
        struct pci_dev *pdev = entry->dev;
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       struct pci_pbm_info *pbm = pcp->pbm;
+       struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
        struct pci_controller_info *p = pbm->parent;
 
        if (!pbm->msi_num || !p->setup_msi_irq)
@@ -683,9 +1114,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq)
 
 struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
 {
-       struct pcidev_cookie *pc = pdev->sysdata;
-
-       return pc->op->node;
+       return pdev->dev.archdata.prom_node;
 }
 EXPORT_SYMBOL(pci_device_to_OF_node);
 
index 5a92cb9..1e6aeed 100644
@@ -1,7 +1,6 @@
-/* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $
- * pci_common.c: PCI controller common support.
+/* pci_common.c: PCI controller common support.
  *
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/string.h>
 
 #include "pci_impl.h"
 
-/* Fix self device of BUS and hook it into BUS->self.
- * The pci_scan_bus does not do this for the host bridge.
- */
-void __init pci_fixup_host_bridge_self(struct pci_bus *pbus)
-{
-       struct pci_dev *pdev;
-
-       list_for_each_entry(pdev, &pbus->devices, bus_list) {
-               if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) {
-                       pbus->self = pdev;
-                       return;
-               }
-       }
-
-       prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n");
-       prom_halt();
-}
-
-/* Find the OBP PROM device tree node for a PCI device.  */
-static struct device_node * __init
-find_device_prom_node(struct pci_pbm_info *pbm, struct pci_dev *pdev,
-                     struct device_node *bus_node,
-                     struct linux_prom_pci_registers **pregs,
-                     int *nregs)
+static void pci_register_legacy_regions(struct resource *io_res,
+                                       struct resource *mem_res)
 {
-       struct device_node *dp;
-
-       *nregs = 0;
-
-       /*
-        * Return the PBM's PROM node in case we are it's PCI device,
-        * as the PBM's reg property is different to standard PCI reg
-        * properties. We would delete this device entry otherwise,
-        * which confuses XFree86's device probing...
-        */
-       if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) &&
-           (pdev->vendor == PCI_VENDOR_ID_SUN) &&
-           (pdev->device == PCI_DEVICE_ID_SUN_PBM ||
-            pdev->device == PCI_DEVICE_ID_SUN_SCHIZO ||
-            pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO ||
-            pdev->device == PCI_DEVICE_ID_SUN_SABRE ||
-            pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD))
-               return bus_node;
-
-       dp = bus_node->child;
-       while (dp) {
-               struct linux_prom_pci_registers *regs;
-               struct property *prop;
-               int len;
-
-               prop = of_find_property(dp, "reg", &len);
-               if (!prop)
-                       goto do_next_sibling;
-
-               regs = prop->value;
-               if (((regs[0].phys_hi >> 8) & 0xff) == pdev->devfn) {
-                       *pregs = regs;
-                       *nregs = len / sizeof(struct linux_prom_pci_registers);
-                       return dp;
-               }
-
-       do_next_sibling:
-               dp = dp->sibling;
-       }
-
-       return NULL;
-}
+       struct resource *p;
 
-/* Older versions of OBP on PCI systems encode 64-bit MEM
- * space assignments incorrectly, this fixes them up.  We also
- * take the opportunity here to hide other kinds of bogus
- * assignments.
- */
-static void __init fixup_obp_assignments(struct pci_dev *pdev,
-                                        struct pcidev_cookie *pcp)
-{
-       int i;
-
-       if (pdev->vendor == PCI_VENDOR_ID_AL &&
-           (pdev->device == PCI_DEVICE_ID_AL_M7101 ||
-            pdev->device == PCI_DEVICE_ID_AL_M1533)) {
-               int i;
-
-               /* Zap all of the normal resources, they are
-                * meaningless and generate bogus resource collision
-                * messages.  This is OpenBoot's ill-fated attempt to
-                * represent the implicit resources that these devices
-                * have.
-                */
-               pcp->num_prom_assignments = 0;
-               for (i = 0; i < 6; i++) {
-                       pdev->resource[i].start =
-                               pdev->resource[i].end =
-                               pdev->resource[i].flags = 0;
-               }
-               pdev->resource[PCI_ROM_RESOURCE].start =
-                       pdev->resource[PCI_ROM_RESOURCE].end =
-                       pdev->resource[PCI_ROM_RESOURCE].flags = 0;
+       /* VGA Video RAM. */
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (!p)
                return;
-       }
-
-       for (i = 0; i < pcp->num_prom_assignments; i++) {
-               struct linux_prom_pci_registers *ap;
-               int space;
 
-               ap = &pcp->prom_assignments[i];
-               space = ap->phys_hi >> 24;
-               if ((space & 0x3) == 2 &&
-                   (space & 0x4) != 0) {
-                       ap->phys_hi &= ~(0x7 << 24);
-                       ap->phys_hi |= 0x3 << 24;
-               }
-       }
-}
-
-static ssize_t
-show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
-{
-       struct pci_dev *pdev;
-       struct pcidev_cookie *sysdata;
-
-       pdev = to_pci_dev(dev);
-       sysdata = pdev->sysdata;
-
-       return snprintf (buf, PAGE_SIZE, "%s\n", sysdata->prom_node->full_name);
-}
-
-static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
+       p->name = "Video RAM area";
+       p->start = mem_res->start + 0xa0000UL;
+       p->end = p->start + 0x1ffffUL;
+       p->flags = IORESOURCE_BUSY;
+       request_resource(mem_res, p);
 
-/* Fill in the PCI device cookie sysdata for the given
- * PCI device.  This cookie is the means by which one
- * can get to OBP and PCI controller specific information
- * for a PCI device.
- */
-static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm,
-                                     struct pci_dev *pdev,
-                                     struct device_node *bus_node)
-{
-       struct linux_prom_pci_registers *pregs = NULL;
-       struct pcidev_cookie *pcp;
-       struct device_node *dp;
-       struct property *prop;
-       int nregs, len, err;
-
-       dp = find_device_prom_node(pbm, pdev, bus_node,
-                                  &pregs, &nregs);
-       if (!dp) {
-               /* If it is not in the OBP device tree then
-                * there must be a damn good reason for it.
-                *
-                * So what we do is delete the device from the
-                * PCI device tree completely.  This scenario
-                * is seen, for example, on CP1500 for the
-                * second EBUS/HappyMeal pair if the external
-                * connector for it is not present.
-                */
-               pci_remove_bus_device(pdev);
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (!p)
                return;
-       }
-
-       pcp = kzalloc(sizeof(*pcp), GFP_ATOMIC);
-       if (pcp == NULL) {
-               prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n");
-               prom_halt();
-       }
-       pcp->pbm = pbm;
-       pcp->prom_node = dp;
-       pcp->op = of_find_device_by_node(dp);
-       memcpy(pcp->prom_regs, pregs,
-              nregs * sizeof(struct linux_prom_pci_registers));
-       pcp->num_prom_regs = nregs;
-
-       /* We can't have the pcidev_cookie assignments be just
-        * direct pointers into the property value, since they
-        * are potentially modified by the probing process.
-        */
-       prop = of_find_property(dp, "assigned-addresses", &len);
-       if (!prop) {
-               pcp->num_prom_assignments = 0;
-       } else {
-               memcpy(pcp->prom_assignments, prop->value, len);
-               pcp->num_prom_assignments =
-                       (len / sizeof(pcp->prom_assignments[0]));
-       }
-
-       if (strcmp(dp->name, "ebus") == 0) {
-               struct linux_prom_ebus_ranges *erng;
-               int iter;
-
-               /* EBUS is special... */
-               prop = of_find_property(dp, "ranges", &len);
-               if (!prop) {
-                       prom_printf("EBUS: Fatal error, no range property\n");
-                       prom_halt();
-               }
-               erng = prop->value;
-               len = (len / sizeof(erng[0]));
-               for (iter = 0; iter < len; iter++) {
-                       struct linux_prom_ebus_ranges *ep = &erng[iter];
-                       struct linux_prom_pci_registers *ap;
-
-                       ap = &pcp->prom_assignments[iter];
-
-                       ap->phys_hi = ep->parent_phys_hi;
-                       ap->phys_mid = ep->parent_phys_mid;
-                       ap->phys_lo = ep->parent_phys_lo;
-                       ap->size_hi = 0;
-                       ap->size_lo = ep->size;
-               }
-               pcp->num_prom_assignments = len;
-       }
-
-       fixup_obp_assignments(pdev, pcp);
-
-       pdev->sysdata = pcp;
-
-       /* we don't really care if we can create this file or not,
-        * but we need to assign the result of the call or the world will fall
-        * under alien invasion and everybody will be frozen on a spaceship
-        * ready to be eaten on alpha centauri by some green and jelly humanoid.
-        */
-       err = sysfs_create_file(&pdev->dev.kobj, &dev_attr_obppath.attr);
-}
-
-void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus,
-                                   struct pci_pbm_info *pbm,
-                                   struct device_node *dp)
-{
-       struct pci_dev *pdev, *pdev_next;
-       struct pci_bus *this_pbus, *pbus_next;
-
-       /* This must be _safe because the cookie fillin
-          routine can delete devices from the tree.  */
-       list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list)
-               pdev_cookie_fillin(pbm, pdev, dp);
-
-       list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) {
-               struct pcidev_cookie *pcp = this_pbus->self->sysdata;
-
-               pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node);
-       }
-}
 
-static void __init bad_assignment(struct pci_dev *pdev,
-                                 struct linux_prom_pci_registers *ap,
-                                 struct resource *res,
-                                 int do_prom_halt)
-{
-       prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n",
-                   pdev->bus->number, pdev->devfn);
-       if (ap)
-               prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n",
-                           ap->phys_hi, ap->phys_mid, ap->phys_lo,
-                           ap->size_hi, ap->size_lo);
-       if (res)
-               prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n",
-                           res->start, res->end, res->flags);
-       if (do_prom_halt)
-               prom_halt();
-}
-
-static struct resource *
-__init get_root_resource(struct linux_prom_pci_registers *ap,
-                        struct pci_pbm_info *pbm)
-{
-       int space = (ap->phys_hi >> 24) & 3;
-
-       switch (space) {
-       case 0:
-               /* Configuration space, silently ignore it. */
-               return NULL;
-
-       case 1:
-               /* 16-bit IO space */
-               return &pbm->io_space;
-
-       case 2:
-               /* 32-bit MEM space */
-               return &pbm->mem_space;
-
-       case 3:
-               /* 64-bit MEM space, these are allocated out of
-                * the 32-bit mem_space range for the PBM, ie.
-                * we just zero out the upper 32-bits.
-                */
-               return &pbm->mem_space;
-
-       default:
-               printk("PCI: What is resource space %x?\n", space);
-               return NULL;
-       };
-}
-
-static struct resource *
-__init get_device_resource(struct linux_prom_pci_registers *ap,
-                          struct pci_dev *pdev)
-{
-       struct resource *res;
-       int breg = (ap->phys_hi & 0xff);
-
-       switch (breg) {
-       case  PCI_ROM_ADDRESS:
-               /* Unfortunately I have seen several cases where
-                * buggy FCODE uses a space value of '1' (I/O space)
-                * in the register property for the ROM address
-                * so disable this sanity check for now.
-                */
-#if 0
-       {
-               int space = (ap->phys_hi >> 24) & 3;
-
-               /* It had better be MEM space. */
-               if (space != 2)
-                       bad_assignment(pdev, ap, NULL, 0);
-       }
-#endif
-               res = &pdev->resource[PCI_ROM_RESOURCE];
-               break;
-
-       case PCI_BASE_ADDRESS_0:
-       case PCI_BASE_ADDRESS_1:
-       case PCI_BASE_ADDRESS_2:
-       case PCI_BASE_ADDRESS_3:
-       case PCI_BASE_ADDRESS_4:
-       case PCI_BASE_ADDRESS_5:
-               res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4];
-               break;
-
-       default:
-               bad_assignment(pdev, ap, NULL, 0);
-               res = NULL;
-               break;
-       };
-
-       return res;
-}
-
-static void __init pdev_record_assignments(struct pci_pbm_info *pbm,
-                                          struct pci_dev *pdev)
-{
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       int i;
-
-       for (i = 0; i < pcp->num_prom_assignments; i++) {
-               struct linux_prom_pci_registers *ap;
-               struct resource *root, *res;
-
-               /* The format of this property is specified in
-                * the PCI Bus Binding to IEEE1275-1994.
-                */
-               ap = &pcp->prom_assignments[i];
-               root = get_root_resource(ap, pbm);
-               res = get_device_resource(ap, pdev);
-               if (root == NULL || res == NULL ||
-                   res->flags == 0)
-                       continue;
-
-               /* Ok we know which resource this PROM assignment is
-                * for, sanity check it.
-                */
-               if ((res->start & 0xffffffffUL) != ap->phys_lo)
-                       bad_assignment(pdev, ap, res, 1);
-
-               /* If it is a 64-bit MEM space assignment, verify that
-                * the resource is too and that the upper 32-bits match.
-                */
-               if (((ap->phys_hi >> 24) & 3) == 3) {
-                       if (((res->flags & IORESOURCE_MEM) == 0) ||
-                           ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
-                            != PCI_BASE_ADDRESS_MEM_TYPE_64))
-                               bad_assignment(pdev, ap, res, 1);
-                       if ((res->start >> 32) != ap->phys_mid)
-                               bad_assignment(pdev, ap, res, 1);
-
-                       /* PBM cannot generate cpu initiated PIOs
-                        * to the full 64-bit space.  Therefore the
-                        * upper 32-bits better be zero.  If it is
-                        * not, just skip it and we will assign it
-                        * properly ourselves.
-                        */
-                       if ((res->start >> 32) != 0UL) {
-                               printk(KERN_ERR "PCI: OBP assigns out of range MEM address "
-                                      "%016lx for region %ld on device %s\n",
-                                      res->start, (res - &pdev->resource[0]), pci_name(pdev));
-                               continue;
-                       }
-               }
-
-               /* Adjust the resource into the physical address space
-                * of this PBM.
-                */
-               pbm->parent->resource_adjust(pdev, res, root);
-
-               if (request_resource(root, res) < 0) {
-                       int rnum;
-
-                       /* OK, there is some conflict.  But this is fine
-                        * since we'll reassign it in the fixup pass.
-                        *
-                        * Do not print the warning for ROM resources
-                        * as such a conflict is quite common and
-                        * harmless as the ROM bar is disabled.
-                        */
-                       rnum = (res - &pdev->resource[0]);
-                       if (rnum != PCI_ROM_RESOURCE)
-                               printk(KERN_ERR "PCI: Resource collision, "
-                                      "region %d "
-                                      "[%016lx:%016lx] of device %s\n",
-                                      rnum,
-                                      res->start, res->end,
-                                      pci_name(pdev));
-               }
-       }
-}
-
-void __init pci_record_assignments(struct pci_pbm_info *pbm,
-                                  struct pci_bus *pbus)
-{
-       struct pci_dev *dev;
-       struct pci_bus *bus;
+       p->name = "System ROM";
+       p->start = mem_res->start + 0xf0000UL;
+       p->end = p->start + 0xffffUL;
+       p->flags = IORESOURCE_BUSY;
+       request_resource(mem_res, p);
 
-       list_for_each_entry(dev, &pbus->devices, bus_list)
-               pdev_record_assignments(pbm, dev);
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (!p)
+               return;
 
-       list_for_each_entry(bus, &pbus->children, node)
-               pci_record_assignments(pbm, bus);
+       p->name = "Video ROM";
+       p->start = mem_res->start + 0xc0000UL;
+       p->end = p->start + 0x7fffUL;
+       p->flags = IORESOURCE_BUSY;
+       request_resource(mem_res, p);
 }
 
-/* Return non-zero if PDEV has implicit I/O resources even
- * though it may not have an I/O base address register
- * active.
- */
-static int __init has_implicit_io(struct pci_dev *pdev)
+static void pci_register_iommu_region(struct pci_pbm_info *pbm)
 {
-       int class = pdev->class >> 8;
+       const u32 *vdma = of_get_property(pbm->prom_node, "virtual-dma", NULL);
 
-       if (class == PCI_CLASS_NOT_DEFINED ||
-           class == PCI_CLASS_NOT_DEFINED_VGA ||
-           class == PCI_CLASS_STORAGE_IDE ||
-           (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
-               return 1;
+       if (vdma) {
+               struct resource *rp = kmalloc(sizeof(*rp), GFP_KERNEL);
 
-       return 0;
-}
-
-static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
-                                         struct pci_dev *pdev)
-{
-       u32 reg;
-       u16 cmd;
-       int i, io_seen, mem_seen;
-
-       io_seen = mem_seen = 0;
-       for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-               struct resource *root, *res;
-               unsigned long size, min, max, align;
-
-               res = &pdev->resource[i];
-
-               if (res->flags & IORESOURCE_IO)
-                       io_seen++;
-               else if (res->flags & IORESOURCE_MEM)
-                       mem_seen++;
-
-               /* If it is already assigned or the resource does
-                * not exist, there is nothing to do.
-                */
-               if (res->parent != NULL || res->flags == 0UL)
-                       continue;
-
-               /* Determine the root we allocate from. */
-               if (res->flags & IORESOURCE_IO) {
-                       root = &pbm->io_space;
-                       min = root->start + 0x400UL;
-                       max = root->end;
-               } else {
-                       root = &pbm->mem_space;
-                       min = root->start;
-                       max = min + 0x80000000UL;
-               }
-
-               size = res->end - res->start;
-               align = size + 1;
-               if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) {
-                       /* uh oh */
-                       prom_printf("PCI: Failed to allocate resource %d for %s\n",
-                                   i, pci_name(pdev));
+               if (!rp) {
+                       prom_printf("Cannot allocate IOMMU resource.\n");
                        prom_halt();
                }
-
-               /* Update PCI config space. */
-               pbm->parent->base_address_update(pdev, i);
-       }
-
-       /* Special case, disable the ROM.  Several devices
-        * act funny (ie. do not respond to memory space writes)
-        * when it is left enabled.  A good example are Qlogic,ISP
-        * adapters.
-        */
-       pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &reg);
-       reg &= ~PCI_ROM_ADDRESS_ENABLE;
-       pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg);
-
-       /* If we saw I/O or MEM resources, enable appropriate
-        * bits in PCI command register.
-        */
-       if (io_seen || mem_seen) {
-               pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-               if (io_seen || has_implicit_io(pdev))
-                       cmd |= PCI_COMMAND_IO;
-               if (mem_seen)
-                       cmd |= PCI_COMMAND_MEMORY;
-               pci_write_config_word(pdev, PCI_COMMAND, cmd);
-       }
-
-       /* If this is a PCI bridge or an IDE controller,
-        * enable bus mastering.  In the former case also
-        * set the cache line size correctly.
-        */
-       if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) ||
-           (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) &&
-            ((pdev->class & 0x80) != 0))) {
-               pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-               cmd |= PCI_COMMAND_MASTER;
-               pci_write_config_word(pdev, PCI_COMMAND, cmd);
-
-               if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
-                       pci_write_config_byte(pdev,
-                                             PCI_CACHE_LINE_SIZE,
-                                             (64 / sizeof(u32)));
+               rp->name = "IOMMU";
+               rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
+               rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
+               rp->flags = IORESOURCE_BUSY;
+               request_resource(&pbm->mem_space, rp);
        }
 }
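
pci_register_iommu_region() reads the PBM's "virtual-dma" property as an
(offset, size) pair relative to the 32-bit MEM window and marks that slice
busy, so MEM BARs can never be assigned inside the IOMMU's DVMA translation
range.  A standalone sketch of the arithmetic follows; the base and property
values are invented and a real machine's property may differ:

/* Sketch only, not kernel code: compute the reserved IOMMU window the
 * same way pci_register_iommu_region() does, from made-up values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t mem_base = 0x7fe00000000ULL;           /* made-up PBM MEM base */
        uint32_t vdma[2] = { 0x80000000, 0x80000000 };  /* offset, size */
        uint64_t start = mem_base + vdma[0];
        uint64_t end = start + (uint64_t)vdma[1] - 1;

        printf("IOMMU window [0x%llx, 0x%llx]\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}
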
 
-void __init pci_assign_unassigned(struct pci_pbm_info *pbm,
-                                 struct pci_bus *pbus)
+void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
 {
-       struct pci_dev *dev;
-       struct pci_bus *bus;
-
-       list_for_each_entry(dev, &pbus->devices, bus_list)
-               pdev_assign_unassigned(pbm, dev);
+       const struct linux_prom_pci_ranges *pbm_ranges;
+       int i, saw_mem, saw_io;
+       int num_pbm_ranges;
 
-       list_for_each_entry(bus, &pbus->children, node)
-               pci_assign_unassigned(pbm, bus);
-}
+       saw_mem = saw_io = 0;
+       pbm_ranges = of_get_property(pbm->prom_node, "ranges", &i);
+       num_pbm_ranges = i / sizeof(*pbm_ranges);
 
-static void __init pdev_fixup_irq(struct pci_dev *pdev)
-{
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       struct of_device *op = pcp->op;
+       for (i = 0; i < num_pbm_ranges; i++) {
+               const struct linux_prom_pci_ranges *pr = &pbm_ranges[i];
+               unsigned long a;
+               u32 parent_phys_hi, parent_phys_lo;
+               int type;
 
-       if (op->irqs[0] == 0xffffffff) {
-               pdev->irq = PCI_IRQ_NONE;
-               return;
-       }
+               parent_phys_hi = pr->parent_phys_hi;
+               parent_phys_lo = pr->parent_phys_lo;
+               if (tlb_type == hypervisor)
+                       parent_phys_hi &= 0x0fffffff;
 
-       pdev->irq = op->irqs[0];
+               type = (pr->child_phys_hi >> 24) & 0x3;
+               a = (((unsigned long)parent_phys_hi << 32UL) |
+                    ((unsigned long)parent_phys_lo  <<  0UL));
 
-       pci_write_config_byte(pdev, PCI_INTERRUPT_LINE,
-                             pdev->irq & PCI_IRQ_INO);
-}
-
-void __init pci_fixup_irq(struct pci_pbm_info *pbm,
-                         struct pci_bus *pbus)
-{
-       struct pci_dev *dev;
-       struct pci_bus *bus;
-
-       list_for_each_entry(dev, &pbus->devices, bus_list)
-               pdev_fixup_irq(dev);
-
-       list_for_each_entry(bus, &pbus->children, node)
-               pci_fixup_irq(pbm, bus);
-}
-
-static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz)
-{
-       u16 cmd;
-       u8 hdr_type, min_gnt, ltimer;
-
-       pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-       cmd |= PCI_COMMAND_MASTER;
-       pci_write_config_word(pdev, PCI_COMMAND, cmd);
-
-       /* Read it back, if the mastering bit did not
-        * get set, the device does not support bus
-        * mastering so we have nothing to do here.
-        */
-       pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-       if ((cmd & PCI_COMMAND_MASTER) == 0)
-               return;
-
-       /* Set correct cache line size, 64-byte on all
-        * Sparc64 PCI systems.  Note that the value is
-        * measured in 32-bit words.
-        */
-       pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
-                             64 / sizeof(u32));
-
-       pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type);
-       hdr_type &= ~0x80;
-       if (hdr_type != PCI_HEADER_TYPE_NORMAL)
-               return;
-
-       /* If the latency timer is already programmed with a non-zero
-        * value, assume whoever set it (OBP or whoever) knows what
-        * they are doing.
-        */
-       pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ltimer);
-       if (ltimer != 0)
-               return;
-
-       /* XXX Since I'm tipping off the min grant value to
-        * XXX choose a suitable latency timer value, I also
-        * XXX considered making use of the max latency value
-        * XXX as well.  Unfortunately I've seen too many bogusly
-        * XXX low settings for it to the point where it lacks
-        * XXX any usefulness.  In one case, an ethernet card
-        * XXX claimed a min grant of 10 and a max latency of 5.
-        * XXX Now, if I had two such cards on the same bus I
-        * XXX could not set the desired burst period (calculated
-        * XXX from min grant) without violating the max latency
-        * XXX bound.  Duh...
-        * XXX
-        * XXX I blame dumb PC bios implementors for stuff like
-        * XXX this, most of them don't even try to do something
-        * XXX sensible with latency timer values and just set some
-        * XXX default value (usually 32) into every device.
-        */
-
-       pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt);
-
-       if (min_gnt == 0) {
-               /* If no min_gnt setting then use a default
-                * value.
-                */
-               if (is_66mhz)
-                       ltimer = 16;
-               else
-                       ltimer = 32;
-       } else {
-               int shift_factor;
-
-               if (is_66mhz)
-                       shift_factor = 2;
-               else
-                       shift_factor = 3;
-
-               /* Use a default value when the min_gnt value
-                * is erroneously high.
-                */
-               if (((unsigned int) min_gnt << shift_factor) > 512 ||
-                   ((min_gnt << shift_factor) & 0xff) == 0) {
-                       ltimer = 8 << shift_factor;
-               } else {
-                       ltimer = min_gnt << shift_factor;
-               }
-       }
+               switch (type) {
+               case 0:
+                       /* PCI config space, 16MB */
+                       pbm->config_space = a;
+                       break;
 
-       pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer);
-}
+               case 1:
+                       /* 16-bit IO space, 16MB */
+                       pbm->io_space.start = a;
+                       pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
+                       pbm->io_space.flags = IORESOURCE_IO;
+                       saw_io = 1;
+                       break;
 
-void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
-                                    struct pci_bus *pbus)
-{
-       struct pci_dev *pdev;
-       int all_are_66mhz;
-       u16 status;
+               case 2:
+                       /* 32-bit MEM space, 2GB */
+                       pbm->mem_space.start = a;
+                       pbm->mem_space.end = a + (0x80000000UL - 1UL);
+                       pbm->mem_space.flags = IORESOURCE_MEM;
+                       saw_mem = 1;
+                       break;
 
-       if (pbm->is_66mhz_capable == 0) {
-               all_are_66mhz = 0;
-               goto out;
-       }
+               case 3:
+                       /* XXX 64-bit MEM handling XXX */
 
-       all_are_66mhz = 1;
-       list_for_each_entry(pdev, &pbus->devices, bus_list) {
-               pci_read_config_word(pdev, PCI_STATUS, &status);
-               if (!(status & PCI_STATUS_66MHZ)) {
-                       all_are_66mhz = 0;
+               default:
                        break;
-               }
+               };
        }
-out:
-       pbm->all_devs_66mhz = all_are_66mhz;
-
-       printk("PCI%d(PBM%c): Bus running at %dMHz\n",
-              pbm->parent->index,
-              (pbm == &pbm->parent->pbm_A) ? 'A' : 'B',
-              (all_are_66mhz ? 66 : 33));
-}
-
-void pci_setup_busmastering(struct pci_pbm_info *pbm,
-                           struct pci_bus *pbus)
-{
-       struct pci_dev *dev;
-       struct pci_bus *bus;
-       int is_66mhz;
-
-       is_66mhz = pbm->is_66mhz_capable && pbm->all_devs_66mhz;
-
-       list_for_each_entry(dev, &pbus->devices, bus_list)
-               pdev_setup_busmastering(dev, is_66mhz);
-
-       list_for_each_entry(bus, &pbus->children, node)
-               pci_setup_busmastering(pbm, bus);
-}
-
-void pci_register_legacy_regions(struct resource *io_res,
-                                struct resource *mem_res)
-{
-       struct resource *p;
-
-       /* VGA Video RAM. */
-       p = kzalloc(sizeof(*p), GFP_KERNEL);
-       if (!p)
-               return;
 
-       p->name = "Video RAM area";
-       p->start = mem_res->start + 0xa0000UL;
-       p->end = p->start + 0x1ffffUL;
-       p->flags = IORESOURCE_BUSY;
-       request_resource(mem_res, p);
+       if (!saw_io || !saw_mem) {
+               prom_printf("%s: Fatal error, missing %s PBM range.\n",
+                           pbm->name,
+                           (!saw_io ? "IO" : "MEM"));
+               prom_halt();
+       }
 
-       p = kzalloc(sizeof(*p), GFP_KERNEL);
-       if (!p)
-               return;
+       printk("%s: PCI IO[%lx] MEM[%lx]\n",
+              pbm->name,
+              pbm->io_space.start,
+              pbm->mem_space.start);
 
-       p->name = "System ROM";
-       p->start = mem_res->start + 0xf0000UL;
-       p->end = p->start + 0xffffUL;
-       p->flags = IORESOURCE_BUSY;
-       request_resource(mem_res, p);
+       pbm->io_space.name = pbm->mem_space.name = pbm->name;
 
-       p = kzalloc(sizeof(*p), GFP_KERNEL);
-       if (!p)
-               return;
+       request_resource(&ioport_resource, &pbm->io_space);
+       request_resource(&iomem_resource, &pbm->mem_space);
 
-       p->name = "Video ROM";
-       p->start = mem_res->start + 0xc0000UL;
-       p->end = p->start + 0x7fffUL;
-       p->flags = IORESOURCE_BUSY;
-       request_resource(mem_res, p);
+       pci_register_legacy_regions(&pbm->io_space,
+                                   &pbm->mem_space);
+       pci_register_iommu_region(pbm);
 }
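
pci_determine_mem_io_space() classifies each PBM "ranges" entry by bits 24-25
of the child phys.hi cell (0 = config space, 1 = 16-bit I/O, 2 = 32-bit MEM,
3 = 64-bit MEM, still unhandled here) and builds the CPU-physical base from
the two parent cells, masking off the top nibble of parent_phys_hi on sun4v
(tlb_type == hypervisor).  A standalone sketch of that decoding follows; the
struct merely mirrors linux_prom_pci_ranges for illustration and the entry
values are invented:

/* Sketch only, not kernel code: classify one made-up PBM "ranges"
 * entry the same way the loop above does.
 */
#include <stdio.h>
#include <stdint.h>

struct prom_pci_range {                 /* stand-in for linux_prom_pci_ranges */
        uint32_t child_phys_hi;         /* bits 24-25 select the space type */
        uint32_t child_phys_mid;
        uint32_t child_phys_lo;
        uint32_t parent_phys_hi;
        uint32_t parent_phys_lo;
        uint32_t size_hi;
        uint32_t size_lo;
};

int main(void)
{
        struct prom_pci_range r = {
                .child_phys_hi  = 0x01000000,   /* type 1: 16-bit I/O space */
                .parent_phys_hi = 0x000007ff,
                .parent_phys_lo = 0xee000000,
        };
        uint64_t a = ((uint64_t)r.parent_phys_hi << 32) | r.parent_phys_lo;
        int type = (r.child_phys_hi >> 24) & 0x3;

        printf("type=%d base=0x%llx\n", type, (unsigned long long)a);
        return 0;
}
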
 
 /* Generic helper routines for PCI error reporting. */
index 971e2be..1208583 100644
@@ -1,7 +1,6 @@
-/* $Id: pci_impl.h,v 1.9 2001/06/13 06:34:30 davem Exp $
- * pci_impl.h: Helper definitions for PCI controller support.
+/* pci_impl.h: Helper definitions for PCI controller support.
  *
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #ifndef PCI_IMPL_H
 #include <asm/prom.h>
 
 extern struct pci_controller_info *pci_controller_root;
+extern unsigned long pci_memspace_mask;
 
 extern int pci_num_controllers;
 
 /* PCI bus scanning and fixup support. */
-extern void pci_fixup_host_bridge_self(struct pci_bus *pbus);
-extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus,
-                                   struct pci_pbm_info *pbm,
-                                   struct device_node *prom_node);
-extern void pci_record_assignments(struct pci_pbm_info *pbm,
-                                  struct pci_bus *pbus);
-extern void pci_assign_unassigned(struct pci_pbm_info *pbm,
-                                 struct pci_bus *pbus);
-extern void pci_fixup_irq(struct pci_pbm_info *pbm,
-                         struct pci_bus *pbus);
-extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
-                                           struct pci_bus *pbus);
-extern void pci_setup_busmastering(struct pci_pbm_info *pbm,
-                                  struct pci_bus *pbus);
-extern void pci_register_legacy_regions(struct resource *io_res,
-                                       struct resource *mem_res);
+extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm);
+extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
+
+extern int pci_host_bridge_read_pci_cfg(struct pci_bus *bus_dev,
+                                       unsigned int devfn,
+                                       int where, int size,
+                                       u32 *value);
+extern int pci_host_bridge_write_pci_cfg(struct pci_bus *bus_dev,
+                                        unsigned int devfn,
+                                        int where, int size,
+                                        u32 value);
 
 /* Error reporting support. */
 extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
index 7aca0f3..6671277 100644
@@ -1,7 +1,6 @@
-/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
- * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
+/* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
  *
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  */
 
@@ -36,7 +35,7 @@
                               "i" (ASI_PHYS_BYPASS_EC_E))
 
 /* Must be invoked under the IOMMU lock. */
-static void __iommu_flushall(struct pci_iommu *iommu)
+static void __iommu_flushall(struct iommu *iommu)
 {
        unsigned long tag;
        int entry;
@@ -64,7 +63,7 @@ static void __iommu_flushall(struct pci_iommu *iommu)
 #define IOPTE_IS_DUMMY(iommu, iopte)   \
        ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 
-static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
+static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 {
        unsigned long val = iopte_val(*iopte);
 
@@ -75,9 +74,9 @@ static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
 }
 
 /* Based largely upon the ppc64 iommu allocator.  */
-static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
+static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
 {
-       struct pci_iommu_arena *arena = &iommu->arena;
+       struct iommu_arena *arena = &iommu->arena;
        unsigned long n, i, start, end, limit;
        int pass;
 
@@ -116,7 +115,7 @@ again:
        return n;
 }
 
-static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
 {
        unsigned long i;
 
@@ -124,7 +123,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
                __clear_bit(i, arena->map);
 }
 
-void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
 {
        unsigned long i, tsbbase, order, sz, num_tsb_entries;
 
@@ -170,7 +169,7 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset,
                iopte_make_dummy(iommu, &iommu->page_table[i]);
 }
 
-static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
 {
        long entry;
 
@@ -181,12 +180,12 @@ static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npage
        return iommu->page_table + entry;
 }
 
-static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
+static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
 {
        pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
-static int iommu_alloc_ctx(struct pci_iommu *iommu)
+static int iommu_alloc_ctx(struct iommu *iommu)
 {
        int lowest = iommu->ctx_lowest_free;
        int sz = IOMMU_NUM_CTXS - lowest;
@@ -205,7 +204,7 @@ static int iommu_alloc_ctx(struct pci_iommu *iommu)
        return n;
 }
 
-static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
 {
        if (likely(ctx)) {
                __clear_bit(ctx, iommu->ctx_bitmap);
@@ -220,8 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
  */
 static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
+       struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, first_page;
        void *ret;
@@ -237,8 +235,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
+       iommu = pdev->dev.archdata.iommu;
 
        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
@@ -268,14 +265,12 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 /* Free and unmap a consistent DMA translation. */
 static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
+       struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages;
 
        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
+       iommu = pdev->dev.archdata.iommu;
        iopte = iommu->page_table +
                ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
@@ -295,18 +290,16 @@ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
  */
 static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
-       struct pci_strbuf *strbuf;
+       struct iommu *iommu;
+       struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, ctx;
        u32 bus_addr, ret;
        unsigned long iopte_protection;
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
-       strbuf = &pcp->pbm->stc;
+       iommu = pdev->dev.archdata.iommu;
+       strbuf = pdev->dev.archdata.stc;
 
        if (unlikely(direction == PCI_DMA_NONE))
                goto bad_no_ctx;
@@ -349,7 +342,7 @@ bad_no_ctx:
        return PCI_DMA_ERROR_CODE;
 }
 
-static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
+static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
 {
        int limit;
 
@@ -416,9 +409,8 @@ do_flush_sync:
 /* Unmap a single streaming mode DMA translation. */
 static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
-       struct pci_strbuf *strbuf;
+       struct iommu *iommu;
+       struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, ctx, i;
 
@@ -428,9 +420,8 @@ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
                return;
        }
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
-       strbuf = &pcp->pbm->stc;
+       iommu = pdev->dev.archdata.iommu;
+       strbuf = pdev->dev.archdata.stc;
 
        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
@@ -549,9 +540,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
  */
 static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
-       struct pci_strbuf *strbuf;
+       struct iommu *iommu;
+       struct strbuf *strbuf;
        unsigned long flags, ctx, npages, iopte_protection;
        iopte_t *base;
        u32 dma_base;
@@ -570,9 +560,8 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
                return 1;
        }
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
-       strbuf = &pcp->pbm->stc;
+       iommu = pdev->dev.archdata.iommu;
+       strbuf = pdev->dev.archdata.stc;
        
        if (unlikely(direction == PCI_DMA_NONE))
                goto bad_no_ctx;
@@ -636,9 +625,8 @@ bad_no_ctx:
 /* Unmap a set of streaming mode DMA translations. */
 static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
-       struct pci_strbuf *strbuf;
+       struct iommu *iommu;
+       struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, ctx, i, npages;
        u32 bus_addr;
@@ -648,9 +636,8 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
                        WARN_ON(1);
        }
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
-       strbuf = &pcp->pbm->stc;
+       iommu = pdev->dev.archdata.iommu;
+       strbuf = pdev->dev.archdata.stc;
        
        bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
@@ -696,14 +683,12 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
  */
 static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
-       struct pci_strbuf *strbuf;
+       struct iommu *iommu;
+       struct strbuf *strbuf;
        unsigned long flags, ctx, npages;
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
-       strbuf = &pcp->pbm->stc;
+       iommu = pdev->dev.archdata.iommu;
+       strbuf = pdev->dev.archdata.stc;
 
        if (!strbuf->strbuf_enabled)
                return;
@@ -736,15 +721,13 @@ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_
  */
 static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
-       struct pci_strbuf *strbuf;
+       struct iommu *iommu;
+       struct strbuf *strbuf;
        unsigned long flags, ctx, npages, i;
        u32 bus_addr;
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
-       strbuf = &pcp->pbm->stc;
+       iommu = pdev->dev.archdata.iommu;
+       strbuf = pdev->dev.archdata.stc;
 
        if (!strbuf->strbuf_enabled)
                return;
@@ -775,7 +758,7 @@ static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-struct pci_iommu_ops pci_sun4u_iommu_ops = {
+const struct pci_iommu_ops pci_sun4u_iommu_ops = {
        .alloc_consistent               = pci_4u_alloc_consistent,
        .free_consistent                = pci_4u_free_consistent,
        .map_single                     = pci_4u_map_single,
@@ -809,13 +792,12 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
 
 int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
 {
-       struct pcidev_cookie *pcp = pdev->sysdata;
        u64 dma_addr_mask;
 
        if (pdev == NULL) {
                dma_addr_mask = 0xffffffff;
        } else {
-               struct pci_iommu *iommu = pcp->pbm->iommu;
+               struct iommu *iommu = pdev->dev.archdata.iommu;
 
                dma_addr_mask = iommu->dma_addr_mask;
 
index fda5db2..253d40e 100644 (file)
@@ -1,7 +1,6 @@
-/* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $
- * pci_psycho.c: PSYCHO/U2P specific PCI controller support.
+/* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
  *
- * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1998, 1999 Eddie C. Dost   (ecd@skynet.be)
  * Copyright (C) 1999 Jakub Jelinek   (jakub@redhat.com)
  */
@@ -119,6 +118,10 @@ static int psycho_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
        u16 tmp16;
        u8 tmp8;
 
+       if (bus_dev == pbm->pci_bus && devfn == 0x00)
+               return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
+                                                   size, value);
+
        switch (size) {
        case 1:
                *value = 0xff;
@@ -172,6 +175,9 @@ static int psycho_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
        unsigned char bus = bus_dev->number;
        u32 *addr;
 
+       if (bus_dev == pbm->pci_bus && devfn == 0x00)
+               return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
+                                                    size, value);
        addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
        if (!addr)
                return PCIBIOS_SUCCESSFUL;
@@ -263,7 +269,7 @@ static void __psycho_check_one_stc(struct pci_controller_info *p,
                                   struct pci_pbm_info *pbm,
                                   int is_pbm_a)
 {
-       struct pci_strbuf *strbuf = &pbm->stc;
+       struct strbuf *strbuf = &pbm->stc;
        unsigned long regbase = p->pbm_A.controller_regs;
        unsigned long err_base, tag_base, line_base;
        u64 control;
@@ -412,7 +418,7 @@ static void psycho_check_iommu_error(struct pci_controller_info *p,
                                     unsigned long afar,
                                     enum psycho_error_type type)
 {
-       struct pci_iommu *iommu = p->pbm_A.iommu;
+       struct iommu *iommu = p->pbm_A.iommu;
        unsigned long iommu_tag[16];
        unsigned long iommu_data[16];
        unsigned long flags;
@@ -895,59 +901,6 @@ static void psycho_register_error_handlers(struct pci_controller_info *p)
 }
 
 /* PSYCHO boot time probing and initialization. */
-static void psycho_resource_adjust(struct pci_dev *pdev,
-                                  struct resource *res,
-                                  struct resource *root)
-{
-       res->start += root->start;
-       res->end += root->start;
-}
-
-static void psycho_base_address_update(struct pci_dev *pdev, int resource)
-{
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       struct pci_pbm_info *pbm = pcp->pbm;
-       struct resource *res, *root;
-       u32 reg;
-       int where, size, is_64bit;
-
-       res = &pdev->resource[resource];
-       if (resource < 6) {
-               where = PCI_BASE_ADDRESS_0 + (resource * 4);
-       } else if (resource == PCI_ROM_RESOURCE) {
-               where = pdev->rom_base_reg;
-       } else {
-               /* Somebody might have asked allocation of a non-standard resource */
-               return;
-       }
-
-       is_64bit = 0;
-       if (res->flags & IORESOURCE_IO)
-               root = &pbm->io_space;
-       else {
-               root = &pbm->mem_space;
-               if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
-                   == PCI_BASE_ADDRESS_MEM_TYPE_64)
-                       is_64bit = 1;
-       }
-
-       size = res->end - res->start;
-       pci_read_config_dword(pdev, where, &reg);
-       reg = ((reg & size) |
-              (((u32)(res->start - root->start)) & ~size));
-       if (resource == PCI_ROM_RESOURCE) {
-               reg |= PCI_ROM_ADDRESS_ENABLE;
-               res->flags |= IORESOURCE_ROM_ENABLE;
-       }
-       pci_write_config_dword(pdev, where, reg);
-
-       /* This knows that the upper 32-bits of the address
-        * must be zero.  Our PCI common layer enforces this.
-        */
-       if (is_64bit)
-               pci_write_config_dword(pdev, where + 4, 0);
-}
-
 static void pbm_config_busmastering(struct pci_pbm_info *pbm)
 {
        u8 *addr;
@@ -968,28 +921,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
 static void pbm_scan_bus(struct pci_controller_info *p,
                         struct pci_pbm_info *pbm)
 {
-       struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-
-       if (!cookie) {
-               prom_printf("PSYCHO: Critical allocation failure.\n");
-               prom_halt();
-       }
-
-       /* All we care about is the PBM. */
-       cookie->pbm = pbm;
-
-       pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
-                                   p->pci_ops,
-                                   pbm);
-       pci_fixup_host_bridge_self(pbm->pci_bus);
-       pbm->pci_bus->self->sysdata = cookie;
-
-       pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
-       pci_record_assignments(pbm, pbm->pci_bus);
-       pci_assign_unassigned(pbm, pbm->pci_bus);
-       pci_fixup_irq(pbm, pbm->pci_bus);
-       pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
-       pci_setup_busmastering(pbm, pbm->pci_bus);
+       pbm->pci_bus = pci_scan_one_pbm(pbm);
 }
 
 static void psycho_scan_bus(struct pci_controller_info *p)
@@ -1009,7 +941,7 @@ static void psycho_scan_bus(struct pci_controller_info *p)
 
 static void psycho_iommu_init(struct pci_controller_info *p)
 {
-       struct pci_iommu *iommu = p->pbm_A.iommu;
+       struct iommu *iommu = p->pbm_A.iommu;
        unsigned long i;
        u64 control;
 
@@ -1094,19 +1026,6 @@ static void psycho_controller_hwinit(struct pci_controller_info *p)
        psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp);
 }
 
-static void pbm_register_toplevel_resources(struct pci_controller_info *p,
-                                           struct pci_pbm_info *pbm)
-{
-       char *name = pbm->name;
-
-       pbm->io_space.name = pbm->mem_space.name = name;
-
-       request_resource(&ioport_resource, &pbm->io_space);
-       request_resource(&iomem_resource, &pbm->mem_space);
-       pci_register_legacy_regions(&pbm->io_space,
-                                   &pbm->mem_space);
-}
-
 static void psycho_pbm_strbuf_init(struct pci_controller_info *p,
                                   struct pci_pbm_info *pbm,
                                   int is_pbm_a)
@@ -1172,19 +1091,11 @@ static void psycho_pbm_init(struct pci_controller_info *p,
        unsigned int *busrange;
        struct property *prop;
        struct pci_pbm_info *pbm;
-       int len;
 
-       if (is_pbm_a) {
+       if (is_pbm_a)
                pbm = &p->pbm_A;
-               pbm->pci_first_slot = 1;
-               pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_A;
-               pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_A;
-       } else {
+       else
                pbm = &p->pbm_B;
-               pbm->pci_first_slot = 2;
-               pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_B;
-               pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_B;
-       }
 
        pbm->chip_type = PBM_CHIP_TYPE_PSYCHO;
        pbm->chip_version = 0;
@@ -1196,41 +1107,15 @@ static void psycho_pbm_init(struct pci_controller_info *p,
        if (prop)
                pbm->chip_revision = *(int *) prop->value;
 
-       pbm->io_space.end = pbm->io_space.start + PSYCHO_IOSPACE_SIZE;
-       pbm->io_space.flags = IORESOURCE_IO;
-       pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE;
-       pbm->mem_space.flags = IORESOURCE_MEM;
-
        pbm->parent = p;
        pbm->prom_node = dp;
        pbm->name = dp->full_name;
 
-       pbm_register_toplevel_resources(p, pbm);
-
        printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n",
               pbm->name,
               pbm->chip_version, pbm->chip_revision);
 
-       prop = of_find_property(dp, "ranges", &len);
-       if (prop) {
-               pbm->pbm_ranges = prop->value;
-               pbm->num_pbm_ranges =
-                       (len / sizeof(struct linux_prom_pci_ranges));
-       } else {
-               pbm->num_pbm_ranges = 0;
-       }
-
-       prop = of_find_property(dp, "interrupt-map", &len);
-       if (prop) {
-               pbm->pbm_intmap = prop->value;
-               pbm->num_pbm_intmap =
-                       (len / sizeof(struct linux_prom_pci_intmap));
-
-               prop = of_find_property(dp, "interrupt-map-mask", NULL);
-               pbm->pbm_intmask = prop->value;
-       } else {
-               pbm->num_pbm_intmap = 0;
-       }
+       pci_determine_mem_io_space(pbm);
 
        prop = of_find_property(dp, "bus-range", NULL);
        busrange = prop->value;
@@ -1246,7 +1131,7 @@ void psycho_init(struct device_node *dp, char *model_name)
 {
        struct linux_prom64_registers *pr_regs;
        struct pci_controller_info *p;
-       struct pci_iommu *iommu;
+       struct iommu *iommu;
        struct property *prop;
        u32 upa_portid;
        int is_pbm_a;
@@ -1269,7 +1154,7 @@ void psycho_init(struct device_node *dp, char *model_name)
                prom_printf("PSYCHO: Fatal memory allocation error.\n");
                prom_halt();
        }
-       iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+       iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu) {
                prom_printf("PSYCHO: Fatal memory allocation error.\n");
                prom_halt();
@@ -1282,10 +1167,7 @@ void psycho_init(struct device_node *dp, char *model_name)
        p->pbm_A.portid = upa_portid;
        p->pbm_B.portid = upa_portid;
        p->index = pci_num_controllers++;
-       p->pbms_same_domain = 0;
        p->scan_bus = psycho_scan_bus;
-       p->base_address_update = psycho_base_address_update;
-       p->resource_adjust = psycho_resource_adjust;
        p->pci_ops = &psycho_ops;
 
        prop = of_find_property(dp, "reg", NULL);
index 94bb681..397862f 100644 (file)
@@ -1,7 +1,6 @@
-/* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $
- * pci_sabre.c: Sabre specific PCI controller support.
+/* pci_sabre.c: Sabre specific PCI controller support.
  *
- * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1998, 1999 Eddie C. Dost   (ecd@skynet.be)
  * Copyright (C) 1999 Jakub Jelinek   (jakub@redhat.com)
  */
@@ -254,9 +253,6 @@ static int __sabre_out_of_range(struct pci_pbm_info *pbm,
                return 0;
 
        return ((pbm->parent == 0) ||
-               ((pbm == &pbm->parent->pbm_B) &&
-                (bus == pbm->pci_first_busno) &&
-                PCI_SLOT(devfn) > 8) ||
                ((pbm == &pbm->parent->pbm_A) &&
                 (bus == pbm->pci_first_busno) &&
                 PCI_SLOT(devfn) > 8));
@@ -322,6 +318,12 @@ static int __sabre_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
 static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn,
                              int where, int size, u32 *value)
 {
+       struct pci_pbm_info *pbm = bus->sysdata;
+
+       if (bus == pbm->pci_bus && devfn == 0x00)
+               return pci_host_bridge_read_pci_cfg(bus, devfn, where,
+                                                   size, value);
+
        if (!bus->number && sabre_out_of_range(devfn)) {
                switch (size) {
                case 1:
@@ -438,6 +440,12 @@ static int __sabre_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
 static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn,
                               int where, int size, u32 value)
 {
+       struct pci_pbm_info *pbm = bus->sysdata;
+
+       if (bus == pbm->pci_bus && devfn == 0x00)
+               return pci_host_bridge_write_pci_cfg(bus, devfn, where,
+                                                    size, value);
+
        if (bus->number)
                return __sabre_write_pci_cfg(bus, devfn, where, size, value);
 
@@ -490,7 +498,7 @@ static void sabre_check_iommu_error(struct pci_controller_info *p,
                                    unsigned long afsr,
                                    unsigned long afar)
 {
-       struct pci_iommu *iommu = p->pbm_A.iommu;
+       struct iommu *iommu = p->pbm_A.iommu;
        unsigned long iommu_tag[16];
        unsigned long iommu_data[16];
        unsigned long flags;
@@ -710,8 +718,8 @@ static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
                               p->index);
                ret = IRQ_HANDLED;
        }
-       pci_read_config_word(sabre_root_bus->self,
-                            PCI_STATUS, &stat);
+       pci_bus_read_config_word(sabre_root_bus, 0,
+                                PCI_STATUS, &stat);
        if (stat & (PCI_STATUS_PARITY |
                    PCI_STATUS_SIG_TARGET_ABORT |
                    PCI_STATUS_REC_TARGET_ABORT |
@@ -719,8 +727,8 @@ static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
                    PCI_STATUS_SIG_SYSTEM_ERROR)) {
                printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n",
                       p->index, stat);
-               pci_write_config_word(sabre_root_bus->self,
-                                     PCI_STATUS, 0xffff);
+               pci_bus_write_config_word(sabre_root_bus, 0,
+                                         PCI_STATUS, 0xffff);
                ret = IRQ_HANDLED;
        }
        return ret;
@@ -800,12 +808,10 @@ static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id)
        if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) {
                sabre_check_iommu_error(p, afsr, afar);
                pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
-               pci_scan_for_target_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
        }
-       if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA)) {
+       if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA))
                pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
-               pci_scan_for_master_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
-       }
+
        /* For excessive retries, SABRE/PBM will abort the device
         * and there is no way to specifically check for excessive
         * retries in the config space status registers.  So what
@@ -813,10 +819,8 @@ static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id)
         * abort events.
         */
 
-       if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR)) {
+       if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR))
                pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus);
-               pci_scan_for_parity_error(p, &p->pbm_B, p->pbm_B.pci_bus);
-       }
 
        return IRQ_HANDLED;
 }
@@ -869,144 +873,52 @@ static void sabre_register_error_handlers(struct pci_controller_info *p)
        sabre_write(base + SABRE_PCICTRL, tmp);
 }
 
-static void sabre_resource_adjust(struct pci_dev *pdev,
-                                 struct resource *res,
-                                 struct resource *root)
-{
-       struct pci_pbm_info *pbm = pdev->bus->sysdata;
-       unsigned long base;
-
-       if (res->flags & IORESOURCE_IO)
-               base = pbm->controller_regs + SABRE_IOSPACE;
-       else
-               base = pbm->controller_regs + SABRE_MEMSPACE;
-
-       res->start += base;
-       res->end += base;
-}
-
-static void sabre_base_address_update(struct pci_dev *pdev, int resource)
-{
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       struct pci_pbm_info *pbm = pcp->pbm;
-       struct resource *res;
-       unsigned long base;
-       u32 reg;
-       int where, size, is_64bit;
-
-       res = &pdev->resource[resource];
-       if (resource < 6) {
-               where = PCI_BASE_ADDRESS_0 + (resource * 4);
-       } else if (resource == PCI_ROM_RESOURCE) {
-               where = pdev->rom_base_reg;
-       } else {
-               /* Somebody might have asked allocation of a non-standard resource */
-               return;
-       }
-
-       is_64bit = 0;
-       if (res->flags & IORESOURCE_IO)
-               base = pbm->controller_regs + SABRE_IOSPACE;
-       else {
-               base = pbm->controller_regs + SABRE_MEMSPACE;
-               if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
-                   == PCI_BASE_ADDRESS_MEM_TYPE_64)
-                       is_64bit = 1;
-       }
-
-       size = res->end - res->start;
-       pci_read_config_dword(pdev, where, &reg);
-       reg = ((reg & size) |
-              (((u32)(res->start - base)) & ~size));
-       if (resource == PCI_ROM_RESOURCE) {
-               reg |= PCI_ROM_ADDRESS_ENABLE;
-               res->flags |= IORESOURCE_ROM_ENABLE;
-       }
-       pci_write_config_dword(pdev, where, reg);
-
-       /* This knows that the upper 32-bits of the address
-        * must be zero.  Our PCI common layer enforces this.
-        */
-       if (is_64bit)
-               pci_write_config_dword(pdev, where + 4, 0);
-}
-
 static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus)
 {
        struct pci_dev *pdev;
 
        list_for_each_entry(pdev, &sabre_bus->devices, bus_list) {
-
                if (pdev->vendor == PCI_VENDOR_ID_SUN &&
                    pdev->device == PCI_DEVICE_ID_SUN_SIMBA) {
-                       u32 word32;
                        u16 word16;
 
-                       sabre_read_pci_cfg(pdev->bus, pdev->devfn,
-                                          PCI_COMMAND, 2, &word32);
-                       word16 = (u16) word32;
+                       pci_read_config_word(pdev, PCI_COMMAND, &word16);
                        word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
                                PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
                                PCI_COMMAND_IO;
-                       word32 = (u32) word16;
-                       sabre_write_pci_cfg(pdev->bus, pdev->devfn,
-                                           PCI_COMMAND, 2, word32);
+                       pci_write_config_word(pdev, PCI_COMMAND, word16);
 
                        /* Status register bits are "write 1 to clear". */
-                       sabre_write_pci_cfg(pdev->bus, pdev->devfn,
-                                           PCI_STATUS, 2, 0xffff);
-                       sabre_write_pci_cfg(pdev->bus, pdev->devfn,
-                                           PCI_SEC_STATUS, 2, 0xffff);
+                       pci_write_config_word(pdev, PCI_STATUS, 0xffff);
+                       pci_write_config_word(pdev, PCI_SEC_STATUS, 0xffff);
 
                        /* Use a primary/secondary latency timer value
                         * of 64.
                         */
-                       sabre_write_pci_cfg(pdev->bus, pdev->devfn,
-                                           PCI_LATENCY_TIMER, 1, 64);
-                       sabre_write_pci_cfg(pdev->bus, pdev->devfn,
-                                           PCI_SEC_LATENCY_TIMER, 1, 64);
+                       pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+                       pci_write_config_byte(pdev, PCI_SEC_LATENCY_TIMER, 64);
 
                        /* Enable reporting/forwarding of master aborts,
                         * parity, and SERR.
                         */
-                       sabre_write_pci_cfg(pdev->bus, pdev->devfn,
-                                           PCI_BRIDGE_CONTROL, 1,
-                                           (PCI_BRIDGE_CTL_PARITY |
-                                            PCI_BRIDGE_CTL_SERR |
-                                            PCI_BRIDGE_CTL_MASTER_ABORT));
+                       pci_write_config_byte(pdev, PCI_BRIDGE_CONTROL,
+                                             (PCI_BRIDGE_CTL_PARITY |
+                                              PCI_BRIDGE_CTL_SERR |
+                                              PCI_BRIDGE_CTL_MASTER_ABORT));
                }
        }
 }
 
-static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm)
-{
-       struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-
-       if (!cookie) {
-               prom_printf("SABRE: Critical allocation failure.\n");
-               prom_halt();
-       }
-
-       /* All we care about is the PBM. */
-       cookie->pbm = pbm;
-
-       return cookie;
-}
-
 static void sabre_scan_bus(struct pci_controller_info *p)
 {
        static int once;
-       struct pci_bus *sabre_bus, *pbus;
-       struct pci_pbm_info *pbm;
-       struct pcidev_cookie *cookie;
-       int sabres_scanned;
+       struct pci_bus *pbus;
 
        /* The APB bridge speaks to the Sabre host PCI bridge
         * at 66Mhz, but the front side of APB runs at 33Mhz
         * for both segments.
         */
        p->pbm_A.is_66mhz_capable = 0;
-       p->pbm_B.is_66mhz_capable = 0;
 
        /* This driver has not been verified to handle
         * multiple SABREs yet, so trap this.
@@ -1020,56 +932,13 @@ static void sabre_scan_bus(struct pci_controller_info *p)
        }
        once++;
 
-       cookie = alloc_bridge_cookie(&p->pbm_A);
-
-       sabre_bus = pci_scan_bus(p->pci_first_busno,
-                                p->pci_ops,
-                                &p->pbm_A);
-       pci_fixup_host_bridge_self(sabre_bus);
-       sabre_bus->self->sysdata = cookie;
-
-       sabre_root_bus = sabre_bus;
-
-       apb_init(p, sabre_bus);
-
-       sabres_scanned = 0;
-
-       list_for_each_entry(pbus, &sabre_bus->children, node) {
-
-               if (pbus->number == p->pbm_A.pci_first_busno) {
-                       pbm = &p->pbm_A;
-               } else if (pbus->number == p->pbm_B.pci_first_busno) {
-                       pbm = &p->pbm_B;
-               } else
-                       continue;
-
-               cookie = alloc_bridge_cookie(pbm);
-               pbus->self->sysdata = cookie;
-
-               sabres_scanned++;
+       pbus = pci_scan_one_pbm(&p->pbm_A);
+       if (!pbus)
+               return;
 
-               pbus->sysdata = pbm;
-               pbm->pci_bus = pbus;
-               pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node);
-               pci_record_assignments(pbm, pbus);
-               pci_assign_unassigned(pbm, pbus);
-               pci_fixup_irq(pbm, pbus);
-               pci_determine_66mhz_disposition(pbm, pbus);
-               pci_setup_busmastering(pbm, pbus);
-       }
+       sabre_root_bus = pbus;
 
-       if (!sabres_scanned) {
-               /* Hummingbird, no APBs. */
-               pbm = &p->pbm_A;
-               sabre_bus->sysdata = pbm;
-               pbm->pci_bus = sabre_bus;
-               pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node);
-               pci_record_assignments(pbm, sabre_bus);
-               pci_assign_unassigned(pbm, sabre_bus);
-               pci_fixup_irq(pbm, sabre_bus);
-               pci_determine_66mhz_disposition(pbm, sabre_bus);
-               pci_setup_busmastering(pbm, sabre_bus);
-       }
+       apb_init(p, pbus);
 
        sabre_register_error_handlers(p);
 }
@@ -1078,7 +947,7 @@ static void sabre_iommu_init(struct pci_controller_info *p,
                             int tsbsize, unsigned long dvma_offset,
                             u32 dma_mask)
 {
-       struct pci_iommu *iommu = p->pbm_A.iommu;
+       struct iommu *iommu = p->pbm_A.iommu;
        unsigned long i;
        u64 control;
 
@@ -1126,224 +995,31 @@ static void sabre_iommu_init(struct pci_controller_info *p,
        sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
 }
 
-static void pbm_register_toplevel_resources(struct pci_controller_info *p,
-                                           struct pci_pbm_info *pbm)
-{
-       char *name = pbm->name;
-       unsigned long ibase = p->pbm_A.controller_regs + SABRE_IOSPACE;
-       unsigned long mbase = p->pbm_A.controller_regs + SABRE_MEMSPACE;
-       unsigned int devfn;
-       unsigned long first, last, i;
-       u8 *addr, map;
-
-       sprintf(name, "SABRE%d PBM%c",
-               p->index,
-               (pbm == &p->pbm_A ? 'A' : 'B'));
-       pbm->io_space.name = pbm->mem_space.name = name;
-
-       devfn = PCI_DEVFN(1, (pbm == &p->pbm_A) ? 0 : 1);
-       addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_IO_ADDRESS_MAP);
-       map = 0;
-       pci_config_read8(addr, &map);
-
-       first = 8;
-       last = 0;
-       for (i = 0; i < 8; i++) {
-               if ((map & (1 << i)) != 0) {
-                       if (first > i)
-                               first = i;
-                       if (last < i)
-                               last = i;
-               }
-       }
-       pbm->io_space.start = ibase + (first << 21UL);
-       pbm->io_space.end   = ibase + (last << 21UL) + ((1 << 21UL) - 1);
-       pbm->io_space.flags = IORESOURCE_IO;
-
-       addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_MEM_ADDRESS_MAP);
-       map = 0;
-       pci_config_read8(addr, &map);
-
-       first = 8;
-       last = 0;
-       for (i = 0; i < 8; i++) {
-               if ((map & (1 << i)) != 0) {
-                       if (first > i)
-                               first = i;
-                       if (last < i)
-                               last = i;
-               }
-       }
-       pbm->mem_space.start = mbase + (first << 29UL);
-       pbm->mem_space.end   = mbase + (last << 29UL) + ((1 << 29UL) - 1);
-       pbm->mem_space.flags = IORESOURCE_MEM;
-
-       if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
-               prom_printf("Cannot register PBM-%c's IO space.\n",
-                           (pbm == &p->pbm_A ? 'A' : 'B'));
-               prom_halt();
-       }
-       if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
-               prom_printf("Cannot register PBM-%c's MEM space.\n",
-                           (pbm == &p->pbm_A ? 'A' : 'B'));
-               prom_halt();
-       }
-
-       /* Register legacy regions if this PBM covers that area. */
-       if (pbm->io_space.start == ibase &&
-           pbm->mem_space.start == mbase)
-               pci_register_legacy_regions(&pbm->io_space,
-                                           &pbm->mem_space);
-}
-
-static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 dma_start, u32 dma_end)
+static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp)
 {
        struct pci_pbm_info *pbm;
-       struct device_node *node;
-       struct property *prop;
-       u32 *busrange;
-       int len, simbas_found;
-
-       simbas_found = 0;
-       node = dp->child;
-       while (node != NULL) {
-               if (strcmp(node->name, "pci"))
-                       goto next_pci;
-
-               prop = of_find_property(node, "model", NULL);
-               if (!prop || strncmp(prop->value, "SUNW,simba", prop->length))
-                       goto next_pci;
-
-               simbas_found++;
-
-               prop = of_find_property(node, "bus-range", NULL);
-               busrange = prop->value;
-               if (busrange[0] == 1)
-                       pbm = &p->pbm_B;
-               else
-                       pbm = &p->pbm_A;
-
-               pbm->name = node->full_name;
-               printk("%s: SABRE PCI Bus Module\n", pbm->name);
-
-               pbm->chip_type = PBM_CHIP_TYPE_SABRE;
-               pbm->parent = p;
-               pbm->prom_node = node;
-               pbm->pci_first_slot = 1;
-               pbm->pci_first_busno = busrange[0];
-               pbm->pci_last_busno = busrange[1];
-
-               prop = of_find_property(node, "ranges", &len);
-               if (prop) {
-                       pbm->pbm_ranges = prop->value;
-                       pbm->num_pbm_ranges =
-                               (len / sizeof(struct linux_prom_pci_ranges));
-               } else {
-                       pbm->num_pbm_ranges = 0;
-               }
 
-               prop = of_find_property(node, "interrupt-map", &len);
-               if (prop) {
-                       pbm->pbm_intmap = prop->value;
-                       pbm->num_pbm_intmap =
-                               (len / sizeof(struct linux_prom_pci_intmap));
-
-                       prop = of_find_property(node, "interrupt-map-mask",
-                                               NULL);
-                       pbm->pbm_intmask = prop->value;
-               } else {
-                       pbm->num_pbm_intmap = 0;
-               }
+       pbm = &p->pbm_A;
+       pbm->name = dp->full_name;
+       printk("%s: SABRE PCI Bus Module\n", pbm->name);
 
-               pbm_register_toplevel_resources(p, pbm);
-
-       next_pci:
-               node = node->sibling;
-       }
-       if (simbas_found == 0) {
-               struct resource *rp;
+       pbm->chip_type = PBM_CHIP_TYPE_SABRE;
+       pbm->parent = p;
+       pbm->prom_node = dp;
+       pbm->pci_first_busno = p->pci_first_busno;
+       pbm->pci_last_busno = p->pci_last_busno;
 
-               /* No APBs underneath, probably this is a hummingbird
-                * system.
-                */
-               pbm = &p->pbm_A;
-               pbm->parent = p;
-               pbm->prom_node = dp;
-               pbm->pci_first_busno = p->pci_first_busno;
-               pbm->pci_last_busno = p->pci_last_busno;
-
-               prop = of_find_property(dp, "ranges", &len);
-               if (prop) {
-                       pbm->pbm_ranges = prop->value;
-                       pbm->num_pbm_ranges =
-                               (len / sizeof(struct linux_prom_pci_ranges));
-               } else {
-                       pbm->num_pbm_ranges = 0;
-               }
-
-               prop = of_find_property(dp, "interrupt-map", &len);
-               if (prop) {
-                       pbm->pbm_intmap = prop->value;
-                       pbm->num_pbm_intmap =
-                               (len / sizeof(struct linux_prom_pci_intmap));
-
-                       prop = of_find_property(dp, "interrupt-map-mask",
-                                               NULL);
-                       pbm->pbm_intmask = prop->value;
-               } else {
-                       pbm->num_pbm_intmap = 0;
-               }
-
-               pbm->name = dp->full_name;
-               printk("%s: SABRE PCI Bus Module\n", pbm->name);
-
-               pbm->io_space.name = pbm->mem_space.name = pbm->name;
-
-               /* Hack up top-level resources. */
-               pbm->io_space.start = p->pbm_A.controller_regs + SABRE_IOSPACE;
-               pbm->io_space.end   = pbm->io_space.start + (1UL << 24) - 1UL;
-               pbm->io_space.flags = IORESOURCE_IO;
-
-               pbm->mem_space.start =
-                       (p->pbm_A.controller_regs + SABRE_MEMSPACE);
-               pbm->mem_space.end =
-                       (pbm->mem_space.start + ((1UL << 32UL) - 1UL));
-               pbm->mem_space.flags = IORESOURCE_MEM;
-
-               if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
-                       prom_printf("Cannot register Hummingbird's IO space.\n");
-                       prom_halt();
-               }
-               if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
-                       prom_printf("Cannot register Hummingbird's MEM space.\n");
-                       prom_halt();
-               }
-
-               rp = kmalloc(sizeof(*rp), GFP_KERNEL);
-               if (!rp) {
-                       prom_printf("Cannot allocate IOMMU resource.\n");
-                       prom_halt();
-               }
-               rp->name = "IOMMU";
-               rp->start = pbm->mem_space.start + (unsigned long) dma_start;
-               rp->end = pbm->mem_space.start + (unsigned long) dma_end - 1UL;
-               rp->flags = IORESOURCE_BUSY;
-               request_resource(&pbm->mem_space, rp);
-
-               pci_register_legacy_regions(&pbm->io_space,
-                                           &pbm->mem_space);
-       }
+       pci_determine_mem_io_space(pbm);
 }
 
 void sabre_init(struct device_node *dp, char *model_name)
 {
-       struct linux_prom64_registers *pr_regs;
+       const struct linux_prom64_registers *pr_regs;
        struct pci_controller_info *p;
-       struct pci_iommu *iommu;
-       struct property *prop;
+       struct iommu *iommu;
        int tsbsize;
-       u32 *busrange;
-       u32 *vdma;
+       const u32 *busrange;
+       const u32 *vdma;
        u32 upa_portid, dma_mask;
        u64 clear_irq;
 
@@ -1351,13 +1027,9 @@ void sabre_init(struct device_node *dp, char *model_name)
        if (!strcmp(model_name, "pci108e,a001"))
                hummingbird_p = 1;
        else if (!strcmp(model_name, "SUNW,sabre")) {
-               prop = of_find_property(dp, "compatible", NULL);
-               if (prop) {
-                       const char *compat = prop->value;
-
-                       if (!strcmp(compat, "pci108e,a001"))
-                               hummingbird_p = 1;
-               }
+               const char *compat = of_get_property(dp, "compatible", NULL);
+               if (compat && !strcmp(compat, "pci108e,a001"))
+                       hummingbird_p = 1;
                if (!hummingbird_p) {
                        struct device_node *dp;
 
@@ -1381,37 +1053,28 @@ void sabre_init(struct device_node *dp, char *model_name)
                prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
                prom_halt();
        }
-       p->pbm_A.iommu = p->pbm_B.iommu = iommu;
+       p->pbm_A.iommu = iommu;
 
-       upa_portid = 0xff;
-       prop = of_find_property(dp, "upa-portid", NULL);
-       if (prop)
-               upa_portid = *(u32 *) prop->value;
+       upa_portid = of_getintprop_default(dp, "upa-portid", 0xff);
 
        p->next = pci_controller_root;
        pci_controller_root = p;
 
        p->pbm_A.portid = upa_portid;
-       p->pbm_B.portid = upa_portid;
        p->index = pci_num_controllers++;
-       p->pbms_same_domain = 1;
        p->scan_bus = sabre_scan_bus;
-       p->base_address_update = sabre_base_address_update;
-       p->resource_adjust = sabre_resource_adjust;
        p->pci_ops = &sabre_ops;
 
        /*
         * Map in SABRE register set and report the presence of this SABRE.
         */
        
-       prop = of_find_property(dp, "reg", NULL);
-       pr_regs = prop->value;
+       pr_regs = of_get_property(dp, "reg", NULL);
 
        /*
         * First REG in property is base of entire SABRE register space.
         */
        p->pbm_A.controller_regs = pr_regs[0].phys_addr;
-       p->pbm_B.controller_regs = pr_regs[0].phys_addr;
 
        /* Clear interrupts */
 
@@ -1429,11 +1092,10 @@ void sabre_init(struct device_node *dp, char *model_name)
                     SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN));
 
        /* Now map in PCI config space for entire SABRE. */
-       p->pbm_A.config_space = p->pbm_B.config_space =
+       p->pbm_A.config_space =
                (p->pbm_A.controller_regs + SABRE_CONFIGSPACE);
 
-       prop = of_find_property(dp, "virtual-dma", NULL);
-       vdma = prop->value;
+       vdma = of_get_property(dp, "virtual-dma", NULL);
 
        dma_mask = vdma[0];
        switch(vdma[1]) {
@@ -1457,13 +1119,12 @@ void sabre_init(struct device_node *dp, char *model_name)
 
        sabre_iommu_init(p, tsbsize, vdma[0], dma_mask);
 
-       prop = of_find_property(dp, "bus-range", NULL);
-       busrange = prop->value;
+       busrange = of_get_property(dp, "bus-range", NULL);
        p->pci_first_busno = busrange[0];
        p->pci_last_busno = busrange[1];
 
        /*
         * Look for APB underneath.
         */
-       sabre_pbm_init(p, dp, vdma[0], vdma[0] + vdma[1]);
+       sabre_pbm_init(p, dp);
 }
index 66911b1..91a7385 100644 (file)
@@ -1,7 +1,6 @@
-/* $Id: pci_schizo.c,v 1.24 2002/01/23 11:27:32 davem Exp $
- * pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
+/* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
  *
- * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001, 2002, 2003, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/kernel.h>
@@ -126,6 +125,9 @@ static int schizo_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
        u16 tmp16;
        u8 tmp8;
 
+       if (bus_dev == pbm->pci_bus && devfn == 0x00)
+               return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
+                                                   size, value);
        switch (size) {
        case 1:
                *value = 0xff;
@@ -179,6 +181,9 @@ static int schizo_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
        unsigned char bus = bus_dev->number;
        u32 *addr;
 
+       if (bus_dev == pbm->pci_bus && devfn == 0x00)
+               return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
+                                                    size, value);
        addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
        if (!addr)
                return PCIBIOS_SUCCESSFUL;
@@ -274,7 +279,7 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
 static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
                                         enum schizo_error_type type)
 {
-       struct pci_strbuf *strbuf = &pbm->stc;
+       struct strbuf *strbuf = &pbm->stc;
        unsigned long regbase = pbm->pbm_regs;
        unsigned long err_base, tag_base, line_base;
        u64 control;
@@ -382,7 +387,7 @@ static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
 static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
                                         enum schizo_error_type type)
 {
-       struct pci_iommu *iommu = pbm->iommu;
+       struct iommu *iommu = pbm->iommu;
        unsigned long iommu_tag[16];
        unsigned long iommu_data[16];
        unsigned long flags;
@@ -1229,42 +1234,8 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
        pci_config_write8(addr, 64);
 }
 
-static void pbm_scan_bus(struct pci_controller_info *p,
-                        struct pci_pbm_info *pbm)
-{
-       struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-
-       if (!cookie) {
-               prom_printf("%s: Critical allocation failure.\n", pbm->name);
-               prom_halt();
-       }
-
-       /* All we care about is the PBM. */
-       cookie->pbm = pbm;
-
-       pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
-                                   p->pci_ops,
-                                   pbm);
-       pci_fixup_host_bridge_self(pbm->pci_bus);
-       pbm->pci_bus->self->sysdata = cookie;
-
-       pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
-       pci_record_assignments(pbm, pbm->pci_bus);
-       pci_assign_unassigned(pbm, pbm->pci_bus);
-       pci_fixup_irq(pbm, pbm->pci_bus);
-       pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
-       pci_setup_busmastering(pbm, pbm->pci_bus);
-}
-
-static void __schizo_scan_bus(struct pci_controller_info *p,
-                             int chip_type)
+static void schizo_scan_bus(struct pci_controller_info *p)
 {
-       if (!p->pbm_B.prom_node || !p->pbm_A.prom_node) {
-               printk("PCI: Only one PCI bus module of controller found.\n");
-               printk("PCI: Ignoring entire controller.\n");
-               return;
-       }
-
        pbm_config_busmastering(&p->pbm_B);
        p->pbm_B.is_66mhz_capable =
                (of_find_property(p->pbm_B.prom_node, "66mhz-capable", NULL)
@@ -1273,154 +1244,19 @@ static void __schizo_scan_bus(struct pci_controller_info *p,
        p->pbm_A.is_66mhz_capable =
                (of_find_property(p->pbm_A.prom_node, "66mhz-capable", NULL)
                 != NULL);
-       pbm_scan_bus(p, &p->pbm_B);
-       pbm_scan_bus(p, &p->pbm_A);
+
+       p->pbm_B.pci_bus = pci_scan_one_pbm(&p->pbm_B);
+       p->pbm_A.pci_bus = pci_scan_one_pbm(&p->pbm_A);
 
        /* After the PCI bus scan is complete, we can register
         * the error interrupt handlers.
         */
-       if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
+       if (p->pbm_B.chip_type == PBM_CHIP_TYPE_TOMATILLO)
                tomatillo_register_error_handlers(p);
        else
                schizo_register_error_handlers(p);
 }
 
-static void schizo_scan_bus(struct pci_controller_info *p)
-{
-       __schizo_scan_bus(p, PBM_CHIP_TYPE_SCHIZO);
-}
-
-static void tomatillo_scan_bus(struct pci_controller_info *p)
-{
-       __schizo_scan_bus(p, PBM_CHIP_TYPE_TOMATILLO);
-}
-
-static void schizo_base_address_update(struct pci_dev *pdev, int resource)
-{
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       struct pci_pbm_info *pbm = pcp->pbm;
-       struct resource *res, *root;
-       u32 reg;
-       int where, size, is_64bit;
-
-       res = &pdev->resource[resource];
-       if (resource < 6) {
-               where = PCI_BASE_ADDRESS_0 + (resource * 4);
-       } else if (resource == PCI_ROM_RESOURCE) {
-               where = pdev->rom_base_reg;
-       } else {
-               /* Somebody might have asked allocation of a non-standard resource */
-               return;
-       }
-
-       is_64bit = 0;
-       if (res->flags & IORESOURCE_IO)
-               root = &pbm->io_space;
-       else {
-               root = &pbm->mem_space;
-               if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
-                   == PCI_BASE_ADDRESS_MEM_TYPE_64)
-                       is_64bit = 1;
-       }
-
-       size = res->end - res->start;
-       pci_read_config_dword(pdev, where, &reg);
-       reg = ((reg & size) |
-              (((u32)(res->start - root->start)) & ~size));
-       if (resource == PCI_ROM_RESOURCE) {
-               reg |= PCI_ROM_ADDRESS_ENABLE;
-               res->flags |= IORESOURCE_ROM_ENABLE;
-       }
-       pci_write_config_dword(pdev, where, reg);
-
-       /* This knows that the upper 32-bits of the address
-        * must be zero.  Our PCI common layer enforces this.
-        */
-       if (is_64bit)
-               pci_write_config_dword(pdev, where + 4, 0);
-}
-
-static void schizo_resource_adjust(struct pci_dev *pdev,
-                                  struct resource *res,
-                                  struct resource *root)
-{
-       res->start += root->start;
-       res->end += root->start;
-}
-
-/* Use ranges property to determine where PCI MEM, I/O, and Config
- * space are for this PCI bus module.
- */
-static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm)
-{
-       int i, saw_cfg, saw_mem, saw_io;
-
-       saw_cfg = saw_mem = saw_io = 0;
-       for (i = 0; i < pbm->num_pbm_ranges; i++) {
-               struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
-               unsigned long a;
-               int type;
-
-               type = (pr->child_phys_hi >> 24) & 0x3;
-               a = (((unsigned long)pr->parent_phys_hi << 32UL) |
-                    ((unsigned long)pr->parent_phys_lo  <<  0UL));
-
-               switch (type) {
-               case 0:
-                       /* PCI config space, 16MB */
-                       pbm->config_space = a;
-                       saw_cfg = 1;
-                       break;
-
-               case 1:
-                       /* 16-bit IO space, 16MB */
-                       pbm->io_space.start = a;
-                       pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
-                       pbm->io_space.flags = IORESOURCE_IO;
-                       saw_io = 1;
-                       break;
-
-               case 2:
-                       /* 32-bit MEM space, 2GB */
-                       pbm->mem_space.start = a;
-                       pbm->mem_space.end = a + (0x80000000UL - 1UL);
-                       pbm->mem_space.flags = IORESOURCE_MEM;
-                       saw_mem = 1;
-                       break;
-
-               default:
-                       break;
-               };
-       }
-
-       if (!saw_cfg || !saw_io || !saw_mem) {
-               prom_printf("%s: Fatal error, missing %s PBM range.\n",
-                           pbm->name,
-                           ((!saw_cfg ?
-                             "CFG" :
-                             (!saw_io ?
-                              "IO" : "MEM"))));
-               prom_halt();
-       }
-
-       printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n",
-              pbm->name,
-              pbm->config_space,
-              pbm->io_space.start,
-              pbm->mem_space.start);
-}
-
-static void pbm_register_toplevel_resources(struct pci_controller_info *p,
-                                           struct pci_pbm_info *pbm)
-{
-       pbm->io_space.name = pbm->mem_space.name = pbm->name;
-
-       request_resource(&ioport_resource, &pbm->io_space);
-       request_resource(&iomem_resource, &pbm->mem_space);
-       pci_register_legacy_regions(&pbm->io_space,
-                                   &pbm->mem_space);
-}
-
 #define SCHIZO_STRBUF_CONTROL          (0x02800UL)
 #define SCHIZO_STRBUF_FLUSH            (0x02808UL)
 #define SCHIZO_STRBUF_FSYNC            (0x02810UL)
@@ -1472,7 +1308,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
 
 static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 {
-       struct pci_iommu *iommu = pbm->iommu;
+       struct iommu *iommu = pbm->iommu;
        unsigned long i, tagbase, database;
        struct property *prop;
        u32 vdma[2], dma_mask;
@@ -1654,14 +1490,12 @@ static void schizo_pbm_init(struct pci_controller_info *p,
                            struct device_node *dp, u32 portid,
                            int chip_type)
 {
-       struct linux_prom64_registers *regs;
-       struct property *prop;
-       unsigned int *busrange;
+       const struct linux_prom64_registers *regs;
+       const unsigned int *busrange;
        struct pci_pbm_info *pbm;
        const char *chipset_name;
-       u32 *ino_bitmap;
+       const u32 *ino_bitmap;
        int is_pbm_a;
-       int len;
 
        switch (chip_type) {
        case PBM_CHIP_TYPE_TOMATILLO:
@@ -1689,11 +1523,9 @@ static void schizo_pbm_init(struct pci_controller_info *p,
         * 3) PBM PCI config space
         * 4) Ichip regs
         */
-       prop = of_find_property(dp, "reg", NULL);
-       regs = prop->value;
+       regs = of_get_property(dp, "reg", NULL);
 
        is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000);
-
        if (is_pbm_a)
                pbm = &p->pbm_A;
        else
@@ -1702,17 +1534,10 @@ static void schizo_pbm_init(struct pci_controller_info *p,
        pbm->portid = portid;
        pbm->parent = p;
        pbm->prom_node = dp;
-       pbm->pci_first_slot = 1;
 
        pbm->chip_type = chip_type;
-       pbm->chip_version = 0;
-       prop = of_find_property(dp, "version#", NULL);
-       if (prop)
-               pbm->chip_version = *(int *) prop->value;
-       pbm->chip_revision = 0;
-       prop = of_find_property(dp, "module-revision#", NULL);
-       if (prop)
-               pbm->chip_revision = *(int *) prop->value;
+       pbm->chip_version = of_getintprop_default(dp, "version#", 0);
+       pbm->chip_revision = of_getintprop_default(dp, "module-version#", 0);
 
        pbm->pbm_regs = regs[0].phys_addr;
        pbm->controller_regs = regs[1].phys_addr - 0x10000UL;
@@ -1723,40 +1548,18 @@ static void schizo_pbm_init(struct pci_controller_info *p,
        pbm->name = dp->full_name;
 
        printk("%s: %s PCI Bus Module ver[%x:%x]\n",
-              pbm->name,
-              (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
-               "TOMATILLO" : "SCHIZO"),
+              pbm->name, chipset_name,
               pbm->chip_version, pbm->chip_revision);
 
        schizo_pbm_hw_init(pbm);
 
-       prop = of_find_property(dp, "ranges", &len);
-       pbm->pbm_ranges = prop->value;
-       pbm->num_pbm_ranges =
-               (len / sizeof(struct linux_prom_pci_ranges));
+       pci_determine_mem_io_space(pbm);
 
-       schizo_determine_mem_io_space(pbm);
-       pbm_register_toplevel_resources(p, pbm);
-
-       prop = of_find_property(dp, "interrupt-map", &len);
-       if (prop) {
-               pbm->pbm_intmap = prop->value;
-               pbm->num_pbm_intmap =
-                       (len / sizeof(struct linux_prom_pci_intmap));
-
-               prop = of_find_property(dp, "interrupt-map-mask", NULL);
-               pbm->pbm_intmask = prop->value;
-       } else {
-               pbm->num_pbm_intmap = 0;
-       }
-
-       prop = of_find_property(dp, "ino-bitmap", NULL);
-       ino_bitmap = prop->value;
+       ino_bitmap = of_get_property(dp, "ino-bitmap", NULL);
        pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) |
                           ((u64)ino_bitmap[0] <<  0UL));
 
-       prop = of_find_property(dp, "bus-range", NULL);
-       busrange = prop->value;
+       busrange = of_get_property(dp, "bus-range", NULL);
        pbm->pci_first_busno = busrange[0];
        pbm->pci_last_busno = busrange[1];
 
@@ -1777,15 +1580,10 @@ static inline int portid_compare(u32 x, u32 y, int chip_type)
 static void __schizo_init(struct device_node *dp, char *model_name, int chip_type)
 {
        struct pci_controller_info *p;
-       struct pci_iommu *iommu;
-       struct property *prop;
-       int is_pbm_a;
+       struct iommu *iommu;
        u32 portid;
 
-       portid = 0xff;
-       prop = of_find_property(dp, "portid", NULL);
-       if (prop)
-               portid = *(u32 *) prop->value;
+       portid = of_getintprop_default(dp, "portid", 0xff);
 
        for (p = pci_controller_root; p; p = p->next) {
                struct pci_pbm_info *pbm;
@@ -1798,48 +1596,43 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ
                       &p->pbm_B);
 
                if (portid_compare(pbm->portid, portid, chip_type)) {
-                       is_pbm_a = (p->pbm_A.prom_node == NULL);
                        schizo_pbm_init(p, dp, portid, chip_type);
                        return;
                }
        }
 
        p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
-       if (!p) {
-               prom_printf("SCHIZO: Fatal memory allocation error.\n");
-               prom_halt();
-       }
+       if (!p)
+               goto memfail;
+
+       iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
+       if (!iommu)
+               goto memfail;
 
-       iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
-       if (!iommu) {
-               prom_printf("SCHIZO: Fatal memory allocation error.\n");
-               prom_halt();
-       }
        p->pbm_A.iommu = iommu;
 
-       iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
-       if (!iommu) {
-               prom_printf("SCHIZO: Fatal memory allocation error.\n");
-               prom_halt();
-       }
+       iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
+       if (!iommu)
+               goto memfail;
+
        p->pbm_B.iommu = iommu;
 
        p->next = pci_controller_root;
        pci_controller_root = p;
 
        p->index = pci_num_controllers++;
-       p->pbms_same_domain = 0;
-       p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
-                      tomatillo_scan_bus :
-                      schizo_scan_bus);
-       p->base_address_update = schizo_base_address_update;
-       p->resource_adjust = schizo_resource_adjust;
+       p->scan_bus = schizo_scan_bus;
        p->pci_ops = &schizo_ops;
 
        /* Like PSYCHO we have a 2GB aligned area for memory space. */
        pci_memspace_mask = 0x7fffffffUL;
 
        schizo_pbm_init(p, dp, portid, chip_type);
+       return;
+
+memfail:
+       prom_printf("SCHIZO: Fatal memory allocation error.\n");
+       prom_halt();
 }
 
 void schizo_init(struct device_node *dp, char *model_name)
index ec22cd6..94295c2 100644 (file)
@@ -1,6 +1,6 @@
 /* pci_sun4v.c: SUN4V specific PCI controller support.
  *
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/kernel.h>
@@ -29,7 +29,7 @@
 
 #define PGLIST_NENTS   (PAGE_SIZE / sizeof(u64))
 
-struct pci_iommu_batch {
+struct iommu_batch {
        struct pci_dev  *pdev;          /* Device mapping is for.       */
        unsigned long   prot;           /* IOMMU page protections       */
        unsigned long   entry;          /* Index into IOTSB.            */
@@ -37,12 +37,12 @@ struct pci_iommu_batch {
        unsigned long   npages;         /* Number of pages in list.     */
 };
 
-static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
+static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);
 
 /* Interrupts must be disabled.  */
 static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
 {
-       struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+       struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
 
        p->pdev         = pdev;
        p->prot         = prot;
@@ -51,10 +51,10 @@ static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long pro
 }
 
 /* Interrupts must be disabled.  */
-static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
+static long pci_iommu_batch_flush(struct iommu_batch *p)
 {
-       struct pcidev_cookie *pcp = p->pdev->sysdata;
-       unsigned long devhandle = pcp->pbm->devhandle;
+       struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
+       unsigned long devhandle = pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
        u64 *pglist = p->pglist;
@@ -89,7 +89,7 @@ static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
 /* Interrupts must be disabled.  */
 static inline long pci_iommu_batch_add(u64 phys_page)
 {
-       struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+       struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
 
        BUG_ON(p->npages >= PGLIST_NENTS);
 
@@ -103,14 +103,14 @@ static inline long pci_iommu_batch_add(u64 phys_page)
 /* Interrupts must be disabled.  */
 static inline long pci_iommu_batch_end(void)
 {
-       struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+       struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
 
        BUG_ON(p->npages >= PGLIST_NENTS);
 
        return pci_iommu_batch_flush(p);
 }
 
-static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
+static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages)
 {
        unsigned long n, i, start, end, limit;
        int pass;
@@ -149,7 +149,7 @@ again:
        return n;
 }
 
-static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
 {
        unsigned long i;
 
@@ -159,8 +159,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
 
 static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
+       struct iommu *iommu;
        unsigned long flags, order, first_page, npages, n;
        void *ret;
        long entry;
@@ -178,8 +177,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 
        memset((char *)first_page, 0, PAGE_SIZE << order);
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
+       iommu = pdev->dev.archdata.iommu;
 
        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
@@ -226,15 +224,15 @@ arena_alloc_fail:
 
 static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
+       struct pci_pbm_info *pbm;
+       struct iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;
 
        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
-       devhandle = pcp->pbm->devhandle;
+       iommu = pdev->dev.archdata.iommu;
+       pbm = pdev->dev.archdata.host_controller;
+       devhandle = pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
        spin_lock_irqsave(&iommu->lock, flags);
@@ -259,16 +257,14 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
 
 static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
+       struct iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
+       iommu = pdev->dev.archdata.iommu;
 
        if (unlikely(direction == PCI_DMA_NONE))
                goto bad;
@@ -324,8 +320,8 @@ iommu_map_fail:
 
 static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
+       struct pci_pbm_info *pbm;
+       struct iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;
@@ -336,9 +332,9 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
                return;
        }
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
-       devhandle = pcp->pbm->devhandle;
+       iommu = pdev->dev.archdata.iommu;
+       pbm = pdev->dev.archdata.host_controller;
+       devhandle = pbm->devhandle;
 
        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
@@ -460,8 +456,7 @@ iommu_map_failed:
 
 static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
+       struct iommu *iommu;
        unsigned long flags, npages, prot;
        u32 dma_base;
        struct scatterlist *sgtmp;
@@ -480,8 +475,7 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
                return 1;
        }
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
+       iommu = pdev->dev.archdata.iommu;
        
        if (unlikely(direction == PCI_DMA_NONE))
                goto bad;
@@ -537,8 +531,8 @@ iommu_map_failed:
 
 static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 {
-       struct pcidev_cookie *pcp;
-       struct pci_iommu *iommu;
+       struct pci_pbm_info *pbm;
+       struct iommu *iommu;
        unsigned long flags, i, npages;
        long entry;
        u32 devhandle, bus_addr;
@@ -548,9 +542,9 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
                        WARN_ON(1);
        }
 
-       pcp = pdev->sysdata;
-       iommu = pcp->pbm->iommu;
-       devhandle = pcp->pbm->devhandle;
+       iommu = pdev->dev.archdata.iommu;
+       pbm = pdev->dev.archdata.host_controller;
+       devhandle = pbm->devhandle;
        
        bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
@@ -589,7 +583,7 @@ static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist
        /* Nothing to do... */
 }
 
-struct pci_iommu_ops pci_sun4v_iommu_ops = {
+const struct pci_iommu_ops pci_sun4v_iommu_ops = {
        .alloc_consistent               = pci_4v_alloc_consistent,
        .free_consistent                = pci_4v_free_consistent,
        .map_single                     = pci_4v_map_single,
@@ -600,132 +594,12 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
        .dma_sync_sg_for_cpu            = pci_4v_dma_sync_sg_for_cpu,
 };
 
-/* SUN4V PCI configuration space accessors. */
-
-struct pdev_entry {
-       struct pdev_entry       *next;
-       u32                     devhandle;
-       unsigned int            bus;
-       unsigned int            device;
-       unsigned int            func;
-};
-
-#define PDEV_HTAB_SIZE 16
-#define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1)
-static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
-
-static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
-{
-       unsigned int val;
-
-       val = (devhandle ^ (devhandle >> 4));
-       val ^= bus;
-       val ^= device;
-       val ^= func;
-
-       return val & PDEV_HTAB_MASK;
-}
-
-static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
-{
-       struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
-       struct pdev_entry **slot;
-
-       if (!p)
-               return -ENOMEM;
-
-       slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
-       p->next = *slot;
-       *slot = p;
-
-       p->devhandle = devhandle;
-       p->bus = bus;
-       p->device = device;
-       p->func = func;
-
-       return 0;
-}
-
-/* Recursively descend into the OBP device tree, rooted at toplevel_node,
- * looking for a PCI device matching bus and devfn.
- */
-static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn)
-{
-       toplevel_node = toplevel_node->child;
-
-       while (toplevel_node != NULL) {
-               struct linux_prom_pci_registers *regs;
-               struct property *prop;
-               int ret;
-
-               ret = obp_find(toplevel_node, bus, devfn);
-               if (ret != 0)
-                       return ret;
-
-               prop = of_find_property(toplevel_node, "reg", NULL);
-               if (!prop)
-                       goto next_sibling;
-
-               regs = prop->value;
-               if (((regs->phys_hi >> 16) & 0xff) == bus &&
-                   ((regs->phys_hi >> 8) & 0xff) == devfn)
-                       break;
-
-       next_sibling:
-               toplevel_node = toplevel_node->sibling;
-       }
-
-       return toplevel_node != NULL;
-}
-
-static int pdev_htab_populate(struct pci_pbm_info *pbm)
-{
-       u32 devhandle = pbm->devhandle;
-       unsigned int bus;
-
-       for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
-               unsigned int devfn;
-
-               for (devfn = 0; devfn < 256; devfn++) {
-                       unsigned int device = PCI_SLOT(devfn);
-                       unsigned int func = PCI_FUNC(devfn);
-
-                       if (obp_find(pbm->prom_node, bus, devfn)) {
-                               int err = pdev_htab_add(devhandle, bus,
-                                                       device, func);
-                               if (err)
-                                       return err;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
-{
-       struct pdev_entry *p;
-
-       p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
-       while (p) {
-               if (p->devhandle == devhandle &&
-                   p->bus == bus &&
-                   p->device == device &&
-                   p->func == func)
-                       break;
-
-               p = p->next;
-       }
-
-       return p;
-}
-
 static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
 {
        if (bus < pbm->pci_first_busno ||
            bus > pbm->pci_last_busno)
                return 1;
-       return pdev_find(pbm->devhandle, bus, device, func) == NULL;
+       return 0;
 }
 
 static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
@@ -738,6 +612,9 @@ static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
        unsigned int func = PCI_FUNC(devfn);
        unsigned long ret;
 
+       if (bus_dev == pbm->pci_bus && devfn == 0x00)
+               return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
+                                                   size, value);
        if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
                ret = ~0UL;
        } else {
@@ -776,6 +653,9 @@ static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
        unsigned int func = PCI_FUNC(devfn);
        unsigned long ret;
 
+       if (bus_dev == pbm->pci_bus && devfn == 0x00)
+               return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
+                                                    size, value);
        if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
                /* Do nothing. */
        } else {
@@ -800,27 +680,7 @@ static struct pci_ops pci_sun4v_ops = {
 static void pbm_scan_bus(struct pci_controller_info *p,
                         struct pci_pbm_info *pbm)
 {
-       struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-
-       if (!cookie) {
-               prom_printf("%s: Critical allocation failure.\n", pbm->name);
-               prom_halt();
-       }
-
-       /* All we care about is the PBM. */
-       cookie->pbm = pbm;
-
-       pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
-#if 0
-       pci_fixup_host_bridge_self(pbm->pci_bus);
-       pbm->pci_bus->self->sysdata = cookie;
-#endif
-       pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
-       pci_record_assignments(pbm, pbm->pci_bus);
-       pci_assign_unassigned(pbm, pbm->pci_bus);
-       pci_fixup_irq(pbm, pbm->pci_bus);
-       pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
-       pci_setup_busmastering(pbm, pbm->pci_bus);
+       pbm->pci_bus = pci_scan_one_pbm(pbm);
 }
 
 static void pci_sun4v_scan_bus(struct pci_controller_info *p)
@@ -844,130 +704,10 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p)
        /* XXX register error interrupt handlers XXX */
 }
 
-static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
-{
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       struct pci_pbm_info *pbm = pcp->pbm;
-       struct resource *res, *root;
-       u32 reg;
-       int where, size, is_64bit;
-
-       res = &pdev->resource[resource];
-       if (resource < 6) {
-               where = PCI_BASE_ADDRESS_0 + (resource * 4);
-       } else if (resource == PCI_ROM_RESOURCE) {
-               where = pdev->rom_base_reg;
-       } else {
-               /* Somebody might have asked allocation of a non-standard resource */
-               return;
-       }
-
-       /* XXX 64-bit MEM handling is not %100 correct... XXX */
-       is_64bit = 0;
-       if (res->flags & IORESOURCE_IO)
-               root = &pbm->io_space;
-       else {
-               root = &pbm->mem_space;
-               if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
-                   == PCI_BASE_ADDRESS_MEM_TYPE_64)
-                       is_64bit = 1;
-       }
-
-       size = res->end - res->start;
-       pci_read_config_dword(pdev, where, &reg);
-       reg = ((reg & size) |
-              (((u32)(res->start - root->start)) & ~size));
-       if (resource == PCI_ROM_RESOURCE) {
-               reg |= PCI_ROM_ADDRESS_ENABLE;
-               res->flags |= IORESOURCE_ROM_ENABLE;
-       }
-       pci_write_config_dword(pdev, where, reg);
-
-       /* This knows that the upper 32-bits of the address
-        * must be zero.  Our PCI common layer enforces this.
-        */
-       if (is_64bit)
-               pci_write_config_dword(pdev, where + 4, 0);
-}
-
-static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
-                                     struct resource *res,
-                                     struct resource *root)
-{
-       res->start += root->start;
-       res->end += root->start;
-}
-
-/* Use ranges property to determine where PCI MEM, I/O, and Config
- * space are for this PCI bus module.
- */
-static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
-{
-       int i, saw_mem, saw_io;
-
-       saw_mem = saw_io = 0;
-       for (i = 0; i < pbm->num_pbm_ranges; i++) {
-               struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
-               unsigned long a;
-               int type;
-
-               type = (pr->child_phys_hi >> 24) & 0x3;
-               a = (((unsigned long)pr->parent_phys_hi << 32UL) |
-                    ((unsigned long)pr->parent_phys_lo  <<  0UL));
-
-               switch (type) {
-               case 1:
-                       /* 16-bit IO space, 16MB */
-                       pbm->io_space.start = a;
-                       pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
-                       pbm->io_space.flags = IORESOURCE_IO;
-                       saw_io = 1;
-                       break;
-
-               case 2:
-                       /* 32-bit MEM space, 2GB */
-                       pbm->mem_space.start = a;
-                       pbm->mem_space.end = a + (0x80000000UL - 1UL);
-                       pbm->mem_space.flags = IORESOURCE_MEM;
-                       saw_mem = 1;
-                       break;
-
-               case 3:
-                       /* XXX 64-bit MEM handling XXX */
-
-               default:
-                       break;
-               };
-       }
-
-       if (!saw_io || !saw_mem) {
-               prom_printf("%s: Fatal error, missing %s PBM range.\n",
-                           pbm->name,
-                           (!saw_io ? "IO" : "MEM"));
-               prom_halt();
-       }
-
-       printk("%s: PCI IO[%lx] MEM[%lx]\n",
-              pbm->name,
-              pbm->io_space.start,
-              pbm->mem_space.start);
-}
-
-static void pbm_register_toplevel_resources(struct pci_controller_info *p,
-                                           struct pci_pbm_info *pbm)
-{
-       pbm->io_space.name = pbm->mem_space.name = pbm->name;
-
-       request_resource(&ioport_resource, &pbm->io_space);
-       request_resource(&iomem_resource, &pbm->mem_space);
-       pci_register_legacy_regions(&pbm->io_space,
-                                   &pbm->mem_space);
-}
-
 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-                                           struct pci_iommu *iommu)
+                                           struct iommu *iommu)
 {
-       struct pci_iommu_arena *arena = &iommu->arena;
+       struct iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;
 
@@ -994,7 +734,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 
 static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 {
-       struct pci_iommu *iommu = pbm->iommu;
+       struct iommu *iommu = pbm->iommu;
        struct property *prop;
        unsigned long num_tsb_entries, sz;
        u32 vdma[2], dma_mask, dma_offset;
@@ -1281,7 +1021,7 @@ h_error:
 
 static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 {
-       u32 *val;
+       const u32 *val;
        int len;
 
        val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
@@ -1289,16 +1029,16 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
                goto no_msi;
        pbm->msiq_num = *val;
        if (pbm->msiq_num) {
-               struct msiq_prop {
+               const struct msiq_prop {
                        u32 first_msiq;
                        u32 num_msiq;
                        u32 first_devino;
                } *mqp;
-               struct msi_range_prop {
+               const struct msi_range_prop {
                        u32 first_msi;
                        u32 num_msi;
                } *mrng;
-               struct addr_range_prop {
+               const struct addr_range_prop {
                        u32 msi32_high;
                        u32 msi32_low;
                        u32 msi32_len;
@@ -1410,8 +1150,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
                                   struct pci_dev *pdev,
                                   struct msi_desc *entry)
 {
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       struct pci_pbm_info *pbm = pcp->pbm;
+       struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
        unsigned long devino, msiqid;
        struct msi_msg msg;
        int msi_num, err;
@@ -1455,7 +1194,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
        if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
                goto out_err;
 
-       pcp->msi_num = msi_num;
+       pdev->dev.archdata.msi_num = msi_num;
 
        if (entry->msi_attrib.is_64) {
                msg.address_hi = pbm->msi64_start >> 32;
@@ -1484,12 +1223,11 @@ out_err:
 static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
                                       struct pci_dev *pdev)
 {
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       struct pci_pbm_info *pbm = pcp->pbm;
+       struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
        unsigned long msiqid, err;
        unsigned int msi_num;
 
-       msi_num = pcp->msi_num;
+       msi_num = pdev->dev.archdata.msi_num;
        err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
        if (err) {
                printk(KERN_ERR "%s: getmsiq gives error %lu\n",
@@ -1516,8 +1254,6 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
 static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
 {
        struct pci_pbm_info *pbm;
-       struct property *prop;
-       int len, i;
 
        if (devhandle & 0x40)
                pbm = &p->pbm_B;
@@ -1526,7 +1262,6 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
 
        pbm->parent = p;
        pbm->prom_node = dp;
-       pbm->pci_first_slot = 1;
 
        pbm->devhandle = devhandle;
 
@@ -1534,39 +1269,17 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
 
        printk("%s: SUN4V PCI Bus Module\n", pbm->name);
 
-       prop = of_find_property(dp, "ranges", &len);
-       pbm->pbm_ranges = prop->value;
-       pbm->num_pbm_ranges =
-               (len / sizeof(struct linux_prom_pci_ranges));
-
-       /* Mask out the top 8 bits of the ranges, leaving the real
-        * physical address.
-        */
-       for (i = 0; i < pbm->num_pbm_ranges; i++)
-               pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;
-
-       pci_sun4v_determine_mem_io_space(pbm);
-       pbm_register_toplevel_resources(p, pbm);
-
-       prop = of_find_property(dp, "interrupt-map", &len);
-       pbm->pbm_intmap = prop->value;
-       pbm->num_pbm_intmap =
-               (len / sizeof(struct linux_prom_pci_intmap));
-
-       prop = of_find_property(dp, "interrupt-map-mask", NULL);
-       pbm->pbm_intmask = prop->value;
+       pci_determine_mem_io_space(pbm);
 
        pci_sun4v_get_bus_range(pbm);
        pci_sun4v_iommu_init(pbm);
        pci_sun4v_msi_init(pbm);
-
-       pdev_htab_populate(pbm);
 }
 
 void sun4v_pci_init(struct device_node *dp, char *model_name)
 {
        struct pci_controller_info *p;
-       struct pci_iommu *iommu;
+       struct iommu *iommu;
        struct property *prop;
        struct linux_prom64_registers *regs;
        u32 devhandle;
@@ -1606,13 +1319,13 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
        if (!p)
                goto fatal_memory_error;
 
-       iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+       iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;
 
        p->pbm_A.iommu = iommu;
 
-       iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+       iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;
 
@@ -1622,11 +1335,8 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
        pci_controller_root = p;
 
        p->index = pci_num_controllers++;
-       p->pbms_same_domain = 0;
 
        p->scan_bus = pci_sun4v_scan_bus;
-       p->base_address_update = pci_sun4v_base_address_update;
-       p->resource_adjust = pci_sun4v_resource_adjust;
 #ifdef CONFIG_PCI_MSI
        p->setup_msi_irq = pci_sun4v_setup_msi_irq;
        p->teardown_msi_irq = pci_sun4v_teardown_msi_irq;
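
Across the pci_sun4v.c hunks the recurring change is the removal of the pcidev_cookie indirection: the IOMMU, the owning PBM and the MSI number now live directly in pdev->dev.archdata, and the pdev hash table that backed pci_sun4v_out_of_range() is no longer needed once the check is reduced to a plain bus-range test. A minimal sketch of the new lookup, mirroring the lines above (the wrapper function itself is illustrative, not kernel code):

/* How the sun4v DMA and MSI paths now reach their per-device state. */
static void sun4v_dma_context_sketch(struct pci_dev *pdev)
{
        struct iommu *iommu = pdev->dev.archdata.iommu;
        struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
        unsigned long devhandle = pbm->devhandle;

        /* Hypervisor calls key off devhandle; the map/unmap paths still
         * take iommu->lock and allocate entries from iommu->arena.
         */
        (void) iommu;
        (void) devhandle;
}
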
index b291060..a114151 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/reboot.h>
 #include <linux/delay.h>
 #include <linux/compat.h>
+#include <linux/tick.h>
 #include <linux/init.h>
 
 #include <asm/oplib.h>
@@ -88,12 +89,14 @@ void cpu_idle(void)
        set_thread_flag(TIF_POLLING_NRFLAG);
 
        while(1) {
-               if (need_resched()) {
-                       preempt_enable_no_resched();
-                       schedule();
-                       preempt_disable();
-               }
-               sparc64_yield();
+               tick_nohz_stop_sched_tick();
+               while (!need_resched())
+                       sparc64_yield();
+               tick_nohz_restart_sched_tick();
+
+               preempt_enable_no_resched();
+               schedule();
+               preempt_disable();
        }
 }
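
The cpu_idle() rewrite above adopts the NO_HZ idle pattern: stop the periodic tick, yield the strand until a reschedule is pending, restart the tick, then hand control to the scheduler. Schematically (a simplified mirror of the diff, not additional kernel code):

/* Shape of the new tickless idle loop. */
for (;;) {
        tick_nohz_stop_sched_tick();    /* enter tickless idle      */
        while (!need_resched())
                sparc64_yield();        /* park the CPU strand      */
        tick_nohz_restart_sched_tick(); /* resume the periodic tick */

        preempt_enable_no_resched();
        schedule();
        preempt_disable();
}
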
 
index 0917c24..5e1fcd0 100644 (file)
@@ -36,12 +36,13 @@ static struct device_node *allnodes;
  */
 static DEFINE_RWLOCK(devtree_lock);
 
-int of_device_is_compatible(struct device_node *device, const char *compat)
+int of_device_is_compatible(const struct device_node *device,
+                           const char *compat)
 {
        const char* cp;
        int cplen, l;
 
-       cp = (char *) of_get_property(device, "compatible", &cplen);
+       cp = of_get_property(device, "compatible", &cplen);
        if (cp == NULL)
                return 0;
        while (cplen > 0) {
@@ -154,13 +155,14 @@ struct device_node *of_find_compatible_node(struct device_node *from,
 }
 EXPORT_SYMBOL(of_find_compatible_node);
 
-struct property *of_find_property(struct device_node *np, const char *name,
+struct property *of_find_property(const struct device_node *np,
+                                 const char *name,
                                  int *lenp)
 {
        struct property *pp;
 
        for (pp = np->properties; pp != 0; pp = pp->next) {
-               if (strcmp(pp->name, name) == 0) {
+               if (strcasecmp(pp->name, name) == 0) {
                        if (lenp != 0)
                                *lenp = pp->length;
                        break;
@@ -174,7 +176,8 @@ EXPORT_SYMBOL(of_find_property);
  * Find a property with a given name for a given node
  * and return the value.
  */
-void *of_get_property(struct device_node *np, const char *name, int *lenp)
+const void *of_get_property(const struct device_node *np, const char *name,
+                     int *lenp)
 {
        struct property *pp = of_find_property(np,name,lenp);
        return pp ? pp->value : NULL;
@@ -196,7 +199,7 @@ EXPORT_SYMBOL(of_getintprop_default);
 
 int of_n_addr_cells(struct device_node *np)
 {
-       int* ip;
+       const int* ip;
        do {
                if (np->parent)
                        np = np->parent;
@@ -211,7 +214,7 @@ EXPORT_SYMBOL(of_n_addr_cells);
 
 int of_n_size_cells(struct device_node *np)
 {
-       int* ip;
+       const int* ip;
        do {
                if (np->parent)
                        np = np->parent;
@@ -243,7 +246,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
        while (*prevp) {
                struct property *prop = *prevp;
 
-               if (!strcmp(prop->name, name)) {
+               if (!strcasecmp(prop->name, name)) {
                        void *old_val = prop->value;
                        int ret;
 
@@ -397,7 +400,7 @@ static unsigned int psycho_irq_build(struct device_node *dp,
 
 static void psycho_irq_trans_init(struct device_node *dp)
 {
-       struct linux_prom64_registers *regs;
+       const struct linux_prom64_registers *regs;
 
        dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
        dp->irq_trans->irq_build = psycho_irq_build;
@@ -547,7 +550,7 @@ static unsigned long __sabre_onboard_imap_off[] = {
 static int sabre_device_needs_wsync(struct device_node *dp)
 {
        struct device_node *parent = dp->parent;
-       char *parent_model, *parent_compat;
+       const char *parent_model, *parent_compat;
 
        /* This traversal up towards the root is meant to
         * handle two cases:
@@ -589,7 +592,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
 {
        struct sabre_irq_data *irq_data = _data;
        unsigned long controller_regs = irq_data->controller_regs;
-       struct linux_prom_pci_registers *regs;
+       const struct linux_prom_pci_registers *regs;
        unsigned long imap, iclr;
        unsigned long imap_off, iclr_off;
        int inofixup = 0;
@@ -639,9 +642,9 @@ static unsigned int sabre_irq_build(struct device_node *dp,
 
 static void sabre_irq_trans_init(struct device_node *dp)
 {
-       struct linux_prom64_registers *regs;
+       const struct linux_prom64_registers *regs;
        struct sabre_irq_data *irq_data;
-       u32 *busrange;
+       const u32 *busrange;
 
        dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
        dp->irq_trans->irq_build = sabre_irq_build;
@@ -795,7 +798,7 @@ static unsigned int schizo_irq_build(struct device_node *dp,
 
 static void __schizo_irq_trans_init(struct device_node *dp, int is_tomatillo)
 {
-       struct linux_prom64_registers *regs;
+       const struct linux_prom64_registers *regs;
        struct schizo_irq_data *irq_data;
 
        dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
@@ -836,7 +839,7 @@ static unsigned int pci_sun4v_irq_build(struct device_node *dp,
 
 static void pci_sun4v_irq_trans_init(struct device_node *dp)
 {
-       struct linux_prom64_registers *regs;
+       const struct linux_prom64_registers *regs;
 
        dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
        dp->irq_trans->irq_build = pci_sun4v_irq_build;
@@ -940,7 +943,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
                                      void *_data)
 {
        unsigned long reg_base = (unsigned long) _data;
-       struct linux_prom_registers *regs;
+       const struct linux_prom_registers *regs;
        unsigned long imap, iclr;
        int sbus_slot = 0;
        int sbus_level = 0;
@@ -994,7 +997,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
 
 static void sbus_irq_trans_init(struct device_node *dp)
 {
-       struct linux_prom64_registers *regs;
+       const struct linux_prom64_registers *regs;
 
        dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
        dp->irq_trans->irq_build = sbus_of_build_irq;
@@ -1080,7 +1083,7 @@ static unsigned int sun4v_vdev_irq_build(struct device_node *dp,
 
 static void sun4v_vdev_irq_trans_init(struct device_node *dp)
 {
-       struct linux_prom64_registers *regs;
+       const struct linux_prom64_registers *regs;
 
        dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
        dp->irq_trans->irq_build = sun4v_vdev_irq_build;
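
The prom.c hunks are mostly const-propagation: of_find_property() and of_get_property() now take a const struct device_node * and return const data, so every caller keeps a read-only pointer (hence the many const struct linux_prom64_registers * conversions above), and property-name matching switches from strcmp() to strcasecmp(). A caller under the new prototypes looks roughly like this (illustrative function name; only the phys_addr field of the reg layout is relied on):

/* Read the first 64-bit "reg" entry read-only under the const API. */
static unsigned long read_first_reg_sketch(const struct device_node *dp)
{
        const struct linux_prom64_registers *regs;
        int len;

        regs = of_get_property(dp, "reg", &len);
        if (!regs || len < (int) sizeof(*regs))
                return 0UL;     /* property missing or too short */

        return regs->phys_addr;
}
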
index 14f78fb..3b05428 100644 (file)
 
 #define MAP_BASE       ((u32)0xc0000000)
 
-struct sbus_iommu_arena {
-       unsigned long   *map;
-       unsigned int    hint;
-       unsigned int    limit;
-};
-
-struct sbus_iommu {
-       spinlock_t              lock;
-
-       struct sbus_iommu_arena arena;
-
-       iopte_t                 *page_table;
-       unsigned long           strbuf_regs;
-       unsigned long           iommu_regs;
-       unsigned long           sbus_control_reg;
-
-       volatile unsigned long  strbuf_flushflag;
+struct sbus_info {
+       struct iommu    iommu;
+       struct strbuf   strbuf;
 };
 
 /* Offsets from iommu_regs */
@@ -58,16 +44,17 @@ struct sbus_iommu {
 
 #define IOMMU_DRAM_VALID       (1UL << 30UL)
 
-static void __iommu_flushall(struct sbus_iommu *iommu)
+static void __iommu_flushall(struct iommu *iommu)
 {
-       unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+       unsigned long tag;
        int entry;
 
+       tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
        for (entry = 0; entry < 16; entry++) {
                upa_writeq(0, tag);
                tag += 8UL;
        }
-       upa_readq(iommu->sbus_control_reg);
+       upa_readq(iommu->write_complete_reg);
 }
 
 /* Offsets from strbuf_regs */
@@ -82,15 +69,14 @@ static void __iommu_flushall(struct sbus_iommu *iommu)
 
 #define STRBUF_TAG_VALID       0x02UL
 
-static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
+static void sbus_strbuf_flush(struct iommu *iommu, struct strbuf *strbuf, u32 base, unsigned long npages, int direction)
 {
        unsigned long n;
        int limit;
 
        n = npages;
        while (n--)
-               upa_writeq(base + (n << IO_PAGE_SHIFT),
-                          iommu->strbuf_regs + STRBUF_PFLUSH);
+               upa_writeq(base + (n << IO_PAGE_SHIFT), strbuf->strbuf_pflush);
 
        /* If the device could not have possibly put dirty data into
         * the streaming cache, no flush-flag synchronization needs
@@ -99,15 +85,14 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
        if (direction == SBUS_DMA_TODEVICE)
                return;
 
-       iommu->strbuf_flushflag = 0UL;
+       *(strbuf->strbuf_flushflag) = 0UL;
 
        /* Whoopee cushion! */
-       upa_writeq(__pa(&iommu->strbuf_flushflag),
-                  iommu->strbuf_regs + STRBUF_FSYNC);
-       upa_readq(iommu->sbus_control_reg);
+       upa_writeq(strbuf->strbuf_flushflag_pa, strbuf->strbuf_fsync);
+       upa_readq(iommu->write_complete_reg);
 
        limit = 100000;
-       while (iommu->strbuf_flushflag == 0UL) {
+       while (*(strbuf->strbuf_flushflag) == 0UL) {
                limit--;
                if (!limit)
                        break;
@@ -121,9 +106,9 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
 }
 
 /* Based largely upon the ppc64 iommu allocator.  */
-static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages)
+static long sbus_arena_alloc(struct iommu *iommu, unsigned long npages)
 {
-       struct sbus_iommu_arena *arena = &iommu->arena;
+       struct iommu_arena *arena = &iommu->arena;
        unsigned long n, i, start, end, limit;
        int pass;
 
@@ -162,7 +147,7 @@ again:
        return n;
 }
 
-static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base, unsigned long npages)
+static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
 {
        unsigned long i;
 
@@ -170,7 +155,7 @@ static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base,
                __clear_bit(i, arena->map);
 }
 
-static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize)
+static void sbus_iommu_table_init(struct iommu *iommu, unsigned int tsbsize)
 {
        unsigned long tsbbase, order, sz, num_tsb_entries;
 
@@ -178,13 +163,14 @@ static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize
 
        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
+       iommu->page_table_map_base = MAP_BASE;
 
        /* Allocate and initialize the free area map.  */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
-               prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
+               prom_printf("SBUS_IOMMU: Error, kmalloc(arena.map) failed.\n");
                prom_halt();
        }
        iommu->arena.limit = num_tsb_entries;
@@ -200,7 +186,7 @@ static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize
        memset(iommu->page_table, 0, tsbsize);
 }
 
-static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
 {
        long entry;
 
@@ -211,14 +197,15 @@ static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npag
        return iommu->page_table + entry;
 }
 
-static inline void free_npages(struct sbus_iommu *iommu, dma_addr_t base, unsigned long npages)
+static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
 {
        sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
 void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
 {
-       struct sbus_iommu *iommu;
+       struct sbus_info *info;
+       struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, first_page;
        void *ret;
@@ -234,7 +221,8 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);
 
-       iommu = sdev->bus->iommu;
+       info = sdev->bus->iommu;
+       iommu = &info->iommu;
 
        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
@@ -245,7 +233,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
                return NULL;
        }
 
-       *dvma_addr = (MAP_BASE +
+       *dvma_addr = (iommu->page_table_map_base +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
@@ -263,18 +251,20 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma
 
 void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
 {
-       struct sbus_iommu *iommu;
+       struct sbus_info *info;
+       struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages;
 
        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
-       iommu = sdev->bus->iommu;
+       info = sdev->bus->iommu;
+       iommu = &info->iommu;
        iopte = iommu->page_table +
-               ((dvma - MAP_BASE) >> IO_PAGE_SHIFT);
+               ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
        spin_lock_irqsave(&iommu->lock, flags);
 
-       free_npages(iommu, dvma - MAP_BASE, npages);
+       free_npages(iommu, dvma - iommu->page_table_map_base, npages);
 
        spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -285,14 +275,16 @@ void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_add
 
 dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
 {
-       struct sbus_iommu *iommu;
+       struct sbus_info *info;
+       struct iommu *iommu;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long iopte_protection;
 
-       iommu = sdev->bus->iommu;
+       info = sdev->bus->iommu;
+       iommu = &info->iommu;
 
        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();
@@ -308,7 +300,7 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int dire
        if (unlikely(!base))
                BUG();
 
-       bus_addr = (MAP_BASE +
+       bus_addr = (iommu->page_table_map_base +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -325,7 +317,9 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int dire
 
 void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-       struct sbus_iommu *iommu = sdev->bus->iommu;
+       struct sbus_info *info = sdev->bus->iommu;
+       struct iommu *iommu = &info->iommu;
+       struct strbuf *strbuf = &info->strbuf;
        iopte_t *base;
        unsigned long flags, npages, i;
 
@@ -335,15 +329,15 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, in
        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
-               ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);
+               ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
        bus_addr &= IO_PAGE_MASK;
 
        spin_lock_irqsave(&iommu->lock, flags);
-       sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+       sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
        for (i = 0; i < npages; i++)
                iopte_val(base[i]) = 0UL;
-       free_npages(iommu, bus_addr - MAP_BASE, npages);
+       free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -425,7 +419,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 
 int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
 {
-       struct sbus_iommu *iommu;
+       struct sbus_info *info;
+       struct iommu *iommu;
        unsigned long flags, npages, iopte_protection;
        iopte_t *base;
        u32 dma_base;
@@ -442,7 +437,8 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
                return 1;
        }
 
-       iommu = sdev->bus->iommu;
+       info = sdev->bus->iommu;
+       iommu = &info->iommu;
 
        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();
@@ -456,7 +452,7 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
        if (unlikely(base == NULL))
                BUG();
 
-       dma_base = MAP_BASE +
+       dma_base = iommu->page_table_map_base +
                ((base - iommu->page_table) << IO_PAGE_SHIFT);
 
        /* Normalize DVMA addresses. */
@@ -485,7 +481,9 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, i
 
 void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
 {
-       struct sbus_iommu *iommu;
+       struct sbus_info *info;
+       struct iommu *iommu;
+       struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, i, npages;
        u32 bus_addr;
@@ -493,7 +491,9 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems
        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();
 
-       iommu = sdev->bus->iommu;
+       info = sdev->bus->iommu;
+       iommu = &info->iommu;
+       strbuf = &info->strbuf;
 
        bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
@@ -505,29 +505,33 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems
                  bus_addr) >> IO_PAGE_SHIFT;
 
        base = iommu->page_table +
-               ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);
+               ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
 
        spin_lock_irqsave(&iommu->lock, flags);
-       sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+       sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
        for (i = 0; i < npages; i++)
                iopte_val(base[i]) = 0UL;
-       free_npages(iommu, bus_addr - MAP_BASE, npages);
+       free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
-       struct sbus_iommu *iommu;
+       struct sbus_info *info;
+       struct iommu *iommu;
+       struct strbuf *strbuf;
        unsigned long flags, npages;
 
-       iommu = sdev->bus->iommu;
+       info = sdev->bus->iommu;
+       iommu = &info->iommu;
+       strbuf = &info->strbuf;
 
        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;
 
        spin_lock_irqsave(&iommu->lock, flags);
-       sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+       sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -537,11 +541,15 @@ void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, siz
 
 void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
 {
-       struct sbus_iommu *iommu;
+       struct sbus_info *info;
+       struct iommu *iommu;
+       struct strbuf *strbuf;
        unsigned long flags, npages, i;
        u32 bus_addr;
 
-       iommu = sdev->bus->iommu;
+       info = sdev->bus->iommu;
+       iommu = &info->iommu;
+       strbuf = &info->strbuf;
 
        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        for (i = 0; i < nelems; i++) {
@@ -553,7 +561,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist,
                  - bus_addr) >> IO_PAGE_SHIFT;
 
        spin_lock_irqsave(&iommu->lock, flags);
-       sbus_strbuf_flush(iommu, bus_addr, npages, direction);
+       sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -564,12 +572,13 @@ void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg,
 /* Enable 64-bit DVMA mode for the given device. */
 void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
 {
-       struct sbus_iommu *iommu = sdev->bus->iommu;
+       struct sbus_info *info = sdev->bus->iommu;
+       struct iommu *iommu = &info->iommu;
        int slot = sdev->slot;
        unsigned long cfg_reg;
        u64 val;
 
-       cfg_reg = iommu->sbus_control_reg;
+       cfg_reg = iommu->write_complete_reg;
        switch (slot) {
        case 0:
                cfg_reg += 0x20UL;
@@ -704,8 +713,9 @@ static unsigned long sysio_imap_to_iclr(unsigned long imap)
 unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
 {
        struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
-       struct sbus_iommu *iommu = sbus->iommu;
-       unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+       struct sbus_info *info = sbus->iommu;
+       struct iommu *iommu = &info->iommu;
+       unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
        unsigned long imap, iclr;
        int sbus_level = 0;
 
@@ -766,8 +776,9 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
 static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
 {
        struct sbus_bus *sbus = dev_id;
-       struct sbus_iommu *iommu = sbus->iommu;
-       unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+       struct sbus_info *info = sbus->iommu;
+       struct iommu *iommu = &info->iommu;
+       unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
        unsigned long afsr_reg, afar_reg;
        unsigned long afsr, afar, error_bits;
        int reported;
@@ -838,8 +849,9 @@ static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
 static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
 {
        struct sbus_bus *sbus = dev_id;
-       struct sbus_iommu *iommu = sbus->iommu;
-       unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+       struct sbus_info *info = sbus->iommu;
+       struct iommu *iommu = &info->iommu;
+       unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
        unsigned long afsr_reg, afar_reg;
        unsigned long afsr, afar, error_bits;
        int reported;
@@ -915,12 +927,13 @@ static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
 static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
 {
        struct sbus_bus *sbus = dev_id;
-       struct sbus_iommu *iommu = sbus->iommu;
+       struct sbus_info *info = sbus->iommu;
+       struct iommu *iommu = &info->iommu;
        unsigned long afsr_reg, afar_reg, reg_base;
        unsigned long afsr, afar, error_bits;
        int reported;
 
-       reg_base = iommu->sbus_control_reg - 0x2000UL;
+       reg_base = iommu->write_complete_reg - 0x2000UL;
        afsr_reg = reg_base + SYSIO_SBUS_AFSR;
        afar_reg = reg_base + SYSIO_SBUS_AFAR;
 
@@ -982,8 +995,9 @@ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
 
 static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
 {
-       struct sbus_iommu *iommu = sbus->iommu;
-       unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+       struct sbus_info *info = sbus->iommu;
+       struct iommu *iommu = &info->iommu;
+       unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
        unsigned int irq;
        u64 control;
 
@@ -1017,18 +1031,20 @@ static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
                    SYSIO_ECNTRL_CEEN),
                   reg_base + ECC_CONTROL);
 
-       control = upa_readq(iommu->sbus_control_reg);
+       control = upa_readq(iommu->write_complete_reg);
        control |= 0x100UL; /* SBUS Error Interrupt Enable */
-       upa_writeq(control, iommu->sbus_control_reg);
+       upa_writeq(control, iommu->write_complete_reg);
 }
 
 /* Boot time initialization. */
 static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 {
-       struct linux_prom64_registers *pr;
+       const struct linux_prom64_registers *pr;
        struct device_node *dp;
-       struct sbus_iommu *iommu;
-       unsigned long regs;
+       struct sbus_info *info;
+       struct iommu *iommu;
+       struct strbuf *strbuf;
+       unsigned long regs, reg_base;
        u64 control;
        int i;
 
@@ -1043,33 +1059,42 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
        }
        regs = pr->phys_addr;
 
-       iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
-       if (iommu == NULL) {
-               prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
+       info = kzalloc(sizeof(*info), GFP_ATOMIC);
+       if (info == NULL) {
+               prom_printf("sbus_iommu_init: Fatal error, "
+                           "kmalloc(info) failed\n");
                prom_halt();
        }
 
-       /* Align on E$ line boundary. */
-       iommu = (struct sbus_iommu *)
-               (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
-                ~(SMP_CACHE_BYTES - 1UL));
+       iommu = &info->iommu;
+       strbuf = &info->strbuf;
 
-       memset(iommu, 0, sizeof(*iommu));
+       reg_base = regs + SYSIO_IOMMUREG_BASE;
+       iommu->iommu_control = reg_base + IOMMU_CONTROL;
+       iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
+       iommu->iommu_flush = reg_base + IOMMU_FLUSH;
 
-       /* Setup spinlock. */
-       spin_lock_init(&iommu->lock);
+       reg_base = regs + SYSIO_STRBUFREG_BASE;
+       strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
+       strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH;
+       strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC;
 
-       /* Init register offsets. */
-       iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
-       iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;
+       strbuf->strbuf_enabled = 1;
+
+       strbuf->strbuf_flushflag = (volatile unsigned long *)
+               ((((unsigned long)&strbuf->__flushflag_buf[0])
+                 + 63UL)
+                & ~63UL);
+       strbuf->strbuf_flushflag_pa = (unsigned long)
+               __pa(strbuf->strbuf_flushflag);
 
        /* The SYSIO SBUS control register is used for dummy reads
         * in order to ensure write completion.
         */
-       iommu->sbus_control_reg = regs + 0x2000UL;
+       iommu->write_complete_reg = regs + 0x2000UL;
 
        /* Link into SYSIO software state. */
-       sbus->iommu = iommu;
+       sbus->iommu = info;
 
        printk("SYSIO: UPA portID %x, at %016lx\n",
               sbus->portid, regs);
@@ -1077,40 +1102,44 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
        /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
        sbus_iommu_table_init(iommu, IO_TSB_SIZE);
 
-       control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
+       control = upa_readq(iommu->iommu_control);
        control = ((7UL << 16UL)        |
                   (0UL << 2UL)         |
                   (1UL << 1UL)         |
                   (1UL << 0UL));
-       upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
+       upa_writeq(control, iommu->iommu_control);
 
        /* Clean out any cruft in the IOMMU using
         * diagnostic accesses.
         */
        for (i = 0; i < 16; i++) {
-               unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
-               unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+               unsigned long dram, tag;
+
+               dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL);
+               tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
 
                dram += (unsigned long)i * 8UL;
                tag += (unsigned long)i * 8UL;
                upa_writeq(0, dram);
                upa_writeq(0, tag);
        }
-       upa_readq(iommu->sbus_control_reg);
+       upa_readq(iommu->write_complete_reg);
 
        /* Give the TSB to SYSIO. */
-       upa_writeq(__pa(iommu->page_table), iommu->iommu_regs + IOMMU_TSBBASE);
+       upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
 
        /* Setup streaming buffer, DE=1 SB_EN=1 */
        control = (1UL << 1UL) | (1UL << 0UL);
-       upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);
+       upa_writeq(control, strbuf->strbuf_control);
 
        /* Clear out the tags using diagnostics. */
        for (i = 0; i < 16; i++) {
                unsigned long ptag, ltag;
 
-               ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
-               ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
+               ptag = strbuf->strbuf_control +
+                       (STRBUF_PTAGDIAG - STRBUF_CONTROL);
+               ltag = strbuf->strbuf_control +
+                       (STRBUF_LTAGDIAG - STRBUF_CONTROL);
                ptag += (unsigned long)i * 8UL;
                ltag += (unsigned long)i * 8UL;
 
@@ -1119,9 +1148,9 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
        }
 
        /* Enable DVMA arbitration for all devices/slots. */
-       control = upa_readq(iommu->sbus_control_reg);
+       control = upa_readq(iommu->write_complete_reg);
        control |= 0x3fUL;
-       upa_writeq(control, iommu->sbus_control_reg);
+       upa_writeq(control, iommu->write_complete_reg);
 
        /* Now some Xfire specific grot... */
        if (this_is_starfire)
@@ -1133,7 +1162,7 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
 void sbus_fill_device_irq(struct sbus_dev *sdev)
 {
        struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
-       struct linux_prom_irqs *irqs;
+       const struct linux_prom_irqs *irqs;
 
        irqs = of_get_property(dp, "interrupts", NULL);
        if (!irqs) {
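
The sbus.c rework replaces the private struct sbus_iommu with struct sbus_info, which simply embeds the generic struct iommu and struct strbuf shared with the PCI code; register locations are stored as absolute addresses (iommu_control, iommu_tsbbase, strbuf_pflush, write_complete_reg, ...) instead of a base plus open-coded offsets. Every DMA path now starts with the same unwrapping, sketched here (the helper name is illustrative):

/* How the SBUS DMA paths reach the shared iommu/strbuf state. */
static void sbus_state_sketch(struct sbus_dev *sdev)
{
        struct sbus_info *info = sdev->bus->iommu;  /* now a struct sbus_info */
        struct iommu *iommu = &info->iommu;
        struct strbuf *strbuf = &info->strbuf;

        /* Mappings are based at iommu->page_table_map_base (MAP_BASE) and
         * write completion is forced through iommu->write_complete_reg.
         */
        (void) iommu;
        (void) strbuf;
}
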
index fc99f7b..d4f0a70 100644 (file)
@@ -45,7 +45,7 @@
 extern void calibrate_delay(void);
 
 /* Please don't make this stuff initdata!!!  --DaveM */
-static unsigned char boot_cpu_id;
+unsigned char boot_cpu_id;
 
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
@@ -81,8 +81,6 @@ void __init smp_store_cpu_info(int id)
        struct device_node *dp;
        int def;
 
-       /* multiplier and counter set by
-          smp_setup_percpu_timer()  */
        cpu_data(id).udelay_val                 = loops_per_jiffy;
 
        cpu_find_by_mid(id, &dp);
@@ -125,7 +123,7 @@ void __init smp_store_cpu_info(int id)
               cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
 }
 
-static void smp_setup_percpu_timer(void);
+extern void setup_sparc64_timer(void);
 
 static volatile unsigned long callin_flag = 0;
 
@@ -140,7 +138,7 @@ void __init smp_callin(void)
 
        __flush_tlb_all();
 
-       smp_setup_percpu_timer();
+       setup_sparc64_timer();
 
        if (cheetah_pcache_forced_on)
                cheetah_enable_pcache();
@@ -177,8 +175,6 @@ void cpu_panic(void)
        panic("SMP bolixed\n");
 }
 
-static unsigned long current_tick_offset __read_mostly;
-
 /* This tick register synchronization scheme is taken entirely from
  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
  *
@@ -261,7 +257,7 @@ void smp_synchronize_tick_client(void)
                                } else
                                        adj = -delta;
 
-                               tick_ops->add_tick(adj, current_tick_offset);
+                               tick_ops->add_tick(adj);
                        }
 #if DEBUG_TICK_SYNC
                        t[i].rt = rt;
@@ -1180,117 +1176,15 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
        preempt_enable();
 }
 
-#define prof_multiplier(__cpu)         cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)            cpu_data(__cpu).counter
-
-void smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
-       unsigned long compare, tick, pstate;
-       int cpu = smp_processor_id();
-       int user = user_mode(regs);
-       struct pt_regs *old_regs;
-
-       /*
-        * Check for level 14 softint.
-        */
-       {
-               unsigned long tick_mask = tick_ops->softint_mask;
-
-               if (!(get_softint() & tick_mask)) {
-                       extern void handler_irq(int, struct pt_regs *);
-
-                       handler_irq(14, regs);
-                       return;
-               }
-               clear_softint(tick_mask);
-       }
-
-       old_regs = set_irq_regs(regs);
-       do {
-               profile_tick(CPU_PROFILING);
-               if (!--prof_counter(cpu)) {
-                       irq_enter();
-
-                       if (cpu == boot_cpu_id) {
-                               kstat_this_cpu.irqs[0]++;
-                               timer_tick_interrupt(regs);
-                       }
-
-                       update_process_times(user);
-
-                       irq_exit();
-
-                       prof_counter(cpu) = prof_multiplier(cpu);
-               }
-
-               /* Guarantee that the following sequences execute
-                * uninterrupted.
-                */
-               __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
-                                    "wrpr      %0, %1, %%pstate"
-                                    : "=r" (pstate)
-                                    : "i" (PSTATE_IE));
-
-               compare = tick_ops->add_compare(current_tick_offset);
-               tick = tick_ops->get_tick();
-
-               /* Restore PSTATE_IE. */
-               __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
-                                    : /* no outputs */
-                                    : "r" (pstate));
-       } while (time_after_eq(tick, compare));
-       set_irq_regs(old_regs);
-}
-
-static void __init smp_setup_percpu_timer(void)
-{
-       int cpu = smp_processor_id();
-       unsigned long pstate;
-
-       prof_counter(cpu) = prof_multiplier(cpu) = 1;
-
-       /* Guarantee that the following sequences execute
-        * uninterrupted.
-        */
-       __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
-                            "wrpr      %0, %1, %%pstate"
-                            : "=r" (pstate)
-                            : "i" (PSTATE_IE));
-
-       tick_ops->init_tick(current_tick_offset);
-
-       /* Restore PSTATE_IE. */
-       __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
-                            : /* no outputs */
-                            : "r" (pstate));
-}
-
 void __init smp_tick_init(void)
 {
        boot_cpu_id = hard_smp_processor_id();
-       current_tick_offset = timer_tick_offset;
-
-       prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
-static DEFINE_SPINLOCK(prof_setup_lock);
-
 int setup_profiling_timer(unsigned int multiplier)
 {
-       unsigned long flags;
-       int i;
-
-       if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
-               return -EINVAL;
-
-       spin_lock_irqsave(&prof_setup_lock, flags);
-       for_each_possible_cpu(i)
-               prof_multiplier(i) = multiplier;
-       current_tick_offset = (timer_tick_offset / multiplier);
-       spin_unlock_irqrestore(&prof_setup_lock, flags);
-
-       return 0;
+       return -EINVAL;
 }
 
 static void __init smp_tune_scheduling(void)
index beffc82..d00f51a 100644 (file)
@@ -212,7 +212,6 @@ EXPORT_SYMBOL(insl);
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL(ebus_chain);
 EXPORT_SYMBOL(isa_chain);
-EXPORT_SYMBOL(pci_memspace_mask);
 EXPORT_SYMBOL(pci_alloc_consistent);
 EXPORT_SYMBOL(pci_free_consistent);
 EXPORT_SYMBOL(pci_map_single);
index f84da4f..259063f 100644 (file)
@@ -31,6 +31,9 @@
 #include <linux/profile.h>
 #include <linux/miscdevice.h>
 #include <linux/rtc.h>
+#include <linux/kernel_stat.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
 
 #include <asm/oplib.h>
 #include <asm/mostek.h>
@@ -60,6 +63,7 @@ static void __iomem *mstk48t59_regs;
 static int set_rtc_mmss(unsigned long);
 
 #define TICK_PRIV_BIT  (1UL << 63)
+#define TICKCMP_IRQ_BIT        (1UL << 63)
 
 #ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
@@ -93,21 +97,22 @@ static void tick_disable_protection(void)
        : "g2");
 }
 
-static void tick_init_tick(unsigned long offset)
+static void tick_disable_irq(void)
 {
-       tick_disable_protection();
-
        __asm__ __volatile__(
-       "       rd      %%tick, %%g1\n"
-       "       andn    %%g1, %1, %%g1\n"
        "       ba,pt   %%xcc, 1f\n"
-       "        add    %%g1, %0, %%g1\n"
+       "        nop\n"
        "       .align  64\n"
-       "1:     wr      %%g1, 0x0, %%tick_cmpr\n"
+       "1:     wr      %0, 0x0, %%tick_cmpr\n"
        "       rd      %%tick_cmpr, %%g0"
        : /* no outputs */
-       : "r" (offset), "r" (TICK_PRIV_BIT)
-       : "g1");
+       : "r" (TICKCMP_IRQ_BIT));
+}
+
+static void tick_init_tick(void)
+{
+       tick_disable_protection();
+       tick_disable_irq();
 }
 
 static unsigned long tick_get_tick(void)
@@ -121,20 +126,14 @@ static unsigned long tick_get_tick(void)
        return ret & ~TICK_PRIV_BIT;
 }
 
-static unsigned long tick_get_compare(void)
+static int tick_add_compare(unsigned long adj)
 {
-       unsigned long ret;
+       unsigned long orig_tick, new_tick, new_compare;
 
-       __asm__ __volatile__("rd        %%tick_cmpr, %0\n\t"
-                            "mov       %0, %0"
-                            : "=r" (ret));
+       __asm__ __volatile__("rd        %%tick, %0"
+                            : "=r" (orig_tick));
 
-       return ret;
-}
-
-static unsigned long tick_add_compare(unsigned long adj)
-{
-       unsigned long new_compare;
+       orig_tick &= ~TICKCMP_IRQ_BIT;
 
        /* Workaround for Spitfire Errata (#54 I think??), I discovered
         * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
@@ -145,44 +144,41 @@ static unsigned long tick_add_compare(unsigned long adj)
         * at the start of an I-cache line, and perform a dummy
         * read back from %tick_cmpr right after writing to it. -DaveM
         */
-       __asm__ __volatile__("rd        %%tick_cmpr, %0\n\t"
-                            "ba,pt     %%xcc, 1f\n\t"
-                            " add      %0, %1, %0\n\t"
+       __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t"
+                            " add      %1, %2, %0\n\t"
                             ".align    64\n"
                             "1:\n\t"
                             "wr        %0, 0, %%tick_cmpr\n\t"
-                            "rd        %%tick_cmpr, %%g0"
-                            : "=&r" (new_compare)
-                            : "r" (adj));
+                            "rd        %%tick_cmpr, %%g0\n\t"
+                            : "=r" (new_compare)
+                            : "r" (orig_tick), "r" (adj));
 
-       return new_compare;
+       __asm__ __volatile__("rd        %%tick, %0"
+                            : "=r" (new_tick));
+       new_tick &= ~TICKCMP_IRQ_BIT;
+
+       return ((long)(new_tick - (orig_tick+adj))) > 0L;
 }
 
-static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
+static unsigned long tick_add_tick(unsigned long adj)
 {
-       unsigned long new_tick, tmp;
+       unsigned long new_tick;
 
        /* Also need to handle Blackbird bug here too. */
        __asm__ __volatile__("rd        %%tick, %0\n\t"
-                            "add       %0, %2, %0\n\t"
+                            "add       %0, %1, %0\n\t"
                             "wrpr      %0, 0, %%tick\n\t"
-                            "andn      %0, %4, %1\n\t"
-                            "ba,pt     %%xcc, 1f\n\t"
-                            " add      %1, %3, %1\n\t"
-                            ".align    64\n"
-                            "1:\n\t"
-                            "wr        %1, 0, %%tick_cmpr\n\t"
-                            "rd        %%tick_cmpr, %%g0"
-                            : "=&r" (new_tick), "=&r" (tmp)
-                            : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+                            : "=&r" (new_tick)
+                            : "r" (adj));
 
        return new_tick;
 }
 
 static struct sparc64_tick_ops tick_operations __read_mostly = {
+       .name           =       "tick",
        .init_tick      =       tick_init_tick,
+       .disable_irq    =       tick_disable_irq,
        .get_tick       =       tick_get_tick,
-       .get_compare    =       tick_get_compare,
        .add_tick       =       tick_add_tick,
        .add_compare    =       tick_add_compare,
        .softint_mask   =       1UL << 0,
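
The reworked add_compare() implementations above (tick, stick and hbtick alike) no longer hand back the programmed compare value; they report whether the counter had already run past the target by the time the compare register was written, so the clockevent code later in this file can return -ETIME. A minimal userspace sketch of that wrap-safe test, with a hypothetical fake_tick variable standing in for the %tick register:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_tick;                    /* stands in for %tick */

    static int compare_already_passed(uint64_t target)
    {
            /* Same idiom as ((long)(new_tick - (orig_tick + adj))) > 0L:
             * a positive signed difference means the target is behind us
             * and the compare interrupt was missed. */
            return (int64_t)(fake_tick - target) > 0;
    }

    int main(void)
    {
            fake_tick = 1000;
            uint64_t target = fake_tick + 50;     /* program 50 ticks ahead */

            fake_tick += 10;                      /* not there yet */
            printf("%d\n", compare_already_passed(target));   /* prints 0 */

            fake_tick += 100;                     /* overshot the target */
            printf("%d\n", compare_already_passed(target));   /* prints 1 */
            return 0;
    }
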
@@ -190,7 +186,15 @@ static struct sparc64_tick_ops tick_operations __read_mostly = {
 
 struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
 
-static void stick_init_tick(unsigned long offset)
+static void stick_disable_irq(void)
+{
+       __asm__ __volatile__(
+       "wr     %0, 0x0, %%asr25"
+       : /* no outputs */
+       : "r" (TICKCMP_IRQ_BIT));
+}
+
+static void stick_init_tick(void)
 {
        /* Writes to the %tick and %stick register are not
         * allowed on sun4v.  The Hypervisor controls that
@@ -198,6 +202,7 @@ static void stick_init_tick(unsigned long offset)
         */
        if (tlb_type != hypervisor) {
                tick_disable_protection();
+               tick_disable_irq();
 
                /* Let the user get at STICK too. */
                __asm__ __volatile__(
@@ -209,14 +214,7 @@ static void stick_init_tick(unsigned long offset)
                : "g1", "g2");
        }
 
-       __asm__ __volatile__(
-       "       rd      %%asr24, %%g1\n"
-       "       andn    %%g1, %1, %%g1\n"
-       "       add     %%g1, %0, %%g1\n"
-       "       wr      %%g1, 0x0, %%asr25"
-       : /* no outputs */
-       : "r" (offset), "r" (TICK_PRIV_BIT)
-       : "g1");
+       stick_disable_irq();
 }
 
 static unsigned long stick_get_tick(void)
@@ -229,49 +227,43 @@ static unsigned long stick_get_tick(void)
        return ret & ~TICK_PRIV_BIT;
 }
 
-static unsigned long stick_get_compare(void)
+static unsigned long stick_add_tick(unsigned long adj)
 {
-       unsigned long ret;
-
-       __asm__ __volatile__("rd        %%asr25, %0"
-                            : "=r" (ret));
-
-       return ret;
-}
-
-static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
-{
-       unsigned long new_tick, tmp;
+       unsigned long new_tick;
 
        __asm__ __volatile__("rd        %%asr24, %0\n\t"
-                            "add       %0, %2, %0\n\t"
+                            "add       %0, %1, %0\n\t"
                             "wr        %0, 0, %%asr24\n\t"
-                            "andn      %0, %4, %1\n\t"
-                            "add       %1, %3, %1\n\t"
-                            "wr        %1, 0, %%asr25"
-                            : "=&r" (new_tick), "=&r" (tmp)
-                            : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+                            : "=&r" (new_tick)
+                            : "r" (adj));
 
        return new_tick;
 }
 
-static unsigned long stick_add_compare(unsigned long adj)
+static int stick_add_compare(unsigned long adj)
 {
-       unsigned long new_compare;
+       unsigned long orig_tick, new_tick;
 
-       __asm__ __volatile__("rd        %%asr25, %0\n\t"
-                            "add       %0, %1, %0\n\t"
-                            "wr        %0, 0, %%asr25"
-                            : "=&r" (new_compare)
-                            : "r" (adj));
+       __asm__ __volatile__("rd        %%asr24, %0"
+                            : "=r" (orig_tick));
+       orig_tick &= ~TICKCMP_IRQ_BIT;
+
+       __asm__ __volatile__("wr        %0, 0, %%asr25"
+                            : /* no outputs */
+                            : "r" (orig_tick + adj));
+
+       __asm__ __volatile__("rd        %%asr24, %0"
+                            : "=r" (new_tick));
+       new_tick &= ~TICKCMP_IRQ_BIT;
 
-       return new_compare;
+       return ((long)(new_tick - (orig_tick+adj))) > 0L;
 }
 
 static struct sparc64_tick_ops stick_operations __read_mostly = {
+       .name           =       "stick",
        .init_tick      =       stick_init_tick,
+       .disable_irq    =       stick_disable_irq,
        .get_tick       =       stick_get_tick,
-       .get_compare    =       stick_get_compare,
        .add_tick       =       stick_add_tick,
        .add_compare    =       stick_add_compare,
        .softint_mask   =       1UL << 16,
@@ -320,20 +312,6 @@ static unsigned long __hbird_read_stick(void)
        return ret;
 }
 
-static unsigned long __hbird_read_compare(void)
-{
-       unsigned long low, high;
-       unsigned long addr = HBIRD_STICKCMP_ADDR;
-
-       __asm__ __volatile__("ldxa      [%2] %3, %0\n\t"
-                            "add       %2, 0x8, %2\n\t"
-                            "ldxa      [%2] %3, %1"
-                            : "=&r" (low), "=&r" (high), "=&r" (addr)
-                            : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));
-
-       return (high << 32UL) | low;
-}
-
 static void __hbird_write_stick(unsigned long val)
 {
        unsigned long low = (val & 0xffffffffUL);
@@ -364,10 +342,13 @@ static void __hbird_write_compare(unsigned long val)
                               "i" (ASI_PHYS_BYPASS_EC_E));
 }
 
-static void hbtick_init_tick(unsigned long offset)
+static void hbtick_disable_irq(void)
 {
-       unsigned long val;
+       __hbird_write_compare(TICKCMP_IRQ_BIT);
+}
 
+static void hbtick_init_tick(void)
+{
        tick_disable_protection();
 
        /* XXX This seems to be necessary to 'jumpstart' Hummingbird
@@ -377,8 +358,7 @@ static void hbtick_init_tick(unsigned long offset)
         */
        __hbird_write_stick(__hbird_read_stick());
 
-       val = __hbird_read_stick() & ~TICK_PRIV_BIT;
-       __hbird_write_compare(val + offset);
+       hbtick_disable_irq();
 }
 
 static unsigned long hbtick_get_tick(void)
@@ -386,122 +366,95 @@ static unsigned long hbtick_get_tick(void)
        return __hbird_read_stick() & ~TICK_PRIV_BIT;
 }
 
-static unsigned long hbtick_get_compare(void)
-{
-       return __hbird_read_compare();
-}
-
-static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
+static unsigned long hbtick_add_tick(unsigned long adj)
 {
        unsigned long val;
 
        val = __hbird_read_stick() + adj;
        __hbird_write_stick(val);
 
-       val &= ~TICK_PRIV_BIT;
-       __hbird_write_compare(val + offset);
-
        return val;
 }
 
-static unsigned long hbtick_add_compare(unsigned long adj)
+static int hbtick_add_compare(unsigned long adj)
 {
-       unsigned long val = __hbird_read_compare() + adj;
+       unsigned long val = __hbird_read_stick();
+       unsigned long val2;
 
-       val &= ~TICK_PRIV_BIT;
+       val &= ~TICKCMP_IRQ_BIT;
+       val += adj;
        __hbird_write_compare(val);
 
-       return val;
+       val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;
+
+       return ((long)(val2 - val)) > 0L;
 }
 
 static struct sparc64_tick_ops hbtick_operations __read_mostly = {
+       .name           =       "hbtick",
        .init_tick      =       hbtick_init_tick,
+       .disable_irq    =       hbtick_disable_irq,
        .get_tick       =       hbtick_get_tick,
-       .get_compare    =       hbtick_get_compare,
        .add_tick       =       hbtick_add_tick,
        .add_compare    =       hbtick_add_compare,
        .softint_mask   =       1UL << 0,
 };
 
-/* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
- *
- * NOTE: On SUN5 systems the ticker interrupt comes in using 2
- *       interrupts, one at level14 and one with softint bit 0.
- */
-unsigned long timer_tick_offset __read_mostly;
-
 static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
 
 #define TICK_SIZE (tick_nsec / 1000)
 
-static inline void timer_check_rtc(void)
-{
-       /* last time the cmos clock got updated */
-       static long last_rtc_update;
-
-       /* Determine when to update the Mostek clock. */
-       if (ntp_synced() &&
-           xtime.tv_sec > last_rtc_update + 660 &&
-           (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
-           (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
-               if (set_rtc_mmss(xtime.tv_sec) == 0)
-                       last_rtc_update = xtime.tv_sec;
-               else
-                       last_rtc_update = xtime.tv_sec - 600;
-                       /* do it again in 60 s */
-       }
-}
+#define USEC_AFTER     500000
+#define USEC_BEFORE    500000
 
-irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-       unsigned long ticks, compare, pstate;
+static void sync_cmos_clock(unsigned long dummy);
 
-       write_seqlock(&xtime_lock);
+static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
 
-       do {
-#ifndef CONFIG_SMP
-               profile_tick(CPU_PROFILING);
-               update_process_times(user_mode(get_irq_regs()));
-#endif
-               do_timer(1);
+static void sync_cmos_clock(unsigned long dummy)
+{
+       struct timeval now, next;
+       int fail = 1;
 
-               /* Guarantee that the following sequences execute
-                * uninterrupted.
+       /*
+        * If we have an externally synchronized Linux clock, then update
+        * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+        * called as close as possible to 500 ms before the new second starts.
+        * This code is run on a timer.  If the clock is set, that timer
+        * may not expire at the correct time.  Thus, we adjust...
+        */
+       if (!ntp_synced())
+               /*
+                * Not synced, exit, do not restart a timer (if one is
+                * running, let it run out).
                 */
-               __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
-                                    "wrpr      %0, %1, %%pstate"
-                                    : "=r" (pstate)
-                                    : "i" (PSTATE_IE));
+               return;
 
-               compare = tick_ops->add_compare(timer_tick_offset);
-               ticks = tick_ops->get_tick();
+       do_gettimeofday(&now);
+       if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
+           now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
+               fail = set_rtc_mmss(now.tv_sec);
 
-               /* Restore PSTATE_IE. */
-               __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
-                                    : /* no outputs */
-                                    : "r" (pstate));
-       } while (time_after_eq(ticks, compare));
+       next.tv_usec = USEC_AFTER - now.tv_usec;
+       if (next.tv_usec <= 0)
+               next.tv_usec += USEC_PER_SEC;
 
-       timer_check_rtc();
+       if (!fail)
+               next.tv_sec = 659;
+       else
+               next.tv_sec = 0;
 
-       write_sequnlock(&xtime_lock);
-
-       return IRQ_HANDLED;
+       if (next.tv_usec >= USEC_PER_SEC) {
+               next.tv_sec++;
+               next.tv_usec -= USEC_PER_SEC;
+       }
+       mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
 }
 
-#ifdef CONFIG_SMP
-void timer_tick_interrupt(struct pt_regs *regs)
+void notify_arch_cmos_timer(void)
 {
-       write_seqlock(&xtime_lock);
-
-       do_timer(1);
-
-       timer_check_rtc();
-
-       write_sequnlock(&xtime_lock);
+       mod_timer(&sync_cmos_timer, jiffies + 1);
 }
-#endif
 
 /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
 static void __init kick_start_clock(void)
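
sync_cmos_clock() above rearms itself so the next attempt lands roughly 500 ms into a second, about 11 minutes out after a successful RTC write and almost immediately otherwise. A userspace sketch of that rearm arithmetic:

    #include <stdio.h>

    #define USEC_PER_SEC 1000000L
    #define USEC_AFTER   500000L

    static void next_delay(long now_usec, int rtc_write_ok,
                           long *sec, long *usec)
    {
            long u = USEC_AFTER - now_usec;   /* land ~500 ms into a second */
            long s = rtc_write_ok ? 659 : 0;  /* ~11 minutes between updates */

            if (u <= 0)
                    u += USEC_PER_SEC;
            if (u >= USEC_PER_SEC) {
                    s++;
                    u -= USEC_PER_SEC;
            }
            *sec = s;
            *usec = u;
    }

    int main(void)
    {
            long s, u;

            next_delay(123456, 1, &s, &u);    /* early in the second, synced */
            printf("delay %ld.%06lds\n", s, u);   /* 659.376544s */

            next_delay(700000, 0, &s, &u);    /* past the .5 s mark, retry soon */
            printf("delay %ld.%06lds\n", s, u);   /* 0.800000s */
            return 0;
    }
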
@@ -751,7 +704,7 @@ retry:
        return -EOPNOTSUPP;
 }
 
-static int __init clock_model_matches(char *model)
+static int __init clock_model_matches(const char *model)
 {
        if (strcmp(model, "mk48t02") &&
            strcmp(model, "mk48t08") &&
@@ -768,7 +721,7 @@ static int __init clock_model_matches(char *model)
 static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
 {
        struct device_node *dp = op->node;
-       char *model = of_get_property(dp, "model", NULL);
+       const char *model = of_get_property(dp, "model", NULL);
        unsigned long size, flags;
        void __iomem *regs;
 
@@ -900,7 +853,6 @@ static unsigned long sparc64_init_timers(void)
                prop = of_find_property(dp, "stick-frequency", NULL);
        }
        clock = *(unsigned int *) prop->value;
-       timer_tick_offset = clock / HZ;
 
 #ifdef CONFIG_SMP
        smp_tick_init();
@@ -909,26 +861,6 @@ static unsigned long sparc64_init_timers(void)
        return clock;
 }
 
-static void sparc64_start_timers(void)
-{
-       unsigned long pstate;
-
-       /* Guarantee that the following sequences execute
-        * uninterrupted.
-        */
-       __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
-                            "wrpr      %0, %1, %%pstate"
-                            : "=r" (pstate)
-                            : "i" (PSTATE_IE));
-
-       tick_ops->init_tick(timer_tick_offset);
-
-       /* Restore PSTATE_IE. */
-       __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
-                            : /* no outputs */
-                            : "r" (pstate));
-}
-
 struct freq_table {
        unsigned long clock_tick_ref;
        unsigned int ref_freq;
@@ -975,29 +907,148 @@ static struct notifier_block sparc64_cpufreq_notifier_block = {
 
 #endif /* CONFIG_CPU_FREQ */
 
-static struct time_interpolator sparc64_cpu_interpolator = {
-       .source         =       TIME_SOURCE_CPU,
-       .shift          =       16,
-       .mask           =       0xffffffffffffffffLL
+static int sparc64_next_event(unsigned long delta,
+                             struct clock_event_device *evt)
+{
+       return tick_ops->add_compare(delta) ? -ETIME : 0;
+}
+
+static void sparc64_timer_setup(enum clock_event_mode mode,
+                               struct clock_event_device *evt)
+{
+       switch (mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               break;
+
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               tick_ops->disable_irq();
+               break;
+
+       case CLOCK_EVT_MODE_PERIODIC:
+       case CLOCK_EVT_MODE_UNUSED:
+               WARN_ON(1);
+               break;
+       };
+}
+
+static struct clock_event_device sparc64_clockevent = {
+       .features       = CLOCK_EVT_FEAT_ONESHOT,
+       .set_mode       = sparc64_timer_setup,
+       .set_next_event = sparc64_next_event,
+       .rating         = 100,
+       .shift          = 30,
+       .irq            = -1,
 };
+static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
-/* The quotient formula is taken from the IA64 port. */
-#define SPARC64_NSEC_PER_CYC_SHIFT     10UL
-void __init time_init(void)
+void timer_interrupt(int irq, struct pt_regs *regs)
 {
-       unsigned long clock = sparc64_init_timers();
+       struct pt_regs *old_regs = set_irq_regs(regs);
+       unsigned long tick_mask = tick_ops->softint_mask;
+       int cpu = smp_processor_id();
+       struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
+
+       clear_softint(tick_mask);
+
+       irq_enter();
 
-       sparc64_cpu_interpolator.frequency = clock;
-       register_time_interpolator(&sparc64_cpu_interpolator);
+       kstat_this_cpu.irqs[0]++;
 
-       /* Now that the interpolator is registered, it is
-        * safe to start the timer ticking.
+       if (unlikely(!evt->event_handler)) {
+               printk(KERN_WARNING
+                      "Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
+       } else
+               evt->event_handler(evt);
+
+       irq_exit();
+
+       set_irq_regs(old_regs);
+}
+
+void __devinit setup_sparc64_timer(void)
+{
+       struct clock_event_device *sevt;
+       unsigned long pstate;
+
+       /* Guarantee that the following sequences execute
+        * uninterrupted.
         */
-       sparc64_start_timers();
+       __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
+                            "wrpr      %0, %1, %%pstate"
+                            : "=r" (pstate)
+                            : "i" (PSTATE_IE));
+
+       tick_ops->init_tick();
+
+       /* Restore PSTATE_IE. */
+       __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
+                            : /* no outputs */
+                            : "r" (pstate));
+
+       sevt = &__get_cpu_var(sparc64_events);
+
+       memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
+       sevt->cpumask = cpumask_of_cpu(smp_processor_id());
+
+       clockevents_register_device(sevt);
+}
+
+#define SPARC64_NSEC_PER_CYC_SHIFT     32UL
+
+static struct clocksource clocksource_tick = {
+       .rating         = 100,
+       .mask           = CLOCKSOURCE_MASK(64),
+       .shift          = 16,
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __init setup_clockevent_multiplier(unsigned long hz)
+{
+       unsigned long mult, shift = 32;
+
+       while (1) {
+               mult = div_sc(hz, NSEC_PER_SEC, shift);
+               if (mult && (mult >> 32UL) == 0UL)
+                       break;
+
+               shift--;
+       }
+
+       sparc64_clockevent.shift = shift;
+       sparc64_clockevent.mult = mult;
+}
+
+void __init time_init(void)
+{
+       unsigned long clock = sparc64_init_timers();
 
        timer_ticks_per_nsec_quotient =
-               (((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
-                 (clock / 2)) / clock);
+               clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT);
+
+       clocksource_tick.name = tick_ops->name;
+       clocksource_tick.mult =
+               clocksource_hz2mult(clock,
+                                   clocksource_tick.shift);
+       clocksource_tick.read = tick_ops->get_tick;
+
+       printk("clocksource: mult[%x] shift[%d]\n",
+              clocksource_tick.mult, clocksource_tick.shift);
+
+       clocksource_register(&clocksource_tick);
+
+       sparc64_clockevent.name = tick_ops->name;
+
+       setup_clockevent_multiplier(clock);
+
+       sparc64_clockevent.max_delta_ns =
+               clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent);
+       sparc64_clockevent.min_delta_ns =
+               clockevent_delta2ns(0xF, &sparc64_clockevent);
+
+       printk("clockevent: mult[%lx] shift[%d]\n",
+              sparc64_clockevent.mult, sparc64_clockevent.shift);
+
+       setup_sparc64_timer();
 
 #ifdef CONFIG_CPU_FREQ
        cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
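
The clockevent and clocksource conversion above relies on fixed-point mult/shift factors: setup_clockevent_multiplier() searches downward for the largest shift whose multiplier still fits in 32 bits, and time_init() derives the clocksource multiplier with clocksource_hz2mult(). A userspace sketch of the same arithmetic, assuming a 400 MHz tick rate purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* div_sc()-style scaled divide: mult = (ticks << shift) / nsec */
    static uint64_t div_sc(uint64_t ticks, uint64_t nsec, unsigned int shift)
    {
            return (ticks << shift) / nsec;
    }

    int main(void)
    {
            uint64_t hz = 400000000ULL;  /* assumed 400 MHz tick, for illustration */
            uint64_t mult;
            unsigned int shift = 32;

            /* Same search as setup_clockevent_multiplier(): take the largest
             * shift whose multiplier still fits in 32 bits. */
            while (1) {
                    mult = div_sc(hz, NSEC_PER_SEC, shift);
                    if (mult && (mult >> 32) == 0)
                            break;
                    shift--;
            }
            printf("mult=%llx shift=%u\n", (unsigned long long)mult, shift);

            /* With those factors a delay in ns maps to counter cycles as
             * cycles = (ns * mult) >> shift; 1 ms at 400 MHz is ~400000. */
            uint64_t ns = 1000000;
            printf("1ms -> %llu cycles\n",
                   (unsigned long long)((ns * mult) >> shift));
            return 0;
    }
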
@@ -1126,10 +1177,6 @@ static int set_rtc_mmss(unsigned long nowtime)
 #define RTC_IS_OPEN            0x01    /* means /dev/rtc is in use     */
 static unsigned char mini_rtc_status;  /* bitmapped status byte.       */
 
-/* months start at 0 now */
-static unsigned char days_in_mo[] =
-{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
 #define FEBRUARY       2
 #define        STARTOFTIME     1970
 #define SECDAY         86400L
@@ -1278,8 +1325,7 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file,
 
        case RTC_SET_TIME:      /* Set the RTC */
            {
-               int year;
-               unsigned char leap_yr;
+               int year, days;
 
                if (!capable(CAP_SYS_TIME))
                        return -EACCES;
@@ -1288,14 +1334,14 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file,
                        return -EFAULT;
 
                year = wtime.tm_year + 1900;
-               leap_yr = ((!(year % 4) && (year % 100)) ||
-                          !(year % 400));
+               days = month_days[wtime.tm_mon] +
+                      ((wtime.tm_mon == 1) && leapyear(year));
 
-               if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1))
+               if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) ||
+                   (wtime.tm_mday < 1))
                        return -EINVAL;
 
-               if (wtime.tm_mday < 0 || wtime.tm_mday >
-                   (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr)))
+               if (wtime.tm_mday < 0 || wtime.tm_mday > days)
                        return -EINVAL;
 
                if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 ||
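
The RTC_SET_TIME change above replaces the open-coded leap_yr expression with a days-in-month lookup: month_days[wtime.tm_mon] plus one for a leap-year February. A self-contained rendering of that check, with the table and a leapyear() helper written out here (the kernel file is assumed to provide its own alongside the FEBRUARY/STARTOFTIME definitions shown earlier), using the same Gregorian rule as the removed leap_yr line:

    #include <stdio.h>

    static const int month_days[12] = {
            31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
    };

    static int leapyear(int year)
    {
            return (!(year % 4) && (year % 100)) || !(year % 400);
    }

    static int days_in_month(int mon /* 0-11 */, int year)
    {
            return month_days[mon] + (mon == 1 && leapyear(year));
    }

    int main(void)
    {
            printf("Feb 2000: %d\n", days_in_month(1, 2000));  /* 29 */
            printf("Feb 1900: %d\n", days_in_month(1, 1900));  /* 28 */
            printf("Feb 2004: %d\n", days_in_month(1, 2004));  /* 29 */
            return 0;
    }
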
index d7d2a8b..7575aa3 100644 (file)
@@ -60,11 +60,7 @@ tl0_irq4:    BTRAP(0x44)
 tl0_irq5:      TRAP_IRQ(handler_irq, 5)
 tl0_irq6:      BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
 tl0_irq10:     BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
-#ifndef CONFIG_SMP
-tl0_irq14:     TRAP_IRQ(timer_irq, 14)
-#else
-tl0_irq14:     TICK_SMP_IRQ
-#endif
+tl0_irq14:     TRAP_IRQ(timer_interrupt, 14)
 tl0_irq15:     TRAP_IRQ(handler_irq, 15)
 tl0_resv050:   BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
 tl0_resv056:   BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
index f146071..cafadcb 100644 (file)
@@ -122,24 +122,19 @@ static void __init read_obp_memory(const char *property,
                                size = 0UL;
                        base = new_base;
                }
-               regs[i].phys_addr = base;
-               regs[i].reg_size = size;
-       }
-
-       for (i = 0; i < ents; i++) {
-               if (regs[i].reg_size == 0UL) {
-                       int j;
-
-                       for (j = i; j < ents - 1; j++) {
-                               regs[j].phys_addr =
-                                       regs[j+1].phys_addr;
-                               regs[j].reg_size =
-                                       regs[j+1].reg_size;
-                       }
-
-                       ents--;
+               if (size == 0UL) {
+                       /* If it is empty, simply get rid of it.
+                        * This simplifies the logic of the other
+                        * functions that process these arrays.
+                        */
+                       memmove(&regs[i], &regs[i + 1],
+                               (ents - i - 1) * sizeof(regs[0]));
                        i--;
+                       ents--;
+                       continue;
                }
+               regs[i].phys_addr = base;
+               regs[i].reg_size = size;
        }
 
        *num_ents = ents;
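
The read_obp_memory() change above drops zero-sized entries in place: memmove() slides the tail of the array down over the empty slot and the loop re-examines the same index. A small userspace sketch of the same compaction, with a hypothetical struct region standing in for the OBP register entries:

    #include <stdio.h>
    #include <string.h>

    struct region {                 /* hypothetical stand-in for the OBP regs */
            unsigned long base;
            unsigned long size;
    };

    static void drop_empty(struct region *regs, int *num_ents)
    {
            int ents = *num_ents;
            int i;

            for (i = 0; i < ents; i++) {
                    if (regs[i].size != 0UL)
                            continue;
                    /* Shift the remaining entries over the empty slot and
                     * look at index i again on the next iteration. */
                    memmove(&regs[i], &regs[i + 1],
                            (ents - i - 1) * sizeof(regs[0]));
                    ents--;
                    i--;
            }
            *num_ents = ents;
    }

    int main(void)
    {
            struct region regs[] = {
                    { 0x0000, 0x100 }, { 0x1000, 0 }, { 0x2000, 0x200 },
            };
            int n = 3;

            drop_empty(regs, &n);
            for (int i = 0; i < n; i++)
                    printf("base=%#lx size=%#lx\n", regs[i].base, regs[i].size);
            return 0;               /* prints the two non-empty entries */
    }
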
@@ -154,15 +149,6 @@ unsigned long *sparc64_valid_addr_bitmap __read_mostly;
 unsigned long kern_base __read_mostly;
 unsigned long kern_size __read_mostly;
 
-/* get_new_mmu_context() uses "cache + 1".  */
-DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
-#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
-unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
-
-/* References to special section boundaries */
-extern char  _start[], _end[];
-
 /* Initial ramdisk setup */
 extern unsigned long sparc_ramdisk_image64;
 extern unsigned int sparc_ramdisk_image;
@@ -406,19 +392,70 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
        if (tlb_type == spitfire) {
                unsigned long kaddr;
 
-               for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
-                       __flush_icache_page(__get_phys(kaddr));
+               /* This code only runs on Spitfire cpus so this is
+                * why we can assume _PAGE_PADDR_4U.
+                */
+               for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
+                       unsigned long paddr, mask = _PAGE_PADDR_4U;
+
+                       if (kaddr >= PAGE_OFFSET)
+                               paddr = kaddr & mask;
+                       else {
+                               pgd_t *pgdp = pgd_offset_k(kaddr);
+                               pud_t *pudp = pud_offset(pgdp, kaddr);
+                               pmd_t *pmdp = pmd_offset(pudp, kaddr);
+                               pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
+
+                               paddr = pte_val(*ptep) & mask;
+                       }
+                       __flush_icache_page(paddr);
+               }
        }
 }
 
 void show_mem(void)
 {
-       printk("Mem-info:\n");
+       unsigned long total = 0, reserved = 0;
+       unsigned long shared = 0, cached = 0;
+       pg_data_t *pgdat;
+
+       printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6ldkB\n",
+       printk(KERN_INFO "Free swap:       %6ldkB\n",
               nr_swap_pages << (PAGE_SHIFT-10));
-       printk("%ld pages of RAM\n", num_physpages);
-       printk("%lu free pages\n", nr_free_pages());
+       for_each_online_pgdat(pgdat) {
+               unsigned long i, flags;
+
+               pgdat_resize_lock(pgdat, &flags);
+               for (i = 0; i < pgdat->node_spanned_pages; i++) {
+                       struct page *page = pgdat_page_nr(pgdat, i);
+                       total++;
+                       if (PageReserved(page))
+                               reserved++;
+                       else if (PageSwapCache(page))
+                               cached++;
+                       else if (page_count(page))
+                               shared += page_count(page) - 1;
+               }
+               pgdat_resize_unlock(pgdat, &flags);
+       }
+
+       printk(KERN_INFO "%lu pages of RAM\n", total);
+       printk(KERN_INFO "%lu reserved pages\n", reserved);
+       printk(KERN_INFO "%lu pages shared\n", shared);
+       printk(KERN_INFO "%lu pages swap cached\n", cached);
+
+       printk(KERN_INFO "%lu pages dirty\n",
+              global_page_state(NR_FILE_DIRTY));
+       printk(KERN_INFO "%lu pages writeback\n",
+              global_page_state(NR_WRITEBACK));
+       printk(KERN_INFO "%lu pages mapped\n",
+              global_page_state(NR_FILE_MAPPED));
+       printk(KERN_INFO "%lu pages slab\n",
+               global_page_state(NR_SLAB_RECLAIMABLE) +
+               global_page_state(NR_SLAB_UNRECLAIMABLE));
+       printk(KERN_INFO "%lu pages pagetables\n",
+              global_page_state(NR_PAGETABLE));
 }
 
 void mmu_info(struct seq_file *m)
@@ -658,6 +695,13 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
 }
 #endif /* DCACHE_ALIASING_POSSIBLE */
 
+/* get_new_mmu_context() uses "cache + 1".  */
+DEFINE_SPINLOCK(ctx_alloc_lock);
+unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+#define MAX_CTX_NR     (1UL << CTX_NR_BITS)
+#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
+DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
  *
@@ -717,95 +761,6 @@ out:
                smp_new_mmu_context_version();
 }
 
-void sparc_ultra_dump_itlb(void)
-{
-        int slot;
-
-       if (tlb_type == spitfire) {
-               printk ("Contents of itlb: ");
-               for (slot = 0; slot < 14; slot++) printk ("    ");
-               printk ("%2x:%016lx,%016lx\n",
-                       0,
-                       spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
-               for (slot = 1; slot < 64; slot+=3) {
-                       printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
-                               slot,
-                               spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
-                               slot+1,
-                               spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
-                               slot+2,
-                               spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
-               }
-       } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-               printk ("Contents of itlb0:\n");
-               for (slot = 0; slot < 16; slot+=2) {
-                       printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-                               slot,
-                               cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
-                               slot+1,
-                               cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
-               }
-               printk ("Contents of itlb2:\n");
-               for (slot = 0; slot < 128; slot+=2) {
-                       printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-                               slot,
-                               cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
-                               slot+1,
-                               cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
-               }
-       }
-}
-
-void sparc_ultra_dump_dtlb(void)
-{
-        int slot;
-
-       if (tlb_type == spitfire) {
-               printk ("Contents of dtlb: ");
-               for (slot = 0; slot < 14; slot++) printk ("    ");
-               printk ("%2x:%016lx,%016lx\n", 0,
-                       spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
-               for (slot = 1; slot < 64; slot+=3) {
-                       printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
-                               slot,
-                               spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
-                               slot+1,
-                               spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
-                               slot+2,
-                               spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
-               }
-       } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-               printk ("Contents of dtlb0:\n");
-               for (slot = 0; slot < 16; slot+=2) {
-                       printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-                               slot,
-                               cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
-                               slot+1,
-                               cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
-               }
-               printk ("Contents of dtlb2:\n");
-               for (slot = 0; slot < 512; slot+=2) {
-                       printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-                               slot,
-                               cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
-                               slot+1,
-                               cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
-               }
-               if (tlb_type == cheetah_plus) {
-                       printk ("Contents of dtlb3:\n");
-                       for (slot = 0; slot < 512; slot+=2) {
-                               printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
-                                       slot,
-                                       cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
-                                       slot+1,
-                                       cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
-                       }
-               }
-       }
-}
-
-extern unsigned long cmdline_memory_size;
-
 /* Find a free area for the bootmem map, avoiding the kernel image
  * and the initial ramdisk.
  */
@@ -815,8 +770,8 @@ static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
        unsigned long avoid_start, avoid_end, bootmap_size;
        int i;
 
-       bootmap_size = ((end_pfn - start_pfn) + 7) / 8;
-       bootmap_size = ALIGN(bootmap_size, sizeof(long));
+       bootmap_size = bootmem_bootmap_pages(end_pfn - start_pfn);
+       bootmap_size <<= PAGE_SHIFT;
 
        avoid_start = avoid_end = 0;
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -983,6 +938,20 @@ static void __init trim_pavail(unsigned long *cur_size_p,
        }
 }
 
+/* About pages_avail, this is the value we will use to calculate
+ * the zholes_size[] argument given to free_area_init_node().  The
+ * page allocator uses this to calculate nr_kernel_pages,
+ * nr_all_pages and zone->present_pages.  On NUMA it is used
+ * to calculate zone->min_unmapped_pages and zone->min_slab_pages.
+ *
+ * So this number should really be set to what the page allocator
+ * actually ends up with.  This means:
+ * 1) It should include bootmem map pages, we'll release those.
+ * 2) It should not include the kernel image, except for the
+ *    __init sections which we will also release.
+ * 3) It should include the initrd image, since we'll release
+ *    that too.
+ */
 static unsigned long __init bootmem_init(unsigned long *pages_avail,
                                         unsigned long phys_base)
 {
@@ -1069,7 +1038,6 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
                        initrd_start, initrd_end);
 #endif
                reserve_bootmem(initrd_start, size);
-               *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
                initrd_start += PAGE_OFFSET;
                initrd_end += PAGE_OFFSET;
@@ -1082,6 +1050,11 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
        reserve_bootmem(kern_base, kern_size);
        *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
 
+       /* Add back in the initmem pages. */
+       size = ((unsigned long)(__init_end) & PAGE_MASK) -
+               PAGE_ALIGN((unsigned long)__init_begin);
+       *pages_avail += size >> PAGE_SHIFT;
+
        /* Reserve the bootmem map.   We do not account for it
         * in pages_avail because we will release that memory
         * in free_all_bootmem.
@@ -1092,7 +1065,6 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
                    (bootmap_pfn << PAGE_SHIFT), size);
 #endif
        reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
-       *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
        for (i = 0; i < pavail_ents; i++) {
                unsigned long start_pfn, end_pfn;
@@ -1584,6 +1556,10 @@ void __init mem_init(void)
 #ifdef CONFIG_DEBUG_BOOTMEM
        prom_printf("mem_init: Calling free_all_bootmem().\n");
 #endif
+
+       /* We subtract one to account for the mem_map_zero page
+        * allocated below.
+        */
        totalram_pages = num_physpages = free_all_bootmem() - 1;
 
        /*
@@ -1883,62 +1859,6 @@ static unsigned long kern_large_tte(unsigned long paddr)
        return val | paddr;
 }
 
-/*
- * Translate PROM's mapping we capture at boot time into physical address.
- * The second parameter is only set from prom_callback() invocations.
- */
-unsigned long prom_virt_to_phys(unsigned long promva, int *error)
-{
-       unsigned long mask;
-       int i;
-
-       mask = _PAGE_PADDR_4U;
-       if (tlb_type == hypervisor)
-               mask = _PAGE_PADDR_4V;
-
-       for (i = 0; i < prom_trans_ents; i++) {
-               struct linux_prom_translation *p = &prom_trans[i];
-
-               if (promva >= p->virt &&
-                   promva < (p->virt + p->size)) {
-                       unsigned long base = p->data & mask;
-
-                       if (error)
-                               *error = 0;
-                       return base + (promva & (8192 - 1));
-               }
-       }
-       if (error)
-               *error = 1;
-       return 0UL;
-}
-
-/* XXX We should kill off this ugly thing at so me point. XXX */
-unsigned long sun4u_get_pte(unsigned long addr)
-{
-       pgd_t *pgdp;
-       pud_t *pudp;
-       pmd_t *pmdp;
-       pte_t *ptep;
-       unsigned long mask = _PAGE_PADDR_4U;
-
-       if (tlb_type == hypervisor)
-               mask = _PAGE_PADDR_4V;
-
-       if (addr >= PAGE_OFFSET)
-               return addr & mask;
-
-       if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
-               return prom_virt_to_phys(addr, NULL);
-
-       pgdp = pgd_offset_k(addr);
-       pudp = pud_offset(pgdp, addr);
-       pmdp = pmd_offset(pudp, addr);
-       ptep = pte_offset_kernel(pmdp, addr);
-
-       return pte_val(*ptep) & mask;
-}
-
 /* If not locked, zap it. */
 void __flush_tlb_all(void)
 {
index 9fcaad6..542c808 100644 (file)
@@ -224,7 +224,8 @@ static char *serial(char *buffer, int sz)
 
        *buffer = 0;
        if (dp) {
-               char *val = of_get_property(dp, "system-board-serial#", &len);
+               const char *val =
+                       of_get_property(dp, "system-board-serial#", &len);
 
                if (val && len > 0) {
                        if (len > sz)
index 9c2e7a7..adeece1 100644 (file)
@@ -46,7 +46,7 @@ static int daemon_read(int fd, struct sk_buff **skb,
 {
        *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER);
        if(*skb == NULL) return(-ENOMEM);
-       return(net_recvfrom(fd, (*skb)->mac.raw, 
+       return(net_recvfrom(fd, skb_mac_header(*skb),
                            (*skb)->dev->mtu + ETH_HEADER_OTHER));
 }
 
index 52ccb7b..e6b8e0d 100644 (file)
@@ -50,7 +50,7 @@ static int mcast_read(int fd, struct sk_buff **skb, struct uml_net_private *lp)
 {
        *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER);
        if(*skb == NULL) return(-ENOMEM);
-       return(net_recvfrom(fd, (*skb)->mac.raw, 
+       return(net_recvfrom(fd, skb_mac_header(*skb),
                            (*skb)->dev->mtu + ETH_HEADER_OTHER));
 }
 
index 04e31f8..8593037 100644 (file)
@@ -55,7 +55,7 @@ static int uml_net_rx(struct net_device *dev)
 
        skb->dev = dev;
        skb_put(skb, dev->mtu);
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        pkt_len = (*lp->read)(lp->fd, &skb, lp);
 
        if (pkt_len > 0) {
index e67362a..9488493 100644 (file)
@@ -36,7 +36,7 @@ static int pcap_read(int fd, struct sk_buff **skb,
 {
        *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER);
        if(*skb == NULL) return(-ENOMEM);
-       return(pcap_user_read(fd, (*skb)->mac.raw, 
+       return(pcap_user_read(fd, skb_mac_header(*skb),
                              (*skb)->dev->mtu + ETH_HEADER_OTHER,
                              (struct pcap_data *) &lp->user));
 }
index 25634bd..125c44f 100644 (file)
@@ -49,7 +49,7 @@ static unsigned short slip_protocol(struct sk_buff *skbuff)
 static int slip_read(int fd, struct sk_buff **skb, 
                       struct uml_net_private *lp)
 {
-       return(slip_user_read(fd, (*skb)->mac.raw, (*skb)->dev->mtu, 
+       return(slip_user_read(fd, skb_mac_header(*skb), (*skb)->dev->mtu,
                              (struct slip_data *) &lp->user));
 }
 
index b3ed8fb..0a0324a 100644 (file)
@@ -53,7 +53,7 @@ static unsigned short slirp_protocol(struct sk_buff *skbuff)
 static int slirp_read(int fd, struct sk_buff **skb, 
                       struct uml_net_private *lp)
 {
-       return(slirp_user_read(fd, (*skb)->mac.raw, (*skb)->dev->mtu, 
+       return(slirp_user_read(fd, skb_mac_header(*skb), (*skb)->dev->mtu,
                              (struct slirp_data *) &lp->user));
 }
 
index 7054182..1268914 100644 (file)
@@ -43,7 +43,7 @@ static int etap_read(int fd, struct sk_buff **skb, struct uml_net_private *lp)
 
        *skb = ether_adjust_skb(*skb, ETH_HEADER_ETHERTAP);
        if(*skb == NULL) return(-ENOMEM);
-       len = net_recvfrom(fd, (*skb)->mac.raw, 
+       len = net_recvfrom(fd, skb_mac_header(*skb),
                           (*skb)->dev->mtu + 2 * ETH_HEADER_ETHERTAP);
        if(len <= 0) return(len);
        skb_pull(*skb, 2);
index 76570a2..f1714e7 100644 (file)
@@ -43,7 +43,7 @@ static int tuntap_read(int fd, struct sk_buff **skb,
 {
        *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER);
        if(*skb == NULL) return(-ENOMEM);
-       return(net_read(fd, (*skb)->mac.raw, 
+       return(net_read(fd, skb_mac_header(*skb),
                        (*skb)->dev->mtu + ETH_HEADER_OTHER));
 }
 
index 2bac8c6..0bae862 100644 (file)
@@ -519,7 +519,11 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32); 
        gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size)); 
        if (!gatt) 
-               panic("Cannot allocate GATT table"); 
+               panic("Cannot allocate GATT table");
+       if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE))
+               panic("Could not set GART PTEs to uncacheable pages");
+       global_flush_tlb();
+
        memset(gatt, 0, gatt_size); 
        agp_gatt_table = gatt;
 
index 65c5eaa..081409a 100644 (file)
@@ -81,8 +81,8 @@ static void flush_kernel_map(void *arg)
                void *adr = page_address(pg);
                if (cpu_has_clflush)
                        cache_flush_page(adr);
-               __flush_tlb_one(adr);
        }
+       __flush_tlb_all();
 }
 
 static inline void flush_map(struct list_head *l)
index ed935b5..6c4fdd8 100644 (file)
@@ -2,6 +2,6 @@
 # Makefile for Xtensa-specific library files.
 #
 
-lib-y  += memcopy.o memset.o checksum.o strcasecmp.o \
+lib-y  += memcopy.o memset.o checksum.o \
           usercopy.o strncpy_user.o strnlen_user.o
 lib-$(CONFIG_PCI) += pci-auto.o
diff --git a/arch/xtensa/lib/strcasecmp.c b/arch/xtensa/lib/strcasecmp.c
deleted file mode 100644 (file)
index 165b2d6..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- *  linux/arch/xtensa/lib/strcasecmp.c
- *
- *  This file is subject to the terms and conditions of the GNU General
- *  Public License.  See the file "COPYING" in the main directory of
- *  this archive for more details.
- *
- *  Copyright (C) 2002 Tensilica Inc.
- */
-
-#include <linux/string.h>
-
-
-/* We handle nothing here except the C locale.  Since this is used in
-   only one place, on strings known to contain only 7 bit ASCII, this
-   is ok.  */
-
-int strcasecmp(const char *a, const char *b)
-{
-       int ca, cb;
-
-       do {
-               ca = *a++ & 0xff;
-               cb = *b++ & 0xff;
-               if (ca >= 'A' && ca <= 'Z')
-                       ca += 'a' - 'A';
-               if (cb >= 'A' && cb <= 'Z')
-                       cb += 'a' - 'A';
-       } while (ca == cb && ca != '\0');
-
-       return ca - cb;
-}
index 8ebfc87..ab05bff 100644 (file)
@@ -386,7 +386,7 @@ static int iss_net_rx(struct net_device *dev)
        /* Setup skb */
 
        skb->dev = dev;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        pkt_len = lp->tp.read(lp, &skb);
        skb_put(skb, pkt_len);
 
index b6491c0..f92ba2a 100644 (file)
@@ -532,6 +532,12 @@ static void cfq_add_rq_rb(struct request *rq)
 
        if (!cfq_cfqq_on_rr(cfqq))
                cfq_add_cfqq_rr(cfqd, cfqq);
+
+       /*
+        * check if this request is a better next-serve candidate
+        */
+       cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
+       BUG_ON(!cfqq->next_rq);
 }
 
 static inline void
@@ -986,9 +992,9 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         * expire an async queue immediately if it has used up its slice. idle
         * queue always expire after 1 dispatch round.
         */
-       if ((!cfq_cfqq_sync(cfqq) &&
+       if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
            cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-           cfq_class_idle(cfqq)) {
+           cfq_class_idle(cfqq))) {
                cfqq->slice_end = jiffies + 1;
                cfq_slice_expired(cfqd, 0, 0);
        }
@@ -1051,19 +1057,21 @@ cfq_dispatch_requests(request_queue_t *q, int force)
        while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
                int max_dispatch;
 
-               /*
-                * Don't repeat dispatch from the previous queue.
-                */
-               if (prev_cfqq == cfqq)
-                       break;
+               if (cfqd->busy_queues > 1) {
+                       /*
+                        * Don't repeat dispatch from the previous queue.
+                        */
+                       if (prev_cfqq == cfqq)
+                               break;
 
-               /*
-                * So we have dispatched before in this round, if the
-                * next queue has idling enabled (must be sync), don't
-                * allow it service until the previous have continued.
-                */
-               if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
-                       break;
+                       /*
+                        * So we have dispatched before in this round, if the
+                        * next queue has idling enabled (must be sync), don't
+                        * allow it service until the previous have continued.
+                        */
+                       if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
+                               break;
+               }
 
                cfq_clear_cfqq_must_dispatch(cfqq);
                cfq_clear_cfqq_wait_request(cfqq);
@@ -1370,7 +1378,9 @@ retry:
                atomic_set(&cfqq->ref, 0);
                cfqq->cfqd = cfqd;
 
-               cfq_mark_cfqq_idle_window(cfqq);
+               if (key != CFQ_KEY_ASYNC)
+                       cfq_mark_cfqq_idle_window(cfqq);
+
                cfq_mark_cfqq_prio_changed(cfqq);
                cfq_mark_cfqq_queue_new(cfqq);
                cfq_init_prio_data(cfqq);
@@ -1634,12 +1644,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        if (rq_is_meta(rq))
                cfqq->meta_pending++;
 
-       /*
-        * check if this request is a better next-serve candidate)) {
-        */
-       cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
-       BUG_ON(!cfqq->next_rq);
-
        /*
         * we never wait for an async request and we don't allow preemption
         * of an async request. so just return early
index 0ae8b93..589b98b 100644 (file)
@@ -758,7 +758,8 @@ static void acpi_thermal_check(void *data)
                        del_timer(&(tz->timer));
        } else {
                if (timer_pending(&(tz->timer)))
-                       mod_timer(&(tz->timer), (HZ * sleep_time) / 1000);
+                       mod_timer(&(tz->timer),
+                                       jiffies + (HZ * sleep_time) / 1000);
                else {
                        tz->timer.data = (unsigned long)tz;
                        tz->timer.function = acpi_thermal_run;
index f482078..8dc3bc4 100644 (file)
@@ -878,6 +878,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        struct ata_port_info *port;
        struct pci_dev *host = NULL;
        struct sis_chipset *chipset = NULL;
+       struct sis_chipset *sets;
 
        static struct sis_chipset sis_chipsets[] = {
 
@@ -932,10 +933,11 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* We have to find the bridge first */
 
-       for (chipset = &sis_chipsets[0]; chipset->device; chipset++) {
-               host = pci_get_device(PCI_VENDOR_ID_SI, chipset->device, NULL);
+       for (sets = &sis_chipsets[0]; sets->device; sets++) {
+               host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL);
                if (host != NULL) {
-                       if (chipset->device == 0x630) { /* SIS630 */
+                       chipset = sets;                 /* Match found */
+                       if (sets->device == 0x630) {    /* SIS630 */
                                u8 host_rev;
                                pci_read_config_byte(host, PCI_REVISION_ID, &host_rev);
                                if (host_rev >= 0x30)   /* 630 ET */
@@ -946,7 +948,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* Look for concealed bridges */
-       if (host == NULL) {
+       if (chipset == NULL) {
                /* Second check */
                u32 idemisc;
                u16 trueid;
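
The pata_sis fix above walks the chipset table with one pointer (sets) while recording the hit in another (chipset), so the later "concealed bridges" path keys off whether a match was found rather than off the last pci_get_device() result. A generic userspace sketch of that lookup pattern, simplified to stop at the first hit, with a made-up id table:

    #include <stddef.h>
    #include <stdio.h>

    struct id_entry { int device; const char *name; };

    static const struct id_entry table[] = {
            { 0x630, "SIS630" },
            { 0x735, "SIS735" },
            { 0 }                           /* terminator */
    };

    int main(void)
    {
            const struct id_entry *cur, *match = NULL;
            int wanted = 0x735;

            for (cur = table; cur->device; cur++) {
                    if (cur->device == wanted) {
                            match = cur;    /* remember it; keep cur for the scan */
                            break;
                    }
            }

            if (match == NULL)
                    printf("no match, fall back to the second check\n");
            else
                    printf("matched %s\n", match->name);
            return 0;
    }
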
index 3c372e0..59651ab 100644 (file)
@@ -821,7 +821,7 @@ static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
     }
     // cast needed as there is no %? for pointer differences
     PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
-           skb, skb->head, (long) (skb->end - skb->head));
+           skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
     rx.handle = virt_to_bus (skb);
     rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
     if (rx_give (dev, &rx, pool))
index fc518d8..02ad83d 100644 (file)
@@ -221,7 +221,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
        hdr->vpi = htons(vcc->vpi);
        hdr->vci = htons(vcc->vci);
        hdr->length = htonl(skb->len);
-       memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
+       skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
        if (vcc->pop) vcc->pop(vcc,skb);
        else dev_kfree_skb(skb);
        out_vcc->push(out_vcc,new_skb);
@@ -310,7 +310,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
                goto done;
        }
        __net_timestamp(new_skb);
-       memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
+       skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
        out_vcc->push(out_vcc,new_skb);
        atomic_inc(&vcc->stats->tx);
        atomic_inc(&out_vcc->stats->rx);
@@ -352,7 +352,7 @@ static struct atm_dev atmtcp_control_dev = {
        .ops            = &atmtcp_c_dev_ops,
        .type           = "atmtcp",
        .number         = 999,
-       .lock           = SPIN_LOCK_UNLOCKED
+       .lock           = __SPIN_LOCK_UNLOCKED(atmtcp_control_dev.lock)
 };
 
 
index 8fccf01..0d3a38b 100644 (file)
@@ -536,7 +536,7 @@ static int rx_aal0(struct atm_vcc *vcc)
                return 0;
        }
        skb_put(skb,length);
-       skb_set_timestamp(skb, &eni_vcc->timestamp);
+       skb->tstamp = eni_vcc->timestamp;
        DPRINTK("got len %ld\n",length);
        if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1;
        eni_vcc->rxing++;
@@ -701,7 +701,7 @@ static void get_service(struct atm_dev *dev)
                        DPRINTK("Grr, servicing VCC %ld twice\n",vci);
                        continue;
                }
-               do_gettimeofday(&ENI_VCC(vcc)->timestamp);
+               ENI_VCC(vcc)->timestamp = ktime_get_real();
                ENI_VCC(vcc)->next = NULL;
                if (vcc->qos.rxtp.traffic_class == ATM_CBR) {
                        if (eni_dev->fast)
index 385090c..d04fefb 100644 (file)
@@ -59,7 +59,7 @@ struct eni_vcc {
        int rxing;                      /* number of pending PDUs */
        int servicing;                  /* number of waiting VCs (0 or 1) */
        int txing;                      /* number of pending TX bytes */
-       struct timeval timestamp;       /* for RX timing */
+       ktime_t timestamp;              /* for RX timing */
        struct atm_vcc *next;           /* next pending RX */
        struct sk_buff *last;           /* last PDU being DMAed (used to carry
                                           discard information) */
index a7c0ed3..405ee5e 100644 (file)
@@ -1,6 +1,4 @@
 /*
-  $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
-
   A FORE Systems 200E-series driver for ATM on Linux.
   Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
 
@@ -1502,9 +1500,9 @@ fore200e_open(struct atm_vcc *vcc)
     /* pseudo-CBR bandwidth requested? */
     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
        
-       down(&fore200e->rate_sf);
+       mutex_lock(&fore200e->rate_mtx);
        if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
-           up(&fore200e->rate_sf);
+           mutex_unlock(&fore200e->rate_mtx);
 
            kfree(fore200e_vcc);
            vc_map->vcc = NULL;
@@ -1513,7 +1511,7 @@ fore200e_open(struct atm_vcc *vcc)
 
        /* reserve bandwidth */
        fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
-       up(&fore200e->rate_sf);
+       mutex_unlock(&fore200e->rate_mtx);
     }
     
     vcc->itf = vcc->dev->number;
@@ -1599,9 +1597,9 @@ fore200e_close(struct atm_vcc* vcc)
     /* release reserved bandwidth, if any */
     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
 
-       down(&fore200e->rate_sf);
+       mutex_lock(&fore200e->rate_mtx);
        fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
-       up(&fore200e->rate_sf);
+       mutex_unlock(&fore200e->rate_mtx);
 
        clear_bit(ATM_VF_HASQOS, &vcc->flags);
     }
@@ -2064,16 +2062,16 @@ fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
 
     if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
 
-       down(&fore200e->rate_sf);
+       mutex_lock(&fore200e->rate_mtx);
        if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
-           up(&fore200e->rate_sf);
+           mutex_unlock(&fore200e->rate_mtx);
            return -EAGAIN;
        }
 
        fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
        fore200e->available_cell_rate -= qos->txtp.max_pcr;
 
-       up(&fore200e->rate_sf);
+       mutex_unlock(&fore200e->rate_mtx);
        
        memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
        
@@ -2459,7 +2457,7 @@ fore200e_initialize(struct fore200e* fore200e)
 
     DPRINTK(2, "device %s being initialized\n", fore200e->name);
 
-    init_MUTEX(&fore200e->rate_sf);
+    mutex_init(&fore200e->rate_mtx);
     spin_lock_init(&fore200e->q_lock);
 
     cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
index f9abfda..b85a546 100644 (file)
@@ -869,7 +869,7 @@ typedef struct fore200e {
 
     struct stats*              stats;                  /* last snapshot of the stats         */
     
-    struct semaphore           rate_sf;                /* protects rate reservation ops      */
+    struct mutex               rate_mtx;               /* protects rate reservation ops      */
     spinlock_t                 q_lock;                 /* protects queue ops                 */
 #ifdef FORE200E_USE_TASKLET
     struct tasklet_struct      tx_tasklet;             /* performs tx interrupt work         */
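
The fore200e changes replace a semaphore that only ever guarded the rate-reservation bookkeeping with a mutex, which carries stricter ownership semantics. A userspace analogue of the same refactor using POSIX threads (illustrative only; names and numbers are made up):

    #include <pthread.h>
    #include <stdio.h>

    /* Analogue of the driver's rate bookkeeping: a shared counter whose
     * reserve/release operations must not interleave. */
    static pthread_mutex_t rate_mtx = PTHREAD_MUTEX_INITIALIZER;
    static long available_cell_rate = 353207;

    static int reserve_rate(long pcr)
    {
            pthread_mutex_lock(&rate_mtx);          /* was: down(&rate_sf) */
            if (available_cell_rate < pcr) {
                    pthread_mutex_unlock(&rate_mtx);
                    return -1;                      /* not enough bandwidth */
            }
            available_cell_rate -= pcr;
            pthread_mutex_unlock(&rate_mtx);        /* was: up(&rate_sf) */
            return 0;
    }

    static void release_rate(long pcr)
    {
            pthread_mutex_lock(&rate_mtx);
            available_cell_rate += pcr;
            pthread_mutex_unlock(&rate_mtx);
    }

    int main(void)
    {
            if (reserve_rate(100000) == 0) {
                    printf("reserved, %ld left\n", available_cell_rate);
                    release_rate(100000);
            }
            printf("final: %ld\n", available_cell_rate);
            return 0;
    }
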
index 8510026..d33aba6 100644 (file)
@@ -1901,13 +1901,13 @@ he_service_rbrq(struct he_dev *he_dev, int group)
                        case ATM_AAL0:
                                /* 2.10.1.5 raw cell receive */
                                skb->len = ATM_AAL0_SDU;
-                               skb->tail = skb->data + skb->len;
+                               skb_set_tail_pointer(skb, skb->len);
                                break;
                        case ATM_AAL5:
                                /* 2.10.1.2 aal5 receive */
 
                                skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
-                               skb->tail = skb->data + skb->len;
+                               skb_set_tail_pointer(skb, skb->len);
 #ifdef USE_CHECKSUM_HW
                                if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
                                        skb->ip_summed = CHECKSUM_COMPLETE;
index b4b8014..057efbc 100644 (file)
@@ -1065,7 +1065,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
        vcc = vc->rx_vcc;
 
        pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                   skb->end - skb->data, PCI_DMA_FROMDEVICE);
+                                   skb_end_pointer(skb) - skb->data,
+                                   PCI_DMA_FROMDEVICE);
 
        if ((vcc->qos.aal == ATM_AAL0) ||
            (vcc->qos.aal == ATM_AAL34)) {
@@ -1194,7 +1195,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
                }
 
                pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                skb->end - skb->data, PCI_DMA_FROMDEVICE);
+                                skb_end_pointer(skb) - skb->data,
+                                PCI_DMA_FROMDEVICE);
                sb_pool_remove(card, skb);
 
                skb_trim(skb, len);
@@ -1267,7 +1269,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
        tail = readl(SAR_REG_RAWCT);
 
        pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
-                                   queue->end - queue->head - 16,
+                                   skb_end_pointer(queue) - queue->head - 16,
                                    PCI_DMA_FROMDEVICE);
 
        while (head != tail) {
@@ -1363,7 +1365,8 @@ drop:
                                queue = card->raw_cell_head;
                                pci_dma_sync_single_for_cpu(card->pcidev,
                                                            IDT77252_PRV_PADDR(queue),
-                                                           queue->end - queue->data,
+                                                           (skb_end_pointer(queue) -
+                                                            queue->data),
                                                            PCI_DMA_FROMDEVICE);
                        } else {
                                card->raw_cell_head = NULL;
@@ -1816,7 +1819,8 @@ push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
        u32 handle;
        u32 addr;
 
-       skb->data = skb->tail = skb->head;
+       skb->data = skb->head;
+       skb_reset_tail_pointer(skb);
        skb->len = 0;
 
        skb_reserve(skb, 16);
@@ -1835,7 +1839,6 @@ push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
                skb_put(skb, SAR_FB_SIZE_3);
                break;
        default:
-               dev_kfree_skb(skb);
                return -1;
        }
 
@@ -1874,7 +1877,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
                }
 
                paddr = pci_map_single(card->pcidev, skb->data,
-                                      skb->end - skb->data,
+                                      skb_end_pointer(skb) - skb->data,
                                       PCI_DMA_FROMDEVICE);
                IDT77252_PRV_PADDR(skb) = paddr;
 
@@ -1888,7 +1891,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
 
 outunmap:
        pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                        skb->end - skb->data, PCI_DMA_FROMDEVICE);
+                        skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE);
 
        handle = IDT77252_PRV_POOL(skb);
        card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
@@ -1905,12 +1908,14 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
        int err;
 
        pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                      skb->end - skb->data, PCI_DMA_FROMDEVICE);
+                                      skb_end_pointer(skb) - skb->data,
+                                      PCI_DMA_FROMDEVICE);
 
        err = push_rx_skb(card, skb, POOL_QUEUE(handle));
        if (err) {
                pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                skb->end - skb->data, PCI_DMA_FROMDEVICE);
+                                skb_end_pointer(skb) - skb->data,
+                                PCI_DMA_FROMDEVICE);
                sb_pool_remove(card, skb);
                dev_kfree_skb(skb);
        }
@@ -3122,7 +3127,8 @@ deinit_card(struct idt77252_dev *card)
                        if (skb) {
                                pci_unmap_single(card->pcidev,
                                                 IDT77252_PRV_PADDR(skb),
-                                                skb->end - skb->data,
+                                                (skb_end_pointer(skb) -
+                                                 skb->data),
                                                 PCI_DMA_FROMDEVICE);
                                card->sbpool[i].skb[j] = NULL;
                                dev_kfree_skb(skb);
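
The he and idt77252 hunks swap direct skb->end / skb->tail arithmetic for skb_end_pointer() and skb_set_tail_pointer(). The point of the accessor, as these conversions suggest, is to hide whether the field is kept as a raw pointer or as an offset from skb->head. A self-contained sketch of that idea with an invented structure, not the real sk_buff layout:

    #include <stdio.h>

    #define USE_OFFSETS 1   /* flip to 0 to store a raw pointer instead */

    struct fake_skb {
            unsigned char *head;
            unsigned char *data;
    #if USE_OFFSETS
            unsigned int   end;             /* offset from head */
    #else
            unsigned char *end;             /* raw pointer */
    #endif
    };

    /* One accessor, two representations - callers never care which. */
    static unsigned char *fake_end_pointer(const struct fake_skb *skb)
    {
    #if USE_OFFSETS
            return skb->head + skb->end;
    #else
            return skb->end;
    #endif
    }

    int main(void)
    {
            unsigned char buf[256];
            struct fake_skb skb = { buf, buf + 16,
    #if USE_OFFSETS
                                    sizeof(buf)
    #else
                                    buf + sizeof(buf)
    #endif
            };

            /* Same arithmetic the driver hunks now do via the accessor. */
            printf("buffer size = %ld bytes\n",
                   (long)(fake_end_pointer(&skb) - skb.data));
            return 0;
    }
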
index aab9b37..14ced85 100644 (file)
@@ -2208,7 +2208,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
          if (i == 1 && ns_rsqe_eopdu(rsqe))
             *((u32 *) sb->data) |= 0x00000002;
          skb_put(sb, NS_AAL0_HEADER);
-         memcpy(sb->tail, cell, ATM_CELL_PAYLOAD);
+         memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
          skb_put(sb, ATM_CELL_PAYLOAD);
          ATM_SKB(sb)->vcc = vcc;
         __net_timestamp(sb);
@@ -2252,7 +2252,8 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
       vc->rx_iov = iovb;
       NS_SKB(iovb)->iovcnt = 0;
       iovb->len = 0;
-      iovb->tail = iovb->data = iovb->head;
+      iovb->data = iovb->head;
+      skb_reset_tail_pointer(iovb);
       NS_SKB(iovb)->vcc = vcc;
       /* IMPORTANT: a pointer to the sk_buff containing the small or large
                     buffer is stored as iovec base, NOT a pointer to the 
@@ -2265,7 +2266,8 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
       recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
       NS_SKB(iovb)->iovcnt = 0;
       iovb->len = 0;
-      iovb->tail = iovb->data = iovb->head;
+      iovb->data = iovb->head;
+      skb_reset_tail_pointer(iovb);
       NS_SKB(iovb)->vcc = vcc;
    }
    iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++];
@@ -2393,7 +2395,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
                skb->destructor = ns_lb_destructor;
 #endif /* NS_USE_DESTRUCTORS */
                skb_push(skb, NS_SMBUFSIZE);
-               memcpy(skb->data, sb->data, NS_SMBUFSIZE);
+               skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE);
                skb_put(skb, len - NS_SMBUFSIZE);
                ATM_SKB(skb)->vcc = vcc;
               __net_timestamp(skb);
@@ -2477,7 +2479,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
         {
             /* Copy the small buffer to the huge buffer */
             sb = (struct sk_buff *) iov->iov_base;
-            memcpy(hb->data, sb->data, iov->iov_len);
+            skb_copy_from_linear_data(sb, hb->data, iov->iov_len);
             skb_put(hb, iov->iov_len);
             remaining = len - iov->iov_len;
             iov++;
@@ -2489,7 +2491,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
             {
                lb = (struct sk_buff *) iov->iov_base;
                tocopy = min_t(int, remaining, iov->iov_len);
-               memcpy(hb->tail, lb->data, tocopy);
+               skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy);
                skb_put(hb, tocopy);
                iov++;
                remaining -= tocopy;
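
Many hunks in this merge (atmtcp, nicstar, the Bluetooth and ISDN drivers) mechanically replace memcpy() from skb->data with skb_copy_from_linear_data() and friends. As far as these conversions go, the helper simply copies out of the linear part of the buffer, keeping the source and direction explicit at the call site. A rough userspace stand-in (an invented struct, not the real sk_buff) showing the assumed shape:

    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for the linear part of a socket buffer. */
    struct fake_skb {
            unsigned char data[64];   /* start of the linear data area */
            unsigned int  len;        /* bytes currently in it */
    };

    /* Sketch of the helper's intent: copy out of the linear area only. */
    static void fake_copy_from_linear_data(const struct fake_skb *skb,
                                           void *to, unsigned int len)
    {
            memcpy(to, skb->data, len);
    }

    int main(void)
    {
            struct fake_skb skb = { "atm cell payload", 16 };
            unsigned char out[64];

            fake_copy_from_linear_data(&skb, out, skb.len);
            out[skb.len] = '\0';
            printf("copied: %s\n", (char *)out);
            return 0;
    }
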
index 2308e83..1d84668 100644 (file)
@@ -48,6 +48,15 @@ struct aoe_hdr {
        __be32 tag;
 };
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct aoe_hdr *aoe_hdr(const struct sk_buff *skb)
+{
+       return (struct aoe_hdr *)skb_mac_header(skb);
+}
+#endif
+
 struct aoe_atahdr {
        unsigned char aflags;
        unsigned char errfeat;
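
The new aoe_hdr() inline lets the later aoecmd.c and aoenet.c hunks drop their repeated "(struct aoe_hdr *) skb->mac.raw" casts. A generic sketch of that cast-once-behind-an-inline pattern, with invented types rather than sk_buff:

    #include <stdio.h>
    #include <stdint.h>

    struct proto_hdr {              /* invented on-wire header */
            uint8_t  version;
            uint8_t  flags;
            uint16_t tag;
    };

    struct frame {                  /* invented container, not sk_buff */
            unsigned char *hdr_start;   /* where the header begins */
            unsigned char  buf[64];
    };

    /* The accessor callers use instead of open-coded casts. */
    static inline struct proto_hdr *frame_hdr(struct frame *f)
    {
            return (struct proto_hdr *)f->hdr_start;
    }

    int main(void)
    {
            struct frame f;
            struct proto_hdr *h;

            f.hdr_start = f.buf;
            h = frame_hdr(&f);
            h->version = 1;
            h->flags   = 0;
            h->tag     = 42;
            printf("version %u tag %u\n", h->version, h->tag);
            return 0;
    }
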
index 8d17d8d..1a6aeac 100644 (file)
@@ -27,7 +27,8 @@ new_skb(ulong len)
 
        skb = alloc_skb(len, GFP_ATOMIC);
        if (skb) {
-               skb->nh.raw = skb->mac.raw = skb->data;
+               skb_reset_mac_header(skb);
+               skb_reset_network_header(skb);
                skb->protocol = __constant_htons(ETH_P_AOE);
                skb->priority = 0;
                skb->next = skb->prev = NULL;
@@ -118,7 +119,7 @@ aoecmd_ata_rw(struct aoedev *d, struct frame *f)
 
        /* initialize the headers & frame */
        skb = f->skb;
-       h = (struct aoe_hdr *) skb->mac.raw;
+       h = aoe_hdr(skb);
        ah = (struct aoe_atahdr *) (h+1);
        skb_put(skb, sizeof *h + sizeof *ah);
        memset(h, 0, skb->len);
@@ -207,7 +208,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
                skb->dev = ifp;
                if (sl_tail == NULL)
                        sl_tail = skb;
-               h = (struct aoe_hdr *) skb->mac.raw;
+               h = aoe_hdr(skb);
                memset(h, 0, sizeof *h + sizeof *ch);
 
                memset(h->dst, 0xff, sizeof h->dst);
@@ -300,7 +301,7 @@ rexmit(struct aoedev *d, struct frame *f)
        aoechr_error(buf);
 
        skb = f->skb;
-       h = (struct aoe_hdr *) skb->mac.raw;
+       h = aoe_hdr(skb);
        ah = (struct aoe_atahdr *) (h+1);
        f->tag = n;
        h->tag = cpu_to_be32(n);
@@ -529,7 +530,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
        char ebuf[128];
        u16 aoemajor;
 
-       hin = (struct aoe_hdr *) skb->mac.raw;
+       hin = aoe_hdr(skb);
        aoemajor = be16_to_cpu(get_unaligned(&hin->major));
        d = aoedev_by_aoeaddr(aoemajor, hin->minor);
        if (d == NULL) {
@@ -561,7 +562,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
        calc_rttavg(d, tsince(f->tag));
 
        ahin = (struct aoe_atahdr *) (hin+1);
-       hout = (struct aoe_hdr *) f->skb->mac.raw;
+       hout = aoe_hdr(f->skb);
        ahout = (struct aoe_atahdr *) (hout+1);
        buf = f->buf;
 
@@ -695,7 +696,7 @@ aoecmd_ata_id(struct aoedev *d)
 
        /* initialize the headers & frame */
        skb = f->skb;
-       h = (struct aoe_hdr *) skb->mac.raw;
+       h = aoe_hdr(skb);
        ah = (struct aoe_atahdr *) (h+1);
        skb_put(skb, sizeof *h + sizeof *ah);
        memset(h, 0, skb->len);
@@ -726,7 +727,7 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
        enum { MAXFRAMES = 16 };
        u16 n;
 
-       h = (struct aoe_hdr *) skb->mac.raw;
+       h = aoe_hdr(skb);
        ch = (struct aoe_cfghdr *) (h+1);
 
        /*
index aab6d91..f9ddfda 100644 (file)
@@ -123,7 +123,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
                goto exit;
        skb_push(skb, ETH_HLEN);        /* (1) */
 
-       h = (struct aoe_hdr *) skb->mac.raw;
+       h = aoe_hdr(skb);
        n = be32_to_cpu(get_unaligned(&h->tag));
        if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
                goto exit;
index c852eed..1eeb8f2 100644 (file)
@@ -140,7 +140,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
 #include <linux/blkdev.h>
 #include <asm/uaccess.h>
 
-static spinlock_t pcd_lock;
+static DEFINE_SPINLOCK(pcd_lock);
 
 module_param(verbose, bool, 0644);
 module_param(major, int, 0);
index 7cdaa19..5826508 100644 (file)
@@ -154,7 +154,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
 #include <linux/blkpg.h>
 #include <asm/uaccess.h>
 
-static spinlock_t pf_spin_lock;
+static DEFINE_SPINLOCK(pf_spin_lock);
 
 module_param(verbose, bool, 0644);
 module_param(major, int, 0);
index a4fb703..f1b9dd7 100644 (file)
@@ -777,7 +777,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
                rq->cmd_flags |= REQ_QUIET;
 
        blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
-       ret = rq->errors;
+       if (rq->errors)
+               ret = -EIO;
 out:
        blk_put_request(rq);
        return ret;
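
The pkt_generic_packet() change stops handing the raw rq->errors count back to callers and maps any failure onto -EIO, so the return value stays a plain errno. A toy sketch of that normalisation with an invented request type:

    #include <stdio.h>
    #include <errno.h>

    /* Invented stand-in for a completed block-layer request. */
    struct fake_request {
            int errors;     /* driver-specific error count, 0 on success */
    };

    static int submit_packet(struct fake_request *rq)
    {
            int ret = 0;

            /* ... the request would be executed here ... */

            if (rq->errors)         /* any failure becomes a real errno */
                    ret = -EIO;
            return ret;
    }

    int main(void)
    {
            struct fake_request ok = { 0 }, bad = { 3 };

            printf("ok:  %d\n", submit_packet(&ok));
            printf("bad: %d\n", submit_packet(&bad));
            return 0;
    }
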
index 4c766f3..b990805 100644 (file)
@@ -527,7 +527,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
                buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size;
 
                memcpy(skb_put(nskb, 3), buf, 3);
-               memcpy(skb_put(nskb, size), skb->data + sent, size);
+               skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);
 
                sent  += size;
                count -= size;
index acfb6a4..851de4d 100644 (file)
@@ -461,20 +461,20 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
                                switch (info->rx_state) {
 
                                case RECV_WAIT_EVENT_HEADER:
-                                       eh = (struct hci_event_hdr *)(info->rx_skb->data);
+                                       eh = hci_event_hdr(info->rx_skb);
                                        info->rx_state = RECV_WAIT_DATA;
                                        info->rx_count = eh->plen;
                                        break;
 
                                case RECV_WAIT_ACL_HEADER:
-                                       ah = (struct hci_acl_hdr *)(info->rx_skb->data);
+                                       ah = hci_acl_hdr(info->rx_skb);
                                        dlen = __le16_to_cpu(ah->dlen);
                                        info->rx_state = RECV_WAIT_DATA;
                                        info->rx_count = dlen;
                                        break;
 
                                case RECV_WAIT_SCO_HEADER:
-                                       sh = (struct hci_sco_hdr *)(info->rx_skb->data);
+                                       sh = hci_sco_hdr(info->rx_skb);
                                        info->rx_state = RECV_WAIT_DATA;
                                        info->rx_count = sh->dlen;
                                        break;
index 9fca651..e8ebd5d 100644 (file)
@@ -231,7 +231,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data)
                cr = (struct usb_ctrlrequest *) urb->setup_packet;
                cr->wLength = __cpu_to_le16(skb->len);
 
-               memcpy(urb->transfer_buffer, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
                urb->transfer_buffer_length = skb->len;
 
                err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -250,7 +250,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data)
                skb = skb_dequeue(&data->tx_queue);
 
        if (skb) {
-               memcpy(urb->transfer_buffer, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
                urb->transfer_buffer_length = skb->len;
 
                err = usb_submit_urb(urb, GFP_ATOMIC);
index 18b0f39..3951607 100644 (file)
@@ -303,20 +303,20 @@ static void bt3c_receive(bt3c_info_t *info)
                                switch (info->rx_state) {
 
                                case RECV_WAIT_EVENT_HEADER:
-                                       eh = (struct hci_event_hdr *)(info->rx_skb->data);
+                                       eh = hci_event_hdr(info->rx_skb);
                                        info->rx_state = RECV_WAIT_DATA;
                                        info->rx_count = eh->plen;
                                        break;
 
                                case RECV_WAIT_ACL_HEADER:
-                                       ah = (struct hci_acl_hdr *)(info->rx_skb->data);
+                                       ah = hci_acl_hdr(info->rx_skb);
                                        dlen = __le16_to_cpu(ah->dlen);
                                        info->rx_state = RECV_WAIT_DATA;
                                        info->rx_count = dlen;
                                        break;
 
                                case RECV_WAIT_SCO_HEADER:
-                                       sh = (struct hci_sco_hdr *)(info->rx_skb->data);
+                                       sh = hci_sco_hdr(info->rx_skb);
                                        info->rx_state = RECV_WAIT_DATA;
                                        info->rx_count = sh->dlen;
                                        break;
index c1bce75..d7d2ea0 100644 (file)
@@ -250,20 +250,20 @@ static void btuart_receive(btuart_info_t *info)
                                switch (info->rx_state) {
 
                                case RECV_WAIT_EVENT_HEADER:
-                                       eh = (struct hci_event_hdr *)(info->rx_skb->data);
+                                       eh = hci_event_hdr(info->rx_skb);
                                        info->rx_state = RECV_WAIT_DATA;
                                        info->rx_count = eh->plen;
                                        break;
 
                                case RECV_WAIT_ACL_HEADER:
-                                       ah = (struct hci_acl_hdr *)(info->rx_skb->data);
+                                       ah = hci_acl_hdr(info->rx_skb);
                                        dlen = __le16_to_cpu(ah->dlen);
                                        info->rx_state = RECV_WAIT_DATA;
                                        info->rx_count = dlen;
                                        break;
 
                                case RECV_WAIT_SCO_HEADER:
-                                       sh = (struct hci_sco_hdr *)(info->rx_skb->data);
+                                       sh = hci_sco_hdr(info->rx_skb);
                                        info->rx_state = RECV_WAIT_DATA;
                                        info->rx_count = sh->dlen;
                                        break;
index 459aa97..7f9c54b 100644 (file)
@@ -425,7 +425,7 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
                return -ENOMEM;
 
        skb_reserve(s, NSHL);
-       memcpy(skb_put(s, skb->len), skb->data, skb->len);
+       skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len);
        if (skb->len & 0x0001)
                *skb_put(s, 1) = 0;     /* PAD */
 
index 34f0afc..bfbae14 100644 (file)
@@ -188,7 +188,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
                                continue;
 
                        case H4_W4_EVENT_HDR:
-                               eh = (struct hci_event_hdr *) h4->rx_skb->data;
+                               eh = hci_event_hdr(h4->rx_skb);
 
                                BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
 
@@ -196,7 +196,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
                                continue;
 
                        case H4_W4_ACL_HDR:
-                               ah = (struct hci_acl_hdr *) h4->rx_skb->data;
+                               ah = hci_acl_hdr(h4->rx_skb);
                                dlen = __le16_to_cpu(ah->dlen);
 
                                BT_DBG("ACL header: dlen %d", dlen);
@@ -205,7 +205,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
                                continue;
 
                        case H4_W4_SCO_HDR:
-                               sh = (struct hci_sco_hdr *) h4->rx_skb->data;
+                               sh = hci_sco_hdr(h4->rx_skb);
 
                                BT_DBG("SCO header: dlen %d", sh->dlen);
 
index a61fb6d..80a0115 100644 (file)
@@ -1338,43 +1338,23 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
                 *   (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
                 * Caller should use TIOCGICOUNT to see which one it was
                 */
-       case TIOCMIWAIT: {
-                       DECLARE_WAITQUEUE(wait, current);
-                       int ret;
+       case TIOCMIWAIT:
+               spin_lock_irqsave(&info->slock, flags);
+               cnow = info->icount;    /* note the counters on entry */
+               spin_unlock_irqrestore(&info->slock, flags);
+
+               wait_event_interruptible(info->delta_msr_wait, ({
+                       cprev = cnow;
                        spin_lock_irqsave(&info->slock, flags);
-                       cprev = info->icount;   /* note the counters on entry */
+                       cnow = info->icount;    /* atomic copy */
                        spin_unlock_irqrestore(&info->slock, flags);
 
-                       add_wait_queue(&info->delta_msr_wait, &wait);
-                       while (1) {
-                               spin_lock_irqsave(&info->slock, flags);
-                               cnow = info->icount;    /* atomic copy */
-                               spin_unlock_irqrestore(&info->slock, flags);
-
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               if (((arg & TIOCM_RNG) &&
-                                               (cnow.rng != cprev.rng)) ||
-                                               ((arg & TIOCM_DSR) &&
-                                               (cnow.dsr != cprev.dsr)) ||
-                                               ((arg & TIOCM_CD) &&
-                                               (cnow.dcd != cprev.dcd)) ||
-                                               ((arg & TIOCM_CTS) &&
-                                               (cnow.cts != cprev.cts))) {
-                                       ret = 0;
-                                       break;
-                               }
-                               /* see if a signal did it */
-                               if (signal_pending(current)) {
-                                       ret = -ERESTARTSYS;
-                                       break;
-                               }
-                               cprev = cnow;
-                       }
-                       current->state = TASK_RUNNING;
-                       remove_wait_queue(&info->delta_msr_wait, &wait);
-                       break;
-               }
-               /* NOTREACHED */
+                       ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+                       ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+                       ((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
+                       ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
+               }));
+               break;
                /*
                 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
                 * Return: write counters to the user passed counter struct
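
Both mxser TIOCMIWAIT conversions hand wait_event_interruptible() a GNU C statement expression, "({ ... })", so the condition can retake the lock, snapshot the counters and still evaluate to a single truth value on every wakeup. A tiny standalone demonstration of the construct itself (plain gcc/clang, nothing kernel-specific):

    #include <stdio.h>

    /* A statement expression runs its statements and evaluates to the
     * value of its last expression - which is what lets the TIOCMIWAIT
     * conversion sample the counters and test them in one "condition". */
    int main(void)
    {
            int prev = 3, now = 3;

            for (int tick = 0; tick < 5; tick++) {
                    int changed = ({
                            int snapshot;
                            prev = now;
                            now += (tick == 3);   /* pretend an interrupt bumped it */
                            snapshot = now;
                            snapshot != prev;     /* value of the whole ({ ... }) */
                    });
                    printf("tick %d: changed=%d (now=%d)\n", tick, changed, now);
            }
            return 0;
    }

Because the macro re-evaluates its condition after each wakeup, everything the old open-coded loop did by hand (snapshot, compare, loop) lives inside that one expression.
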
index 9af07e4..f7603b6 100644 (file)
@@ -1758,43 +1758,23 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
                 *   (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
                 * Caller should use TIOCGICOUNT to see which one it was
                 */
-       case TIOCMIWAIT: {
-               DECLARE_WAITQUEUE(wait, current);
-               int ret;
+       case TIOCMIWAIT:
                spin_lock_irqsave(&info->slock, flags);
-               cprev = info->icount;   /* note the counters on entry */
+               cnow = info->icount;    /* note the counters on entry */
                spin_unlock_irqrestore(&info->slock, flags);
 
-               add_wait_queue(&info->delta_msr_wait, &wait);
-               while (1) {
+               wait_event_interruptible(info->delta_msr_wait, ({
+                       cprev = cnow;
                        spin_lock_irqsave(&info->slock, flags);
                        cnow = info->icount;    /* atomic copy */
                        spin_unlock_irqrestore(&info->slock, flags);
 
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       if (((arg & TIOCM_RNG) &&
-                                       (cnow.rng != cprev.rng)) ||
-                                       ((arg & TIOCM_DSR) &&
-                                       (cnow.dsr != cprev.dsr)) ||
-                                       ((arg & TIOCM_CD) &&
-                                       (cnow.dcd != cprev.dcd)) ||
-                                       ((arg & TIOCM_CTS) &&
-                                       (cnow.cts != cprev.cts))) {
-                               ret = 0;
-                               break;
-                       }
-                       /* see if a signal did it */
-                       if (signal_pending(current)) {
-                               ret = -ERESTARTSYS;
-                               break;
-                       }
-                       cprev = cnow;
-               }
-               current->state = TASK_RUNNING;
-               remove_wait_queue(&info->delta_msr_wait, &wait);
+                       ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+                       ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+                       ((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
+                       ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
+               }));
                break;
-       }
-       /* NOTREACHED */
        /*
         * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
         * Return: write counters to the user passed counter struct
@@ -2230,7 +2210,14 @@ end_intr:
        port->mon_data.rxcnt += cnt;
        port->mon_data.up_rxcnt += cnt;
 
+       /*
+        * We are called from an interrupt context with &port->slock
+        * being held. Drop it temporarily in order to prevent
+        * recursive locking.
+        */
+       spin_unlock(&port->slock);
        tty_flip_buffer_push(tty);
+       spin_lock(&port->slock);
 }
 
 static void mxser_transmit_chars(struct mxser_port *port)
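
The comment added to the receive path explains why the port spinlock is dropped around tty_flip_buffer_push(): the callee may try to take the same lock. A userspace sketch of that drop-and-retake pattern with a pthread mutex (purely illustrative; the driver uses a spinlock in interrupt context):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
    static int rx_count;

    /* Stand-in for tty_flip_buffer_push(): it wants the same lock. */
    static void push_to_consumer(void)
    {
            pthread_mutex_lock(&port_lock);
            printf("consumer saw %d bytes\n", rx_count);
            pthread_mutex_unlock(&port_lock);
    }

    static void receive_chars(int n)
    {
            pthread_mutex_lock(&port_lock);
            rx_count += n;

            /* Drop the lock across the callback to avoid self-deadlock,
             * then retake it, mirroring the hunk above. */
            pthread_mutex_unlock(&port_lock);
            push_to_consumer();
            pthread_mutex_lock(&port_lock);

            pthread_mutex_unlock(&port_lock);
    }

    int main(void)
    {
            receive_chars(16);
            return 0;
    }
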
index 8d025e9..157b1d0 100644 (file)
@@ -4169,7 +4169,7 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
        netif_stop_queue(dev);
 
        /* copy data to device buffers */
-       memcpy(info->tx_buf, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, info->tx_buf, skb->len);
        info->tx_get = 0;
        info->tx_put = info->tx_count = skb->len;
 
index b9dc7aa..46c1b97 100644 (file)
@@ -881,15 +881,15 @@ EXPORT_SYMBOL(get_random_bytes);
  */
 static void init_std_data(struct entropy_store *r)
 {
-       struct timeval tv;
+       ktime_t now;
        unsigned long flags;
 
        spin_lock_irqsave(&r->lock, flags);
        r->entropy_count = 0;
        spin_unlock_irqrestore(&r->lock, flags);
 
-       do_gettimeofday(&tv);
-       add_entropy_words(r, (__u32 *)&tv, sizeof(tv)/4);
+       now = ktime_get_real();
+       add_entropy_words(r, (__u32 *)&now, sizeof(now)/4);
        add_entropy_words(r, (__u32 *)utsname(),
                          sizeof(*(utsname()))/4);
 }
@@ -911,14 +911,12 @@ void rand_initialize_irq(int irq)
                return;
 
        /*
-        * If kmalloc returns null, we just won't use that entropy
+        * If kzalloc returns null, we just won't use that entropy
         * source.
         */
-       state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-       if (state) {
-               memset(state, 0, sizeof(struct timer_rand_state));
+       state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+       if (state)
                irq_timer_state[irq] = state;
-       }
 }
 
 #ifdef CONFIG_BLOCK
@@ -927,14 +925,12 @@ void rand_initialize_disk(struct gendisk *disk)
        struct timer_rand_state *state;
 
        /*
-        * If kmalloc returns null, we just won't use that entropy
+        * If kzalloc returns null, we just won't use that entropy
         * source.
         */
-       state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-       if (state) {
-               memset(state, 0, sizeof(struct timer_rand_state));
+       state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+       if (state)
                disk->random = state;
-       }
 }
 #endif
 
@@ -1469,7 +1465,6 @@ late_initcall(seqgen_init);
 __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
                                   __be16 sport, __be16 dport)
 {
-       struct timeval tv;
        __u32 seq;
        __u32 hash[12];
        struct keydata *keyptr = get_keyptr();
@@ -1485,8 +1480,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
        seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
        seq += keyptr->count;
 
-       do_gettimeofday(&tv);
-       seq += tv.tv_usec + tv.tv_sec * 1000000;
+       seq += ktime_get_real().tv64;
 
        return seq;
 }
@@ -1521,7 +1515,6 @@ __u32 secure_ip_id(__be32 daddr)
 __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
                                 __be16 sport, __be16 dport)
 {
-       struct timeval tv;
        __u32 seq;
        __u32 hash[4];
        struct keydata *keyptr = get_keyptr();
@@ -1543,12 +1536,11 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
         *      As close as possible to RFC 793, which
         *      suggests using a 250 kHz clock.
         *      Further reading shows this assumes 2 Mb/s networks.
-        *      For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+        *      For 10 Gb/s Ethernet, a 1 GHz clock is appropriate.
         *      That's funny, Linux has one built in!  Use it!
         *      (Networks are faster now - should this be increased?)
         */
-       do_gettimeofday(&tv);
-       seq += tv.tv_usec + tv.tv_sec * 1000000;
+       seq += ktime_get_real().tv64;
 #if 0
        printk("init_seq(%lx, %lx, %d, %d) = %d\n",
               saddr, daddr, sport, dport, seq);
@@ -1556,8 +1548,6 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
        return seq;
 }
 
-EXPORT_SYMBOL(secure_tcp_sequence_number);
-
 /* Generate secure starting point for ephemeral IPV4 transport port search */
 u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
@@ -1598,7 +1588,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16
 u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
                                __be16 sport, __be16 dport)
 {
-       struct timeval tv;
        u64 seq;
        __u32 hash[4];
        struct keydata *keyptr = get_keyptr();
@@ -1611,8 +1600,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
        seq = half_md4_transform(hash, keyptr->secret);
        seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
 
-       do_gettimeofday(&tv);
-       seq += tv.tv_usec + tv.tv_sec * 1000000;
+       seq += ktime_get_real().tv64;
        seq &= (1ull << 48) - 1;
 #if 0
        printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
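
The random.c hunks replace do_gettimeofday() with ktime_get_real(), folding the wall-clock reading into one 64-bit value (.tv64) instead of a seconds/microseconds pair. A userspace analogue built on clock_gettime(); the helper name here is invented, not a kernel or libc API:

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    /* Roughly what the ktime-based code feeds into the mix: one 64-bit
     * nanosecond-resolution reading instead of a struct timeval. */
    static int64_t wallclock_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_REALTIME, &ts);
            return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    int main(void)
    {
            unsigned int seq = 0x12345678;        /* pretend hash output */

            seq += (unsigned int)wallclock_ns();  /* perturb it with the clock */
            printf("sequence: 0x%08x\n", seq);
            return 0;
    }
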
index a905f78..a7b9e9b 100644 (file)
@@ -212,7 +212,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
        skb = skb_get(__skb);
 
        if (skb->len >= NLMSG_SPACE(0)) {
-               nlh = (struct nlmsghdr *)skb->data;
+               nlh = nlmsg_hdr(skb);
 
                if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
                    skb->len < nlh->nlmsg_len ||
@@ -448,7 +448,7 @@ static int __devinit cn_init(void)
 
        dev->nls = netlink_kernel_create(NETLINK_CONNECTOR,
                                         CN_NETLINK_USERS + 0xf,
-                                        dev->input, THIS_MODULE);
+                                        dev->input, NULL, THIS_MODULE);
        if (!dev->nls)
                return -EIO;
 
index 01206eb..30a7640 100644 (file)
@@ -121,9 +121,9 @@ superio_exit(void)
  * ISA constants
  */
 
-#define REGION_ALIGNMENT       ~7
-#define REGION_OFFSET          5
-#define REGION_LENGTH          2
+#define IOREGION_ALIGNMENT     ~7
+#define IOREGION_OFFSET                5
+#define IOREGION_LENGTH                2
 #define ADDR_REG_OFFSET                5
 #define DATA_REG_OFFSET                6
 
@@ -1194,7 +1194,7 @@ static int w83627ehf_detect(struct i2c_adapter *adapter)
        u8 fan4pin, fan5pin;
        int i, err = 0;
 
-       if (!request_region(address + REGION_OFFSET, REGION_LENGTH,
+       if (!request_region(address + IOREGION_OFFSET, IOREGION_LENGTH,
                            w83627ehf_driver.driver.name)) {
                err = -EBUSY;
                goto exit;
@@ -1322,7 +1322,7 @@ exit_remove:
 exit_free:
        kfree(data);
 exit_release:
-       release_region(address + REGION_OFFSET, REGION_LENGTH);
+       release_region(address + IOREGION_OFFSET, IOREGION_LENGTH);
 exit:
        return err;
 }
@@ -1337,7 +1337,7 @@ static int w83627ehf_detach_client(struct i2c_client *client)
 
        if ((err = i2c_detach_client(client)))
                return err;
-       release_region(client->addr + REGION_OFFSET, REGION_LENGTH);
+       release_region(client->addr + IOREGION_OFFSET, IOREGION_LENGTH);
        kfree(data);
 
        return 0;
@@ -1380,7 +1380,7 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr)
        superio_select(W83627EHF_LD_HWM);
        val = (superio_inb(SIO_REG_ADDR) << 8)
            | superio_inb(SIO_REG_ADDR + 1);
-       *addr = val & REGION_ALIGNMENT;
+       *addr = val & IOREGION_ALIGNMENT;
        if (*addr == 0) {
                superio_exit();
                return -ENODEV;
index ca2e4f8..5bdf64b 100644 (file)
@@ -57,6 +57,7 @@ if IDE
 config IDE_MAX_HWIFS
        int "Max IDE interfaces"
        depends on ALPHA || SUPERH || IA64 || EMBEDDED
+       range 1 10
        default 4
        help
          This is the maximum number of IDE hardware interfaces that will
index d4b753e..dd7ec37 100644 (file)
@@ -108,6 +108,7 @@ delkin_cb_remove (struct pci_dev *dev)
 
 static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = {
        { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+       { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        { 0, },
 };
 MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl);
index 60ecdc2..ab6fa27 100644 (file)
@@ -1,10 +1,10 @@
 /*
- * linux/drivers/ide/pci/hpt366.c              Version 1.01    Dec 23, 2006
+ * linux/drivers/ide/pci/hpt366.c              Version 1.02    Apr 18, 2007
  *
  * Copyright (C) 1999-2003             Andre Hedrick <andre@linux-ide.org>
  * Portions Copyright (C) 2001         Sun Microsystems, Inc.
  * Portions Copyright (C) 2003         Red Hat Inc
- * Portions Copyright (C) 2005-2006    MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2007    MontaVista Software, Inc.
  *
  * Thanks to HighPoint Technologies for their assistance, and hardware.
  * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
@@ -494,6 +494,7 @@ static struct hpt_info hpt302n __devinitdata = {
        .chip_type      = HPT302N,
        .max_mode       = HPT302_ALLOW_ATA133_6 ? 4 : 3,
        .dpll_clk       = 77,
+       .settings       = hpt37x_settings
 };
 
 static struct hpt_info hpt371n __devinitdata = {
index 03e44b3..a364003 100644 (file)
@@ -834,7 +834,7 @@ static inline u16 ether1394_type_trans(struct sk_buff *skb,
        struct eth1394hdr *eth;
        unsigned char *rawp;
 
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull (skb, ETH1394_HLEN);
        eth = eth1394_hdr(skb);
 
@@ -1668,7 +1668,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
        if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 ||
            proto == htons(ETH_P_ARP) ||
            (proto == htons(ETH_P_IP) &&
-            IN_MULTICAST(ntohl(skb->nh.iph->daddr)))) {
+            IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
                tx_type = ETH1394_GASP;
                dest_node = LOCAL_BUS | ALL_NODES;
                max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD;
index c45cbff..1e83565 100644 (file)
@@ -90,7 +90,7 @@ struct eth1394hdr {
 
 static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
 {
-       return (struct eth1394hdr *)skb->mac.raw;
+       return (struct eth1394hdr *)skb_mac_header(skb);
 }
 #endif
 
index 59243d9..58bc272 100644 (file)
@@ -439,7 +439,8 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
        }
 
        /* Setup the skb for reuse since we're dropping this pkt */
-       elem->skb->tail = elem->skb->data = elem->skb->head;
+       elem->skb->data = elem->skb->head;
+       skb_reset_tail_pointer(elem->skb);
 
        /* Zero out the rxp hdr in the sk_buff */
        memset(elem->skb->data, 0, sizeof(*rxp_hdr));
@@ -521,9 +522,8 @@ static void c2_rx_interrupt(struct net_device *netdev)
                 * "sizeof(struct c2_rxp_hdr)".
                 */
                skb->data += sizeof(*rxp_hdr);
-               skb->tail = skb->data + buflen;
+               skb_set_tail_pointer(skb, buflen);
                skb->len = buflen;
-               skb->dev = netdev;
                skb->protocol = eth_type_trans(skb, netdev);
 
                netif_rx(skb);
index 2d2de9b..3b4b0ac 100644 (file)
@@ -477,7 +477,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
        BUG_ON(skb_cloned(skb));
 
        mpalen = sizeof(*mpa) + ep->plen;
-       if (skb->data + mpalen + sizeof(*req) > skb->end) {
+       if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
                kfree_skb(skb);
                skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
                if (!skb) {
@@ -507,7 +507,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
         */
        skb_get(skb);
        set_arp_failure_handler(skb, arp_failure_discard);
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        len = skb->len;
        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
@@ -559,7 +559,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
        skb_get(skb);
        skb->priority = CPL_PRIORITY_DATA;
        set_arp_failure_handler(skb, arp_failure_discard);
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
        req->wr_lo = htonl(V_WR_TID(ep->hwtid));
@@ -610,7 +610,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
         */
        skb_get(skb);
        set_arp_failure_handler(skb, arp_failure_discard);
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        len = skb->len;
        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
@@ -821,7 +821,8 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
        /*
         * copy the new data into our accumulation buffer.
         */
-       memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
+       skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
+                                 skb->len);
        ep->mpa_pkt_len += skb->len;
 
        /*
@@ -940,7 +941,8 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
        /*
         * Copy the new data into our accumulation buffer.
         */
-       memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
+       skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
+                                 skb->len);
        ep->mpa_pkt_len += skb->len;
 
        /*
@@ -1619,7 +1621,8 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        PDBG("%s ep %p\n", __FUNCTION__, ep);
        skb_pull(skb, sizeof(struct cpl_rdma_terminate));
        PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
-       memcpy(ep->com.qp->attr.terminate_buffer, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
+                                 skb->len);
        ep->com.qp->attr.terminate_msg_len = skb->len;
        ep->com.qp->attr.is_terminate_local = 0;
        return CPL_RET_BUF_DONE;
index da7e102..0c4e59b 100644 (file)
@@ -406,7 +406,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
 
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, IPOIB_ENCAP_LEN);
 
        dev->last_rx = jiffies;
index c17e777..1bdb910 100644 (file)
@@ -216,7 +216,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        if (wc->slid != priv->local_lid ||
            wc->src_qp != priv->qp->qp_num) {
                skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-               skb->mac.raw = skb->data;
+               skb_reset_mac_header(skb);
                skb_pull(skb, IPOIB_ENCAP_LEN);
 
                dev->last_rx = jiffies;
index e3e5c13..ee2b0b9 100644 (file)
@@ -442,7 +442,7 @@ act2000_sendbuf(act2000_card *card, int channel, int ack, struct sk_buff *skb)
                        return 0;
                }
                skb_reserve(xmit_skb, 19);
-               memcpy(skb_put(xmit_skb, len), skb->data, len);
+               skb_copy_from_linear_data(skb, skb_put(xmit_skb, len), len);
        } else {
                xmit_skb = skb_clone(skb, GFP_ATOMIC);
                if (!xmit_skb) {
index 2baef34..c8e1c35 100644 (file)
@@ -652,7 +652,7 @@ static int write_modem(struct cardstate *cs)
         * transmit data
         */
        count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
-       memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count);
+       skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
        skb_pull(bcs->tx_skb, count);
        atomic_set(&ucs->busy, 1);
        gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
index 1e2d38e..428872b 100644 (file)
@@ -404,7 +404,8 @@ static void b1dma_dispatch_tx(avmcard *card)
                printk(KERN_DEBUG "tx: put 0x%x len=%d\n", 
                       skb->data[2], txlen);
 #endif
-               memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2);
+               skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
+                                                skb->len - 2);
        }
        txlen = (txlen + 3) & ~3;
 
index 6f5efa8..d58f927 100644 (file)
@@ -457,7 +457,8 @@ static void c4_dispatch_tx(avmcard *card)
                printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n",
                                card->name, skb->data[2], txlen);
 #endif
-               memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2);
+               skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
+                                                skb->len - 2);
        }
        txlen = (txlen + 3) & ~3;
 
index ae377e8..1642dca 100644 (file)
@@ -254,14 +254,16 @@ write_modem(struct BCState *bcs) {
        count = len;
        if (count > MAX_MODEM_BUF - fp) {
                count = MAX_MODEM_BUF - fp;
-               memcpy(cs->hw.elsa.transbuf + fp, bcs->tx_skb->data, count);
+               skb_copy_from_linear_data(bcs->tx_skb,
+                                         cs->hw.elsa.transbuf + fp, count);
                skb_pull(bcs->tx_skb, count);
                cs->hw.elsa.transcnt += count;
                ret = count;
                count = len - count;
                fp = 0;
        }
-       memcpy((cs->hw.elsa.transbuf + fp), bcs->tx_skb->data, count);
+       skb_copy_from_linear_data(bcs->tx_skb,
+                                 cs->hw.elsa.transbuf + fp, count);
        skb_pull(bcs->tx_skb, count);
        cs->hw.elsa.transcnt += count;
        ret += count;
index cd3b5ad..3446f24 100644 (file)
@@ -1293,7 +1293,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
                oskb = skb;
                skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
                memcpy(skb_put(skb, i), header, i);
-               memcpy(skb_put(skb, oskb->len), oskb->data, oskb->len);
+               skb_copy_from_linear_data(oskb,
+                                         skb_put(skb, oskb->len), oskb->len);
                dev_kfree_skb(oskb);
        }
        st->l2.l2l1(st, PH_PULL | INDICATION, skb);
index b2ae4ec..f854501 100644 (file)
@@ -398,8 +398,9 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
                        _len = CAPIMSG_LEN(skb->data);
                        if (_len > 22) {
                                _len2 = _len - 22;
-                               memcpy(msghead, skb->data, 22);
-                               memcpy(skb->data + _len2, msghead, 22);
+                               skb_copy_from_linear_data(skb, msghead, 22);
+                               skb_copy_to_linear_data_offset(skb, _len2,
+                                                              msghead, 22);
                                skb_pull(skb, _len2);
                                CAPIMSG_SETLEN(skb->data, 22);
                                retval = capilib_data_b3_req(&cinfo->ncci_head,
index 557d96c..cfa8fa5 100644 (file)
@@ -214,8 +214,6 @@ hysdn_rx_netpkt(hysdn_card * card, unsigned char *buf, unsigned short len)
                lp->stats.rx_dropped++;
                return;
        }
-       skb->dev = &lp->netdev;
-
        /* copy the data */
        memcpy(skb_put(skb, len), buf, len);
 
index b7b5aa4..81db4a1 100644 (file)
@@ -113,7 +113,8 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
            (skb = hysdn_tx_netget(card)) != NULL) 
        {
                if (skb->len <= maxlen) {
-                       memcpy(buf, skb->data, skb->len);       /* copy the packet to the buffer */
+                       /* copy the packet to the buffer */
+                       skb_copy_from_linear_data(skb, buf, skb->len);
                        *len = skb->len;
                        *chan = CHAN_NDIS_DATA;
                        card->net_tx_busy = 1;  /* we are busy sending network data */
@@ -126,7 +127,7 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
            ((skb = hycapi_tx_capiget(card)) != NULL) )
        {
                if (skb->len <= maxlen) {
-                       memcpy(buf, skb->data, skb->len);
+                       skb_copy_from_linear_data(skb, buf, skb->len);
                        *len = skb->len;
                        *chan = CHAN_CAPI;
                        hycapi_tx_capiack(card);
index 9c926e4..c97330b 100644 (file)
@@ -829,7 +829,7 @@ isdn_readbchan(int di, int channel, u_char * buf, u_char * fp, int len, wait_que
                                dflag = 0;
                        }
                        count_put = count_pull;
-                       memcpy(cp, skb->data, count_put);
+                       skb_copy_from_linear_data(skb, cp, count_put);
                        cp += count_put;
                        len -= count_put;
 #ifdef CONFIG_ISDN_AUDIO
index 838b373..aa83277 100644 (file)
@@ -872,7 +872,8 @@ typedef struct {
 static void
 isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp)
 {
-       u_char *p = skb->nh.raw; /* hopefully, this was set correctly */
+       /* hopefully, this was set correctly */
+       const u_char *p = skb_network_header(skb);
        unsigned short proto = ntohs(skb->protocol);
        int data_ofs;
        ip_ports *ipp;
@@ -880,7 +881,7 @@ isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp)
 
        addinfo[0] = '\0';
        /* This check stolen from 2.1.72 dev_queue_xmit_nit() */
-       if (skb->nh.raw < skb->data || skb->nh.raw >= skb->tail) {
+       if (p < skb->data || skb->network_header >= skb->tail) {
                /* fall back to old isdn_net_log_packet method() */
                char * buf = skb->data;
 
@@ -1121,7 +1122,7 @@ isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
        if (!skb)
                return;
        if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
-               int pullsize = (ulong)skb->nh.raw - (ulong)skb->data - ETH_HLEN;
+               const int pullsize = skb_network_offset(skb) - ETH_HLEN;
                if (pullsize > 0) {
                        printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize);
                        skb_pull(skb, pullsize);
@@ -1366,7 +1367,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
        struct ethhdr *eth;
        unsigned char *rawp;
 
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);
        eth = eth_hdr(skb);
 
@@ -1786,7 +1787,7 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
        }
        skb->dev = ndev;
        skb->pkt_type = PACKET_HOST;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
 #ifdef ISDN_DEBUG_NET_DUMP
        isdn_dumppkt("R:", skb->data, skb->len, 40);
 #endif
index 1b2df80..387392c 100644 (file)
@@ -1100,7 +1100,8 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
                                        goto drop_packet;
                                }
                                skb_put(skb, skb_old->len + 128);
-                               memcpy(skb->data, skb_old->data, skb_old->len);
+                               skb_copy_from_linear_data(skb_old, skb->data,
+                                                         skb_old->len);
                                if (net_dev->local->ppp_slot < 0) {
                                        printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
                                                __FUNCTION__, net_dev->local->ppp_slot);
@@ -1167,7 +1168,7 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
                mlp->huptimer = 0;
 #endif /* CONFIG_IPPP_FILTER */
        skb->dev = dev;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        netif_rx(skb);
        /* net_dev->local->stats.rx_packets++; done in isdn_net.c */
        return;
@@ -1902,7 +1903,9 @@ void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
                while( from != to ) {
                        unsigned int len = from->len - MP_HEADER_LEN;
 
-                       memcpy(skb_put(skb,len), from->data+MP_HEADER_LEN, len);
+                       skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
+                                                        skb_put(skb,len),
+                                                        len);
                        frag = from->next;
                        isdn_ppp_mp_free_skb(mp, from);
                        from = frag; 
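
The multilink reassembly hunk above appends each fragment's payload, minus its MP header, to the merged skb via skb_copy_from_linear_data_offset(). A condensed sketch of that loop under the same assumptions; fragment-queue bookkeeping is simplified and the function name is hypothetical:

#include <linux/skbuff.h>

/* Hypothetical reassembly loop modelled on the hunk above:
 * append each fragment's payload (minus a fixed header) to 'out'.
 */
static void reassemble_frags(struct sk_buff *out, struct sk_buff *from,
                             struct sk_buff *to, unsigned int hdr_len)
{
        while (from != to) {
                unsigned int len = from->len - hdr_len;
                struct sk_buff *next = from->next;

                skb_copy_from_linear_data_offset(from, hdr_len,
                                                 skb_put(out, len), len);
                kfree_skb(from);        /* the driver uses its own free helper */
                from = next;
        }
}
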
index e3add27..e93ad59 100644 (file)
@@ -415,7 +415,8 @@ isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card)
                spin_lock_irqsave(&card->isdnloop_lock, flags);
                nskb = dev_alloc_skb(skb->len);
                if (nskb) {
-                       memcpy(skb_put(nskb, len), skb->data, len);
+                       skb_copy_from_linear_data(skb,
+                                                 skb_put(nskb, len), len);
                        skb_queue_tail(&card->bqueue[channel], nskb);
                        dev_kfree_skb(skb);
                } else
index 47c59e9..7b55e15 100644 (file)
@@ -429,8 +429,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan,
                if (!(info->data.setup.CallingPN = kmalloc(len - count + 1, GFP_ATOMIC)))
                        return -1;
        
-               memcpy(info->data.setup.CallingPN, skb->data + count + 1, 
-                      len - count);
+               skb_copy_from_linear_data_offset(skb, count + 1,
+                                                info->data.setup.CallingPN,
+                                                len - count);
                info->data.setup.CallingPN[len - count] = 0;
 
        }
@@ -457,8 +458,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan,
                if (!(info->data.setup.CalledPN = kmalloc(len - count + 1, GFP_ATOMIC)))
                        return -1;
         
-               memcpy(info->data.setup.CalledPN, skb->data + count + 1, 
-                      len - count); 
+               skb_copy_from_linear_data_offset(skb, count + 1,
+                                                info->data.setup.CalledPN,
+                                                len - count);
                info->data.setup.CalledPN[len - count] = 0;
 
        }
@@ -539,7 +541,7 @@ int capi_decode_conn_actv_ind(struct pcbit_chan * chan, struct sk_buff *skb)
 
 #ifdef DEBUG
        if (len > 1 && len < 31) {
-               memcpy(str, skb->data + 2, len - 1);
+               skb_copy_from_linear_data_offset(skb, 2, str, len - 1);
                str[len] = 0;
                printk(KERN_DEBUG "Connected Party Number: %s\n", str);
        }
index e85b4c7..cab26f3 100644 (file)
@@ -1171,6 +1171,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
                         * and zap two pdes instead of one.
                         */
                        if (level == PT32_ROOT_LEVEL) {
+                               page_offset &= ~7; /* kill rounding error */
                                page_offset <<= 1;
                                npte = 2;
                        }
index 76e9c36..6a5ab40 100644 (file)
@@ -174,7 +174,7 @@ static unsigned short dvb_net_eth_type_trans(struct sk_buff *skb,
        struct ethhdr *eth;
        unsigned char *rawp;
 
-       skb->mac.raw=skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb,dev->hard_header_len);
        eth = eth_hdr(skb);
 
@@ -600,6 +600,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
                        /* Check CRC32, we've got it in our skb already. */
                        unsigned short ulen = htons(priv->ule_sndu_len);
                        unsigned short utype = htons(priv->ule_sndu_type);
+                       const u8 *tail;
                        struct kvec iov[3] = {
                                { &ulen, sizeof ulen },
                                { &utype, sizeof utype },
@@ -613,10 +614,11 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
                        }
 
                        ule_crc = iov_crc32(ule_crc, iov, 3);
-                       expected_crc = *((u8 *)priv->ule_skb->tail - 4) << 24 |
-                                      *((u8 *)priv->ule_skb->tail - 3) << 16 |
-                                      *((u8 *)priv->ule_skb->tail - 2) << 8 |
-                                      *((u8 *)priv->ule_skb->tail - 1);
+                       tail = skb_tail_pointer(priv->ule_skb);
+                       expected_crc = *(tail - 4) << 24 |
+                                      *(tail - 3) << 16 |
+                                      *(tail - 2) << 8 |
+                                      *(tail - 1);
                        if (ule_crc != expected_crc) {
                                printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
                                       priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0);
@@ -695,7 +697,9 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
                                        }
                                        else
                                        {
-                                               memcpy(dest_addr,  priv->ule_skb->data, ETH_ALEN);
+                                               skb_copy_from_linear_data(priv->ule_skb,
+                                                             dest_addr,
+                                                             ETH_ALEN);
                                                skb_pull(priv->ule_skb, ETH_ALEN);
                                        }
                                }
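
The dvb_net hunk above reads the trailing ULE CRC32 through skb_tail_pointer() instead of dereferencing skb->tail, which this series turns into an offset rather than a pointer on some configurations. A minimal sketch of pulling a big-endian 32-bit trailer out of the linear area this way (the function name is hypothetical):

#include <linux/skbuff.h>
#include <linux/types.h>

/* Hypothetical helper: last four linear bytes as a big-endian CRC32. */
static u32 trailing_crc32(const struct sk_buff *skb)
{
        const u8 *tail = skb_tail_pointer(skb);

        return tail[-4] << 24 | tail[-3] << 16 | tail[-2] << 8 | tail[-1];
}
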
index b691292..7dd34bd 100644 (file)
@@ -714,6 +714,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
        LANSendRequest_t *pSendReq;
        SGETransaction32_t *pTrans;
        SGESimple64_t *pSimple;
+       const unsigned char *mac;
        dma_addr_t dma;
        unsigned long flags;
        int ctx;
@@ -753,7 +754,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
        /* Set the mac.raw pointer, since this apparently isn't getting
         * done before we get the skb. Pull the data pointer past the mac data.
         */
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, 12);
 
         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
@@ -784,6 +785,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
 //                     IOC_AND_NETDEV_NAMES_s_s(dev),
 //                     ctx, skb, skb->data));
 
+       mac = skb_mac_header(skb);
 #ifdef QLOGIC_NAA_WORKAROUND
 {
        struct NAA_Hosed *nh;
@@ -793,12 +795,12 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
           drops. */
        read_lock_irq(&bad_naa_lock);
        for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
-               if ((nh->ieee[0] == skb->mac.raw[0]) &&
-                   (nh->ieee[1] == skb->mac.raw[1]) &&
-                   (nh->ieee[2] == skb->mac.raw[2]) &&
-                   (nh->ieee[3] == skb->mac.raw[3]) &&
-                   (nh->ieee[4] == skb->mac.raw[4]) &&
-                   (nh->ieee[5] == skb->mac.raw[5])) {
+               if ((nh->ieee[0] == mac[0]) &&
+                   (nh->ieee[1] == mac[1]) &&
+                   (nh->ieee[2] == mac[2]) &&
+                   (nh->ieee[3] == mac[3]) &&
+                   (nh->ieee[4] == mac[4]) &&
+                   (nh->ieee[5] == mac[5])) {
                        cur_naa = nh->NAA;
                        dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
                                  "= %04x.\n", cur_naa));
@@ -810,12 +812,12 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
 #endif
 
        pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
-                                                   (skb->mac.raw[0] <<  8) |
-                                                   (skb->mac.raw[1] <<  0));
-       pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
-                                                   (skb->mac.raw[3] << 16) |
-                                                   (skb->mac.raw[4] <<  8) |
-                                                   (skb->mac.raw[5] <<  0));
+                                                   (mac[0] <<  8) |
+                                                   (mac[1] <<  0));
+       pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
+                                                   (mac[3] << 16) |
+                                                   (mac[4] <<  8) |
+                                                   (mac[5] <<  0));
 
        pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
 
@@ -930,7 +932,7 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
                pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                                            priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
 
-               memcpy(skb_put(skb, len), old_skb->data, len);
+               skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
 
                pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                                               priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
@@ -1091,7 +1093,7 @@ mpt_lan_receive_post_reply(struct net_device *dev,
                                                    priv->RcvCtl[ctx].dma,
                                                    priv->RcvCtl[ctx].len,
                                                    PCI_DMA_FROMDEVICE);
-                       memcpy(skb_put(skb, l), old_skb->data, l);
+                       skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
 
                        pci_dma_sync_single_for_device(mpt_dev->pcidev,
                                                       priv->RcvCtl[ctx].dma,
@@ -1120,7 +1122,7 @@ mpt_lan_receive_post_reply(struct net_device *dev,
                                            priv->RcvCtl[ctx].len,
                                            PCI_DMA_FROMDEVICE);
 
-               memcpy(skb_put(skb, len), old_skb->data, len);
+               skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
 
                pci_dma_sync_single_for_device(mpt_dev->pcidev,
                                               priv->RcvCtl[ctx].dma,
@@ -1549,7 +1551,7 @@ mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
        struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
        struct fcllc *fcllc;
 
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, sizeof(struct mpt_lan_ohdr));
 
        if (fch->dtype == htons(0xffff)) {
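
The mptlan hunks above cache skb_mac_header() in a local const pointer once and index that, rather than repeatedly reading skb->mac.raw. A small sketch of the same caching pattern (the copy routine is hypothetical; only helpers already used in the hunks are assumed):

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical helper: copy the destination MAC out of the saved header. */
static void copy_dest_mac(struct sk_buff *skb, u8 dest[ETH_ALEN])
{
        const unsigned char *mac;
        int i;

        skb_reset_mac_header(skb);      /* header starts at skb->data */
        skb_pull(skb, 2 * ETH_ALEN);    /* past both address fields, as in the hunk */
        mac = skb_mac_header(skb);      /* unaffected by the pull */

        for (i = 0; i < ETH_ALEN; i++)
                dest[i] = mac[i];
}
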
index 4db2055..001af7f 100644 (file)
@@ -39,7 +39,7 @@ MODULE_VERSION("2.0");
 
 static LIST_HEAD(device_list);
 struct uflash_dev {
-       char                    *name;  /* device name */
+       const char              *name;  /* device name */
        struct map_info         map;    /* mtd map info */
        struct mtd_info         *mtd;   /* mtd info */
 };
@@ -80,7 +80,7 @@ int uflash_devinit(struct linux_ebus_device *edev, struct device_node *dp)
 
        up->name = of_get_property(dp, "model", NULL);
        if (up->name && 0 < strlen(up->name))
-               up->map.name = up->name;
+               up->map.name = (char *)up->name;
 
        up->map.phys = res->start;
 
index 06e3378..4bee99b 100644 (file)
@@ -735,7 +735,6 @@ static void el_receive(struct net_device *dev)
        else
        {
                skb_reserve(skb,2);     /* Force 16 byte alignment */
-               skb->dev = dev;
                /*
                 *      The read increments through the bytes. The interrupt
                 *      handler will fix the pointer when it returns to
index 702bfb2..e985a85 100644 (file)
@@ -615,7 +615,6 @@ static void receive_packet(struct net_device *dev, int len)
        if (test_and_set_bit(0, (void *) &adapter->dmaing))
                printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction);
 
-       skb->dev = dev;
        adapter->current_dma.direction = 0;
        adapter->current_dma.length = rlen;
        adapter->current_dma.skb = skb;
@@ -1026,7 +1025,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb)
        adapter->current_dma.start_time = jiffies;
 
        if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
-               memcpy(adapter->dma_buffer, skb->data, nlen);
+               skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
                memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
                target = isa_virt_to_bus(adapter->dma_buffer);
        }
index 54e1d5a..eed4299 100644 (file)
@@ -873,7 +873,6 @@ static void el16_rx(struct net_device *dev)
                        }
 
                        skb_reserve(skb,2);
-                       skb->dev = dev;
 
                        /* 'skb->data' points to the start of sk_buff data area. */
                        memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len);
index f791bf0..c7511c4 100644 (file)
@@ -1091,7 +1091,6 @@ el3_rx(struct net_device *dev)
                                printk("Receiving packet size %d status %4.4x.\n",
                                           pkt_len, rx_status);
                        if (skb != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);     /* Align IP on 16 byte */
 
                                /* 'skb->data' points to the start of sk_buff data area. */
index c307ce6..290166d 100644 (file)
@@ -1292,7 +1292,6 @@ static int corkscrew_rx(struct net_device *dev)
                                printk("Receiving packet size %d status %4.4x.\n",
                                     pkt_len, rx_status);
                        if (skb != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                                /* 'skb_put()' points to the start of sk_buff data area. */
                                insl(ioaddr + RX_FIFO,
@@ -1363,7 +1362,6 @@ static int boomerang_rx(struct net_device *dev)
                           copying to a properly sized skbuff. */
                        if (pkt_len < rx_copybreak
                            && (skb = dev_alloc_skb(pkt_len + 4)) != 0) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                                /* 'skb_put()' points to the start of sk_buff data area. */
                                memcpy(skb_put(skb, pkt_len),
index 17d61eb..da1a22c 100644 (file)
@@ -988,7 +988,6 @@ static void elmc_rcv_int(struct net_device *dev)
                                rbd->status = 0;
                                skb = (struct sk_buff *) dev_alloc_skb(totlen + 2);
                                if (skb != NULL) {
-                                       skb->dev = dev;
                                        skb_reserve(skb, 2);    /* 16 byte alignment */
                                        skb_put(skb,totlen);
                                        eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0);
@@ -1146,7 +1145,7 @@ static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
 
        if (len != skb->len)
                memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
-       memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len);
+       skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);
 
 #if (NUM_XMIT_BUFFS == 1)
 #ifdef NO_NOPCOMMANDS
index 6c7437e..c7b571b 100644 (file)
@@ -1189,7 +1189,6 @@ static void mc32_rx_ring(struct net_device *dev)
                        }
 
                        skb->protocol=eth_type_trans(skb,dev);
-                       skb->dev=dev;
                        dev->last_rx = jiffies;
                        lp->net_stats.rx_packets++;
                        lp->net_stats.rx_bytes += length;
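
The Ethernet-driver hunks in this stretch mostly just delete an explicit skb->dev = dev on the receive path; eth_type_trans(), visible right next to the removed line in the 3c527 hunk above, already records the receiving device in the skb, so the manual assignment was redundant. A stripped-down receive sketch under that assumption (the function and its hw_buf argument are hypothetical):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

/* Hypothetical receive path; hw_buf stands in for the device's copy source. */
static void rx_one_frame(struct net_device *dev, const void *hw_buf, int pkt_len)
{
        struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);

        if (!skb)
                return;
        skb_reserve(skb, 2);                       /* align the IP header */
        memcpy(skb_put(skb, pkt_len), hw_buf, pkt_len);
        /* No skb->dev = dev here: eth_type_trans() sets it. */
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);
}
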
index b406ecf..80924f7 100644 (file)
@@ -2414,7 +2414,6 @@ static int vortex_rx(struct net_device *dev)
                                printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
                                           pkt_len, rx_status);
                        if (skb != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                                /* 'skb_put()' points to the start of sk_buff data area. */
                                if (vp->bus_master &&
@@ -2491,7 +2490,6 @@ boomerang_rx(struct net_device *dev)
                        /* Check if the packet is long enough to just accept without
                           copying to a properly sized skbuff. */
                        if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                                pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                                /* 'skb_put()' points to the start of sk_buff data area. */
index 1b3d11e..d396f99 100644 (file)
@@ -331,7 +331,6 @@ static int lance_rx (struct net_device *dev)
                                 return 0;
                         }
 
-                        skb->dev = dev;
                         skb_reserve (skb, 2);           /* 16 byte align */
                         skb_put (skb, len);             /* make room */
                         eth_copy_and_sum(skb,
@@ -568,7 +567,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
        if (skb->len < ETH_ZLEN)
                memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
-        memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);
+        skb_copy_from_linear_data(skb, &ib->tx_buf[entry][0], skblen);
 
         /* Now, give the packet to the lance */
         ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
index 12c8453..e8c9f27 100644 (file)
@@ -573,7 +573,6 @@ rx_status_loop:
                }
 
                skb_reserve(new_skb, RX_OFFSET);
-               new_skb->dev = dev;
 
                pci_unmap_single(cp->pdev, mapping,
                                 buflen, PCI_DMA_FROMDEVICE);
@@ -807,7 +806,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
                if (mss)
                        flags |= LargeSend | ((mss & MSSMask) << MSSShift);
                else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       const struct iphdr *ip = skb->nh.iph;
+                       const struct iphdr *ip = ip_hdr(skb);
                        if (ip->protocol == IPPROTO_TCP)
                                flags |= IPCS | TCPCS;
                        else if (ip->protocol == IPPROTO_UDP)
@@ -826,7 +825,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
                u32 first_len, first_eor;
                dma_addr_t first_mapping;
                int frag, first_entry = entry;
-               const struct iphdr *ip = skb->nh.iph;
+               const struct iphdr *ip = ip_hdr(skb);
 
                /* We must give this initial chunk to the device last.
                 * Otherwise we could race with the device.
@@ -1082,7 +1081,6 @@ static int cp_refill_rx (struct cp_private *cp)
                if (!skb)
                        goto err_out;
 
-               skb->dev = cp->dev;
                skb_reserve(skb, RX_OFFSET);
 
                mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
index 99304b2..a844b1f 100644 (file)
@@ -1904,10 +1904,10 @@ static __inline__ void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
        u32 left = RX_BUF_LEN - offset;
 
        if (size > left) {
-               memcpy(skb->data, ring + offset, left);
-               memcpy(skb->data+left, ring, size - left);
+               skb_copy_to_linear_data(skb, ring + offset, left);
+               skb_copy_to_linear_data_offset(skb, left, ring, size - left);
        } else
-               memcpy(skb->data, ring + offset, size);
+               skb_copy_to_linear_data(skb, ring + offset, size);
 }
 #endif
 
@@ -2013,7 +2013,6 @@ no_early_rx:
 
                skb = dev_alloc_skb (pkt_size + 2);
                if (likely(skb)) {
-                       skb->dev = dev;
                        skb_reserve (skb, 2);   /* 16 byte align the IP fields. */
 #if RX_BUF_IDX == 3
                        wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
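
The 8139too wrap_copy() hunk above switches to the skb_copy_to_linear_data() / skb_copy_to_linear_data_offset() pair for copying out of a wrapping receive ring. A compact sketch of the same wraparound copy, with the ring parameters generalized (names other than the two helpers are hypothetical):

#include <linux/skbuff.h>

/* Hypothetical ring copy; only the two copy helpers are taken from the hunk. */
static void copy_from_rx_ring(struct sk_buff *skb, const unsigned char *ring,
                              unsigned int ring_len, unsigned int offset,
                              unsigned int size)
{
        unsigned int left = ring_len - offset;

        if (size > left) {
                /* Tail of the ring first, then wrap back to its start. */
                skb_copy_to_linear_data(skb, ring + offset, left);
                skb_copy_to_linear_data_offset(skb, left, ring, size - left);
        } else {
                skb_copy_to_linear_data(skb, ring + offset, size);
        }
}
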
index 640d7ca..3ff1155 100644 (file)
@@ -830,7 +830,6 @@ memory_squeeze:
                                lp->stats.rx_dropped++;
                        }
                        else {
-                               skb->dev = dev;
                                if (!rx_in_place) {
                                        /* 16 byte align the data fields */
                                        skb_reserve(skb, 2);
index c3f9f59..a3d46ea 100644 (file)
@@ -2263,6 +2263,7 @@ config GIANFAR
        tristate "Gianfar Ethernet"
        depends on 85xx || 83xx || PPC_86xx
        select PHYLIB
+       select CRC32
        help
          This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
          and MPC86xx family of chips, and the FEC on the 8540.
index 33af833..5852732 100644 (file)
@@ -206,7 +206,7 @@ obj-$(CONFIG_TR) += tokenring/
 obj-$(CONFIG_WAN) += wan/
 obj-$(CONFIG_ARCNET) += arcnet/
 obj-$(CONFIG_NET_PCMCIA) += pcmcia/
-obj-$(CONFIG_NET_RADIO) += wireless/
+obj-y += wireless/
 obj-$(CONFIG_NET_TULIP) += tulip/
 obj-$(CONFIG_HAMRADIO) += hamradio/
 obj-$(CONFIG_IRDA) += irda/
index d76548e..1226cbb 100644 (file)
@@ -320,7 +320,6 @@ static int lance_rx (struct net_device *dev)
                                return 0;
                        }
 
-                       skb->dev = dev;
                        skb_reserve (skb, 2);           /* 16 byte align */
                        skb_put (skb, len);             /* make room */
                        eth_copy_and_sum(skb,
@@ -599,7 +598,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
        ib->btx_ring [entry].length = (-len) | 0xf000;
        ib->btx_ring [entry].misc = 0;
 
-       memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);
+       skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen);
 
        /* Clear the slack of the packet, do I need this? */
        if (len != skblen)
index 7138e0e..7122b7b 100644 (file)
@@ -2027,7 +2027,6 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
                 */
                csum = retdesc->tcp_udp_csum;
 
-               skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
 
                /*
index 962c954..675fe91 100644 (file)
@@ -798,9 +798,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
                        pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
                                         lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
                        skb_put(skb, pkt_len);
-                       skb->dev = dev;
                        lp->rx_skbuff[rx_index] = new_skb;
-                       new_skb->dev = dev;
                        lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
                                                                   new_skb->data,
                                                                   lp->rx_buff_len-2,
@@ -926,9 +924,7 @@ static int amd8111e_rx(struct net_device *dev)
                pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
                        lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
                skb_put(skb, pkt_len);
-               skb->dev = dev;
                lp->rx_skbuff[rx_index] = new_skb;
-               new_skb->dev = dev;
                lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
                        new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);
 
index dba5e51..da6ffa8 100644 (file)
@@ -853,9 +853,9 @@ static void cops_rx(struct net_device *dev)
                 return;
         }
 
-        skb->mac.raw    = skb->data;    /* Point to entire packet. */
+        skb_reset_mac_header(skb);    /* Point to entire packet. */
         skb_pull(skb,3);
-        skb->h.raw      = skb->data;    /* Point to data (Skip header). */
+        skb_reset_transport_header(skb);    /* Point to data (Skip header). */
 
         /* Update the counters. */
         lp->stats.rx_packets++;
index 2ea44ce..6a6cbd3 100644 (file)
@@ -770,13 +770,13 @@ static int sendup_buffer (struct net_device *dev)
        skb->data[0] = dnode;
        skb->data[1] = snode;
        skb->data[2] = llaptype;
-       skb->mac.raw = skb->data;       /* save pointer to llap header */
+       skb_reset_mac_header(skb);      /* save pointer to llap header */
        skb_pull(skb,3);
 
        /* copy ddp(s,e)hdr + contents */
-       memcpy(skb->data,(void*)ltdmabuf,len);
+       skb_copy_to_linear_data(skb, ltdmabuf, len);
 
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
 
        stats->rx_packets++;
        stats->rx_bytes+=skb->len;
@@ -917,13 +917,14 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
 
        int i;
        struct lt_sendlap cbuf;
+       unsigned char *hdr;
 
        cbuf.command = LT_SENDLAP;
        cbuf.dnode = skb->data[0];
        cbuf.laptype = skb->data[2];
        skb_pull(skb,3);        /* skip past LLAP header */
        cbuf.length = skb->len; /* this is host order */
-       skb->h.raw=skb->data;
+       skb_reset_transport_header(skb);
 
        if(debug & DEBUG_UPPER) {
                printk("command ");
@@ -932,11 +933,13 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
                printk("\n");
        }
 
-       do_write(dev,&cbuf,sizeof(cbuf),skb->h.raw,skb->len);
+       hdr = skb_transport_header(skb);
+       do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);
 
        if(debug & DEBUG_UPPER) {
                printk("sent %d ddp bytes\n",skb->len);
-               for(i=0;i<skb->len;i++) printk("%02x ",skb->h.raw[i]);
+               for (i = 0; i < skb->len; i++)
+                       printk("%02x ", hdr[i]);
                printk("\n");
        }
 
index 6318814..e0a18e7 100644 (file)
@@ -110,7 +110,7 @@ static void rx(struct net_device *dev, int bufnum,
 
        pkt = (struct archdr *) skb->data;
 
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, ARC_HDR_SIZE);
 
        /* up to sizeof(pkt->soft) has already been copied from the card */
index 83004fd..681e20b 100644 (file)
@@ -519,9 +519,12 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
                 * real header when we do rebuild_header.
                 */
                *(uint16_t *) skb_push(skb, 2) = type;
-               if (skb->nh.raw - skb->mac.raw != 2)
+               /*
+                * XXX: Why not use skb->mac_len?
+                */
+               if (skb->network_header - skb->mac_header != 2)
                        BUGMSG(D_NORMAL, "arcnet_header: Yikes!  diff (%d) is not 2!\n",
-                              (int)(skb->nh.raw - skb->mac.raw));
+                              (int)(skb->network_header - skb->mac_header));
                return -2;      /* return error -- can't transmit yet! */
        }
        else {
@@ -554,11 +557,13 @@ static int arcnet_rebuild_header(struct sk_buff *skb)
        unsigned short type;
        uint8_t daddr=0;
        struct ArcProto *proto;
-
-       if (skb->nh.raw - skb->mac.raw != 2) {
+       /*
+        * XXX: Why not use skb->mac_len?
+        */
+       if (skb->network_header - skb->mac_header != 2) {
                BUGMSG(D_NORMAL,
-                    "rebuild_header: shouldn't be here! (hdrsize=%d)\n",
-                    (int)(skb->nh.raw - skb->mac.raw));
+                      "rebuild_header: shouldn't be here! (hdrsize=%d)\n",
+                      (int)(skb->network_header - skb->mac_header));
                return 0;
        }
        type = *(uint16_t *) skb_pull(skb, 2);
index 6648558..cc4610d 100644 (file)
@@ -122,10 +122,8 @@ static void rx(struct net_device *dev, int bufnum,
        }
        skb_put(skb, length + ARC_HDR_SIZE + sizeof(int));
        skb->dev = dev;
-
-       pkt = (struct archdr *) skb->data;
-
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
+       pkt = (struct archdr *)skb_mac_header(skb);
        skb_pull(skb, ARC_HDR_SIZE);
 
        /* up to sizeof(pkt->soft) has already been copied from the card */
@@ -270,13 +268,13 @@ static int ack_tx(struct net_device *dev, int acked)
   skb_put(ackskb, length + ARC_HDR_SIZE );
   ackskb->dev = dev;
 
-  ackpkt = (struct archdr *) ackskb->data;
-
-  ackskb->mac.raw = ackskb->data;
+  skb_reset_mac_header(ackskb);
+  ackpkt = (struct archdr *)skb_mac_header(ackskb);
   /* skb_pull(ackskb, ARC_HDR_SIZE); */
 
 
-  memcpy(ackpkt, lp->outgoing.skb->data, ARC_HDR_SIZE+sizeof(struct arc_cap));
+  skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
+               ARC_HDR_SIZE + sizeof(struct arc_cap));
   ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */
   ackpkt->soft.cap.mes.ack=acked;
 
index 6d6c69f..2de8877 100644 (file)
@@ -94,7 +94,7 @@ static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev)
        int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
 
        /* Pull off the arcnet header. */
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, hdr_size);
 
        if (pkt->hard.dest == 0)
index bee3422..460a095 100644 (file)
@@ -96,7 +96,7 @@ static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev)
        int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
 
        /* Pull off the arcnet header. */
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, hdr_size);
 
        if (pkt->hard.dest == 0)
index 9dfc09b..a0e68e7 100644 (file)
@@ -743,7 +743,6 @@ static int ariadne_rx(struct net_device *dev)
            }
 
 
-           skb->dev = dev;
            skb_reserve(skb,2);         /* 16 byte align */
            skb_put(skb,pkt_len);       /* Make room */
            eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0);
index ddd12d4..8f0d7ce 100644 (file)
@@ -526,7 +526,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
                skb = dev_alloc_skb(len + 2);
 
                if (skb) {
-                       skb->dev = dev;
                        skb_reserve(skb, 2);
 
                        am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
index 1621b8f..152fa7a 100644 (file)
@@ -858,7 +858,6 @@ static void at91ether_rx(struct net_device *dev)
                        skb_reserve(skb, 2);
                        memcpy(skb_put(skb, pktlen), p_recv, pktlen);
 
-                       skb->dev = dev;
                        skb->protocol = eth_type_trans(skb, dev);
                        dev->last_rx = jiffies;
                        lp->stats.rx_bytes += pktlen;
index dd698b0..2438c5b 100644 (file)
@@ -255,7 +255,6 @@ static int ep93xx_rx(struct net_device *dev, int *budget)
 
                skb = dev_alloc_skb(length + 2);
                if (likely(skb != NULL)) {
-                       skb->dev = dev;
                        skb_reserve(skb, 2);
                        dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
                                                length, DMA_FROM_DEVICE);
index a292188..f075ceb 100644 (file)
@@ -875,7 +875,6 @@ ether1_recv_done (struct net_device *dev)
                        skb = dev_alloc_skb (length + 2);
 
                        if (skb) {
-                               skb->dev = dev;
                                skb_reserve (skb, 2);
 
                                ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length);
index 8411783..32da2eb 100644 (file)
@@ -661,7 +661,6 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
                        if (skb) {
                                unsigned char *buf;
 
-                               skb->dev = dev;
                                skb_reserve(skb, 2);
                                buf = skb_put(skb, length);
                                ether3_readbuffer(dev, buf + 12, length - 12);
index 56ae8ba..bed8e0e 100644 (file)
@@ -768,7 +768,6 @@ net_rx(struct net_device *dev)
                                lp->stats.rx_dropped++;
                                break;
                        }
-                       skb->dev = dev;
                        skb_reserve(skb,2);
 
                        insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
index 4e3bf6a..3d87bd2 100644 (file)
@@ -453,7 +453,8 @@ bionet_send_packet(struct sk_buff *skb, struct net_device *dev) {
                stdma_lock(bionet_intr, NULL);
                local_irq_restore(flags);
                if( !STRAM_ADDR(buf+length-1) ) {
-                       memcpy(nic_packet->buffer, skb->data, length);
+                       skb_copy_from_linear_data(skb, nic_packet->buffer,
+                                                 length);
                        buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer;
                }
 
@@ -544,13 +545,13 @@ bionet_poll_rx(struct net_device *dev) {
                                break;
                        }
 
-                       skb->dev = dev;
                        skb_reserve( skb, 2 );          /* 16 Byte align  */
                        skb_put( skb, pkt_len );        /* make room */
 
                        /* 'skb->data' points to the start of sk_buff data area.
                         */
-                       memcpy(skb->data, nic_packet->buffer, pkt_len);
+                       skb_copy_to_linear_data(skb, nic_packet->buffer,
+                                               pkt_len);
                        skb->protocol = eth_type_trans( skb, dev );
                        netif_rx(skb);
                        dev->last_rx = jiffies;
index 3b54361..5471440 100644 (file)
@@ -717,7 +717,8 @@ pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev) {
 
                local_irq_restore(flags);
                if( !STRAM_ADDR(buf+length-1) ) {
-                       memcpy(nic_packet->buffer, skb->data, length);
+                       skb_copy_from_linear_data(skb, nic_packet->buffer,
+                                                 length);
                        buf = (unsigned long)phys_nic_packet;
                }
 
@@ -792,7 +793,8 @@ pamsnet_poll_rx(struct net_device *dev) {
 
                        /* 'skb->data' points to the start of sk_buff data area.
                         */
-                       memcpy(skb->data, nic_packet->buffer, pkt_len);
+                       skb_copy_to_linear_data(skb, nic_packet->buffer,
+                                               pkt_len);
                        netif_rx(skb);
                        dev->last_rx = jiffies;
                        lp->stats.rx_packets++;
index 7e37ac8..dfa8b9b 100644 (file)
@@ -1047,7 +1047,6 @@ static int lance_rx( struct net_device *dev )
                                                   pkt_len );
                                }
 
-                               skb->dev = dev;
                                skb_reserve( skb, 2 );  /* 16 byte align */
                                skb_put( skb, pkt_len );        /* Make room */
                                lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
index 8606eac..4b1d4d1 100644 (file)
@@ -408,7 +408,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
 static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
 {
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
-       struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct page *page;
        unsigned long offset;
@@ -444,7 +443,6 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
                 * the 14 byte MAC header is removed
                 */
                skb_reserve(skb, NET_IP_ALIGN);
-               skb->dev = netdev;
 
                buffer_info->alloced = 1;
                buffer_info->skb = skb;
@@ -1296,19 +1294,21 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
                }
 
                if (skb->protocol == ntohs(ETH_P_IP)) {
-                       skb->nh.iph->tot_len = 0;
-                       skb->nh.iph->check = 0;
-                       skb->h.th->check =
-                           ~csum_tcpudp_magic(skb->nh.iph->saddr,
-                                              skb->nh.iph->daddr, 0,
-                                              IPPROTO_TCP, 0);
-                       ipofst = skb->nh.raw - skb->data;
+                       struct iphdr *iph = ip_hdr(skb);
+
+                       iph->tot_len = 0;
+                       iph->check = 0;
+                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                                iph->daddr, 0,
+                                                                IPPROTO_TCP,
+                                                                0);
+                       ipofst = skb_network_offset(skb);
                        if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
                                tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
 
-                       tso->tsopl |= (skb->nh.iph->ihl &
+                       tso->tsopl |= (iph->ihl &
                                CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
-                       tso->tsopl |= ((skb->h.th->doff << 2) &
+                       tso->tsopl |= (tcp_hdrlen(skb) &
                                TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT;
                        tso->tsopl |= (skb_shinfo(skb)->gso_size &
                                TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
@@ -1327,8 +1327,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
        u8 css, cso;
 
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-               cso = skb->h.raw - skb->data;
-               css = (skb->h.raw + skb->csum_offset) - skb->data;
+               cso = skb_transport_offset(skb);
+               css = cso + skb->csum_offset;
                if (unlikely(cso & 0x1)) {
                        printk(KERN_DEBUG "%s: payload offset != even number\n",
                                atl1_driver_name);
@@ -1370,8 +1370,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter,
 
        if (tcp_seg) {
                /* TSO/GSO */
-               proto_hdr_len =
-                   ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+               proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                buffer_info->length = proto_hdr_len;
                page = virt_to_page(skb->data);
                offset = (unsigned long)skb->data & ~PAGE_MASK;
@@ -1563,8 +1562,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
                if (skb->protocol == htons(ETH_P_IP)) {
-                       proto_hdr_len = ((skb->h.raw - skb->data) +
-                                        (skb->h.th->doff << 2));
+                       proto_hdr_len = (skb_transport_offset(skb) +
+                                        tcp_hdrlen(skb));
                        if (unlikely(proto_hdr_len > len)) {
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
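
The atl1 hunks above replace skb->nh.iph / skb->h.th dereferences with the typed accessors ip_hdr(), tcp_hdr() and tcp_hdrlen(), plus the offset helpers skb_transport_offset() and skb_network_offset(). A condensed sketch of the TSO pseudo-header fixup those hunks implement, using only helpers that appear in them; descriptor programming and the caller are omitted:

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

/* Hypothetical TSO prep: zero the fields the NIC recomputes and return
 * the per-segment header length, as the hunks above do.
 */
static unsigned int tso_prepare(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        iph->tot_len = 0;
        iph->check = 0;
        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);

        return skb_transport_offset(skb) + tcp_hdrlen(skb);
}
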
index 2d306fc..18aba83 100644 (file)
@@ -793,7 +793,6 @@ static void net_rx(struct net_device *dev)
                        lp->stats.rx_dropped++;
                        goto done;
                }
-               skb->dev = dev;
 
                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
index 69ae229..d10fb80 100644 (file)
@@ -1125,7 +1125,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
        }
 
        pDB = aup->tx_db_inuse[aup->tx_head];
-       memcpy((void *)pDB->vaddr, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
        if (skb->len < ETH_ZLEN) {
                for (i=skb->len; i<ETH_ZLEN; i++) {
                        ((char *)pDB->vaddr)[i] = 0;
@@ -1205,7 +1205,6 @@ static int au1000_rx(struct net_device *dev)
                                aup->stats.rx_dropped++;
                                continue;
                        }
-                       skb->dev = dev;
                        skb_reserve(skb, 2);    /* 16 byte IP header align */
                        eth_copy_and_sum(skb,
                                (unsigned char *)pDB->vaddr, frmlen, 0);
index d742bfe..879a2ff 100644 (file)
@@ -825,12 +825,11 @@ static int b44_rx(struct b44 *bp, int budget)
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;
 
-                       copy_skb->dev = bp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
-                       memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
-
+                       skb_copy_from_linear_data_offset(skb, bp->rx_offset,
+                                                        copy_skb->data, len);
                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
@@ -1007,7 +1006,8 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        goto err_out;
                }
 
-               memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
+               skb_copy_from_linear_data(skb, skb_put(bounce_skb, len),
+                                         skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }
index c143304..4612725 100644 (file)
@@ -715,7 +715,6 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
                if (skb != NULL) {
                        nb -= ETHERCRC;
                        skb_put(skb, nb);
-                       skb->dev = dev;
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        dev->last_rx = jiffies;
index 0b7aded..f98a220 100644 (file)
@@ -54,8 +54,8 @@
 
 #define DRV_MODULE_NAME                "bnx2"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "1.5.7"
-#define DRV_MODULE_RELDATE     "March 29, 2007"
+#define DRV_MODULE_VERSION     "1.5.8"
+#define DRV_MODULE_RELDATE     "April 24, 2007"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -1884,10 +1884,8 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                                goto reuse_rx;
 
                        /* aligned copy */
-                       memcpy(new_skb->data,
-                               skb->data + bp->rx_offset - 2,
-                               len + 2);
-
+                       skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
+                                     new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);
 
@@ -3421,6 +3419,9 @@ bnx2_init_chip(struct bnx2 *bp)
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
+       if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
+               val |= BNX2_MQ_CONFIG_HALT_DIS;
+
        REG_WR(bp, BNX2_MQ_CONFIG, val);
 
        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
@@ -4510,6 +4511,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if ((mss = skb_shinfo(skb)->gso_size) &&
                (skb->len > (bp->dev->mtu + ETH_HLEN))) {
                u32 tcp_opt_len, ip_tcp_len;
+               struct iphdr *iph;
 
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -4517,25 +4519,23 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        return NETDEV_TX_OK;
                }
 
-               tcp_opt_len = ((skb->h.th->doff - 5) * 4);
                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
 
                tcp_opt_len = 0;
-               if (skb->h.th->doff > 5) {
-                       tcp_opt_len = (skb->h.th->doff - 5) << 2;
-               }
-               ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
-
-               skb->nh.iph->check = 0;
-               skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
-               skb->h.th->check =
-                       ~csum_tcpudp_magic(skb->nh.iph->saddr,
-                                           skb->nh.iph->daddr,
-                                           0, IPPROTO_TCP, 0);
-
-               if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
-                       vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
-                               (tcp_opt_len >> 2)) << 8;
+               if (tcp_hdr(skb)->doff > 5)
+                       tcp_opt_len = tcp_optlen(skb);
+
+               ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
+
+               iph = ip_hdr(skb);
+               iph->check = 0;
+               iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
+               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                        iph->daddr, 0,
+                                                        IPPROTO_TCP, 0);
+               if (tcp_opt_len || (iph->ihl > 5)) {
+                       vlan_tag_flags |= ((iph->ihl - 5) +
+                                          (tcp_opt_len >> 2)) << 8;
                }
        }
        else
index ccbdf81..878eee5 100644 (file)
@@ -6518,6 +6518,7 @@ struct bnx2 {
 #define CHIP_ID_5708_B0                        0x57081000
 #define CHIP_ID_5708_B1                        0x57081010
 #define CHIP_ID_5709_A0                        0x57090000
+#define CHIP_ID_5709_A1                        0x57090010
 
 #define CHIP_BOND_ID(bp)               (((bp)->chip_id) & 0xf)
 
index 3fb354d..7e03f41 100644 (file)
@@ -884,8 +884,8 @@ static int ad_lacpdu_send(struct port *port)
        }
 
        skb->dev = slave->dev;
-       skb->mac.raw = skb->data;
-       skb->nh.raw = skb->data + ETH_HLEN;
+       skb_reset_mac_header(skb);
+       skb->network_header = skb->mac_header + ETH_HLEN;
        skb->protocol = PKT_TYPE_LACPDU;
        skb->priority = TC_PRIO_CONTROL;
 
@@ -928,8 +928,8 @@ static int ad_marker_send(struct port *port, struct marker *marker)
        skb_reserve(skb, 16);
 
        skb->dev = slave->dev;
-       skb->mac.raw = skb->data;
-       skb->nh.raw = skb->data + ETH_HLEN;
+       skb_reset_mac_header(skb);
+       skb->network_header = skb->mac_header + ETH_HLEN;
        skb->protocol = PKT_TYPE_LACPDU;
 
        marker_header = (struct marker_header *)skb_put(skb, length);
index 217a2ee..92c3b6f 100644 (file)
@@ -104,10 +104,15 @@ struct arp_pkt {
 };
 #pragma pack()
 
+static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
+{
+       return (struct arp_pkt *)skb_network_header(skb);
+}
+
 /* Forward declaration */
 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
 
-static inline u8 _simple_hash(u8 *hash_start, int hash_size)
+static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
 {
        int i;
        u8 hash = 0;
@@ -613,7 +618,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, u32 src_ip)
 static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-       struct arp_pkt *arp = (struct arp_pkt *)skb->nh.raw;
+       struct arp_pkt *arp = arp_pkt(skb);
        struct slave *assigned_slave;
        struct rlb_client_info *client_info;
        u32 hash_index = 0;
@@ -701,7 +706,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
  */
 static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 {
-       struct arp_pkt *arp = (struct arp_pkt *)skb->nh.raw;
+       struct arp_pkt *arp = arp_pkt(skb);
        struct slave *tx_slave = NULL;
 
        if (arp->op_code == __constant_htons(ARPOP_REPLY)) {
@@ -890,8 +895,8 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
                data = skb_put(skb, size);
                memcpy(data, &pkt, size);
 
-               skb->mac.raw = data;
-               skb->nh.raw = data + ETH_HLEN;
+               skb_reset_mac_header(skb);
+               skb->network_header = skb->mac_header + ETH_HLEN;
                skb->protocol = pkt.type;
                skb->priority = TC_PRIO_CONTROL;
                skb->dev = slave->dev;
@@ -1263,10 +1268,10 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
        int hash_size = 0;
        int do_tx_balance = 1;
        u32 hash_index = 0;
-       u8 *hash_start = NULL;
+       const u8 *hash_start = NULL;
        int res = 1;
 
-       skb->mac.raw = (unsigned char *)skb->data;
+       skb_reset_mac_header(skb);
        eth_data = eth_hdr(skb);
 
        /* make sure that the curr_active_slave and the slaves list do
@@ -1280,15 +1285,18 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
        }
 
        switch (ntohs(skb->protocol)) {
-       case ETH_P_IP:
+       case ETH_P_IP: {
+               const struct iphdr *iph = ip_hdr(skb);
+
                if ((memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) ||
-                   (skb->nh.iph->daddr == ip_bcast) ||
-                   (skb->nh.iph->protocol == IPPROTO_IGMP)) {
+                   (iph->daddr == ip_bcast) ||
+                   (iph->protocol == IPPROTO_IGMP)) {
                        do_tx_balance = 0;
                        break;
                }
-               hash_start = (char*)&(skb->nh.iph->daddr);
-               hash_size = sizeof(skb->nh.iph->daddr);
+               hash_start = (char *)&(iph->daddr);
+               hash_size = sizeof(iph->daddr);
+       }
                break;
        case ETH_P_IPV6:
                if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) {
@@ -1296,8 +1304,8 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                        break;
                }
 
-               hash_start = (char*)&(skb->nh.ipv6h->daddr);
-               hash_size = sizeof(skb->nh.ipv6h->daddr);
+               hash_start = (char *)&(ipv6_hdr(skb)->daddr);
+               hash_size = sizeof(ipv6_hdr(skb)->daddr);
                break;
        case ETH_P_IPX:
                if (ipx_hdr(skb)->ipx_checksum !=
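
bond_alb.c above adds a one-line arp_pkt() inline that casts skb_network_header() to the driver's private ARP layout, so the balancing code never touches skb->nh.raw again. The same wrapper idea, sketched for a generic header; struct my_hdr and my_hdr() are hypothetical, only skb_network_header() is taken from the hunk:

#include <linux/skbuff.h>
#include <linux/types.h>

struct my_hdr {                 /* hypothetical on-wire layout */
        __be16  op;
        __be32  src_ip;
        __be32  dst_ip;
} __attribute__((packed));

static inline struct my_hdr *my_hdr(const struct sk_buff *skb)
{
        /* Single place that knows what the network header points at. */
        return (struct my_hdr *)skb_network_header(skb);
}
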
index e4724d8..cea3783 100644 (file)
@@ -2524,7 +2524,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
                                 (2 * sizeof(u32)))))
                goto out_unlock;
 
-       arp = skb->nh.arph;
+       arp = arp_hdr(skb);
        if (arp->ar_hln != dev->addr_len ||
            skb->pkt_type == PACKET_OTHERHOST ||
            skb->pkt_type == PACKET_LOOPBACK ||
@@ -3476,7 +3476,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
                                    struct net_device *bond_dev, int count)
 {
        struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
        u16 *layer4hdr = (u16 *)((u32 *)iph + iph->ihl);
        int layer4_xor = 0;
 
@@ -3640,9 +3640,8 @@ static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
        read_lock_bh(&bond->lock);
 
        bond_for_each_slave(bond, slave, i) {
-               if (slave->dev->get_stats) {
-                       sstats = slave->dev->get_stats(slave->dev);
-
+               sstats = slave->dev->get_stats(slave->dev);
+               if (sstats) {
                        stats->rx_packets += sstats->rx_packets;
                        stats->rx_bytes += sstats->rx_bytes;
                        stats->rx_errors += sstats->rx_errors;
index c812648..4aec747 100644 (file)
@@ -1995,7 +1995,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                return -1;
 
        *skbref = skb;
-       skb->dev = cp->dev;
        skb_reserve(skb, swivel);
 
        p = skb->data;
@@ -2822,10 +2821,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 
        ctrl = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               u64 csum_start_off, csum_stuff_off;
-
-               csum_start_off = (u64) (skb->h.raw - skb->data);
-               csum_stuff_off = csum_start_off + skb->csum_offset;
+               const u64 csum_start_off = skb_transport_offset(skb);
+               const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
 
                ctrl =  TX_DESC_CSUM_EN |
                        CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
@@ -2849,8 +2846,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
                              ctrl | TX_DESC_SOF, 0);
                entry = TX_DESC_NEXT(ring, entry);
 
-               memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
-                      len - tabort, tabort);
+               skb_copy_from_linear_data_offset(skb, len - tabort,
+                             tx_tiny_buf(cp, ring, entry), tabort);
                mapping = tx_tiny_map(cp, ring, entry, tentry);
                cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
                              (nr_frags == 0));
index 326d4a6..e4f874a 100644 (file)
@@ -1062,7 +1062,7 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
                                            pci_unmap_addr(ce, dma_addr),
                                            pci_unmap_len(ce, dma_len),
                                            PCI_DMA_FROMDEVICE);
-               memcpy(skb->data, ce->skb->data, len);
+               skb_copy_from_linear_data(ce->skb, skb->data, len);
                pci_dma_sync_single_for_device(pdev,
                                               pci_unmap_addr(ce, dma_addr),
                                               pci_unmap_len(ce, dma_len),
@@ -1379,12 +1379,11 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
        }
        __skb_pull(skb, sizeof(*p));
 
-       skb->dev = adapter->port[p->iff].dev;
        skb->dev->last_rx = jiffies;
        st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
        st->rx_packets++;
 
-       skb->protocol = eth_type_trans(skb, skb->dev);
+       skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
        if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
            skb->protocol == htons(ETH_P_IP) &&
            (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
@@ -1866,14 +1865,14 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                ++st->tx_tso;
 
-               eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
+               eth_type = skb_network_offset(skb) == ETH_HLEN ?
                        CPL_ETH_II : CPL_ETH_II_VLAN;
 
                hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
                hdr->opcode = CPL_TX_PKT_LSO;
                hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
-               hdr->ip_hdr_words = skb->nh.iph->ihl;
-               hdr->tcp_hdr_words = skb->h.th->doff;
+               hdr->ip_hdr_words = ip_hdr(skb)->ihl;
+               hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
                hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
                                                          skb_shinfo(skb)->gso_size));
                hdr->len = htonl(skb->len - sizeof(*hdr));
@@ -1913,7 +1912,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
                    skb->ip_summed == CHECKSUM_PARTIAL &&
-                   skb->nh.iph->protocol == IPPROTO_UDP) {
+                   ip_hdr(skb)->protocol == IPPROTO_UDP) {
                        if (unlikely(skb_checksum_help(skb))) {
                                pr_debug("%s: unable to do udp checksum\n", dev->name);
                                dev_kfree_skb_any(skb);
@@ -1926,7 +1925,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 */
                if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
                        if (skb->protocol == htons(ETH_P_ARP) &&
-                           skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
+                           arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
                                adapter->sge->espibug_skb[dev->if_port] = skb;
                                /* We want to re-use this skb later. We
                                 * simply bump the reference count and it
@@ -2096,10 +2095,14 @@ static void espibug_workaround_t204(unsigned long data)
                                        0x0, 0x7, 0x43, 0x0, 0x0, 0x0
                                };
 
-                               memcpy(skb->data + sizeof(struct cpl_tx_pkt),
-                                       ch_mac_addr, ETH_ALEN);
-                               memcpy(skb->data + skb->len - 10,
-                                       ch_mac_addr, ETH_ALEN);
+                               skb_copy_to_linear_data_offset(skb,
+                                                   sizeof(struct cpl_tx_pkt),
+                                                              ch_mac_addr,
+                                                              ETH_ALEN);
+                               skb_copy_to_linear_data_offset(skb,
+                                                              skb->len - 10,
+                                                              ch_mac_addr,
+                                                              ETH_ALEN);
                                skb->cb[0] = 0xff;
                        }
 
@@ -2126,10 +2129,14 @@ static void espibug_workaround(unsigned long data)
                        if (!skb->cb[0]) {
                                u8 ch_mac_addr[ETH_ALEN] =
                                    {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
-                               memcpy(skb->data + sizeof(struct cpl_tx_pkt),
-                                      ch_mac_addr, ETH_ALEN);
-                               memcpy(skb->data + skb->len - 10, ch_mac_addr,
-                                      ETH_ALEN);
+                               skb_copy_to_linear_data_offset(skb,
+                                                    sizeof(struct cpl_tx_pkt),
+                                                              ch_mac_addr,
+                                                              ETH_ALEN);
+                               skb_copy_to_linear_data_offset(skb,
+                                                              skb->len - 10,
+                                                              ch_mac_addr,
+                                                              ETH_ALEN);
                                skb->cb[0] = 0xff;
                        }
 
index 8eb5712..5bdf5ca 100644 (file)
@@ -1348,7 +1348,8 @@ e100_rx(struct net_device *dev)
 
 #ifdef ETHDEBUG
                printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
-                 skb->head, skb->data, skb->tail, skb->end);
+                      skb->head, skb->data, skb_tail_pointer(skb),
+                      skb_end_pointer(skb));
                printk("copying packet to 0x%x.\n", skb_data_ptr);
 #endif
 
@@ -1375,7 +1376,6 @@ e100_rx(struct net_device *dev)
                myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data));
        }
 
-       skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);
 
        /* Send the packet to the upper layers */
index 4612f71..9774bb1 100644 (file)
@@ -1004,7 +1004,6 @@ skip_this_frame:
                return;
        }
        skb_reserve(skb, 2);    /* longword align L3 header */
-       skb->dev = dev;
 
        if (bp + length > lp->end_dma_buff) {
                int semi_cnt = lp->end_dma_buff - bp;
@@ -1702,7 +1701,6 @@ net_rx(struct net_device *dev)
                return;
        }
        skb_reserve(skb, 2);    /* longword align L3 header */
-       skb->dev = dev;
 
        readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
        if (length & 1)
index e14862b..483a594 100644 (file)
@@ -67,7 +67,10 @@ static inline union listen_entry *stid2entry(const struct tid_info *t,
 static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
                                               unsigned int tid)
 {
-       return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
+       struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+           &(t->tid_tab[tid]) : NULL;
+
+       return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
 }
 
 /*
index 4864924..ebcf35e 100644 (file)
@@ -508,6 +508,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
 
        spin_lock_bh(&td->tid_release_lock);
        p->ctx = (void *)td->tid_release_list;
+       p->client = NULL;
        td->tid_release_list = p;
        if (!p->ctx)
                schedule_work(&td->tid_release_task);
@@ -623,7 +624,8 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
        struct t3c_tid_entry *t3c_tid;
 
        t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
-       if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
+       if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+           t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
                return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
                                                                    t3c_tid->
@@ -642,7 +644,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
        struct t3c_tid_entry *t3c_tid;
 
        t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
-       if (t3c_tid->ctx && t3c_tid->client->handlers &&
+       if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode] (dev, skb,
                                                             t3c_tid->ctx);
@@ -660,7 +662,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
        struct t3c_tid_entry *t3c_tid;
 
        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-       if (t3c_tid->ctx && t3c_tid->client->handlers &&
+       if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode]
                    (dev, skb, t3c_tid->ctx);
@@ -689,6 +691,28 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
        }
 }
 
+/*
+ * Returns an sk_buff for a reply CPL message of size len.  If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated.  The input skb must be of size at least len.  Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+                                              int gfp)
+{
+       if (likely(!skb_cloned(skb))) {
+               BUG_ON(skb->len < len);
+               __skb_trim(skb, len);
+               skb_get(skb);
+       } else {
+               skb = alloc_skb(len, gfp);
+               if (skb)
+                       __skb_put(skb, len);
+       }
+       return skb;
+}
+
 static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
 {
        union opcode_tid *p = cplhdr(skb);
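Editorial aside, not part of the patch: the helper above is intended for CPL handlers that want to turn the request skb into its own reply when they are the sole user of that skb. Below is a minimal caller sketch, using only symbols that appear in the surrounding hunks and mirroring the do_abort_req_rss() path in the next hunk; the function name example_send_abort_rpl() is hypothetical, and note that the helper has already sized the skb to the requested length, so no further __skb_put() is strictly needed here.

	/*
	 * Editorial sketch only -- not part of this commit.  Build and send a
	 * CPL_ABORT_RPL, reusing the request skb when it has no clones and
	 * falling back to a fresh allocation otherwise.
	 */
	static void example_send_abort_rpl(struct t3cdev *dev,
					   struct sk_buff *req_skb,
					   unsigned int tid, u8 status)
	{
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;

		/* Trims and reuses req_skb if possible, else allocates. */
		reply_skb = cxgb3_get_cpl_reply_skb(req_skb, sizeof(*rpl),
						    GFP_ATOMIC);
		if (!reply_skb)
			return;		/* out of memory: drop the reply */

		reply_skb->priority = CPL_PRIORITY_DATA;
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = status;
		cxgb3_ofld_send(dev, reply_skb);
	}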
@@ -696,30 +720,39 @@ static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
        struct t3c_tid_entry *t3c_tid;
 
        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-       if (t3c_tid->ctx && t3c_tid->client->handlers &&
+       if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode]
                    (dev, skb, t3c_tid->ctx);
        } else {
                struct cpl_abort_req_rss *req = cplhdr(skb);
                struct cpl_abort_rpl *rpl;
+               struct sk_buff *reply_skb;
+               unsigned int tid = GET_TID(req);
+               u8 cmd = req->status;
+
+               if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+                   req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+                       goto out;
 
-               struct sk_buff *skb =
-                   alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
-               if (!skb) {
+               reply_skb = cxgb3_get_cpl_reply_skb(skb,
+                                                   sizeof(struct
+                                                          cpl_abort_rpl),
+                                                   GFP_ATOMIC);
+
+               if (!reply_skb) {
                        printk("do_abort_req_rss: couldn't get skb!\n");
                        goto out;
                }
-               skb->priority = CPL_PRIORITY_DATA;
-               __skb_put(skb, sizeof(struct cpl_abort_rpl));
-               rpl = cplhdr(skb);
+               reply_skb->priority = CPL_PRIORITY_DATA;
+               __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+               rpl = cplhdr(reply_skb);
                rpl->wr.wr_hi =
                    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
-               rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
-               OPCODE_TID(rpl) =
-                   htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
-               rpl->cmd = req->status;
-               cxgb3_ofld_send(dev, skb);
+               rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+               OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+               rpl->cmd = cmd;
+               cxgb3_ofld_send(dev, reply_skb);
 out:
                return CPL_RET_BUF_DONE;
        }
@@ -732,7 +765,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
        struct t3c_tid_entry *t3c_tid;
 
        t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
-       if (t3c_tid->ctx && t3c_tid->client->handlers &&
+       if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
                return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
                    (dev, skb, t3c_tid->ctx);
@@ -750,7 +783,7 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
        skb->protocol = htons(0xffff);
        skb->dev = dev->lldev;
        skb_pull(skb, sizeof(*p));
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        netif_receive_skb(skb);
        return 0;
 }
@@ -762,7 +795,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb)
        struct t3c_tid_entry *t3c_tid;
 
        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-       if (t3c_tid->ctx && t3c_tid->client->handlers &&
+       if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[opcode]) {
                return t3c_tid->client->handlers[opcode] (dev, skb,
                                                          t3c_tid->ctx);
@@ -961,7 +994,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
        for (tid = 0; tid < ti->ntids; tid++) {
                te = lookup_tid(ti, tid);
                BUG_ON(!te);
-               if (te->ctx && te->client && te->client->redirect) {
+               if (te && te->ctx && te->client && te->client->redirect) {
                        update_tcb = te->client->redirect(te->ctx, old, new, e);
                        if (update_tcb) {
                                l2t_hold(L2DATA(tdev), e);
index 027ab2c..3666586 100644 (file)
@@ -661,7 +661,7 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
 
        if (skb) {
                __skb_put(skb, IMMED_PKT_SIZE);
-               memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
+               skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
        }
        return skb;
 }
@@ -897,11 +897,11 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                d->flit[2] = 0;
                cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
                hdr->cntrl = htonl(cntrl);
-               eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
+               eth_type = skb_network_offset(skb) == ETH_HLEN ?
                    CPL_ETH_II : CPL_ETH_II_VLAN;
                tso_info |= V_LSO_ETH_TYPE(eth_type) |
-                   V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
-                   V_LSO_TCPHDR_WORDS(skb->h.th->doff);
+                   V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
+                   V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
                hdr->lso_info = htonl(tso_info);
                flits = 3;
        } else {
@@ -913,7 +913,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                if (skb->len <= WR_LEN - sizeof(*cpl)) {
                        q->sdesc[pidx].skb = NULL;
                        if (!skb->data_len)
-                               memcpy(&d->flit[2], skb->data, skb->len);
+                               skb_copy_from_linear_data(skb, &d->flit[2],
+                                                         skb->len);
                        else
                                skb_copy_bits(skb, 0, &d->flit[2], skb->len);
 
@@ -1319,16 +1320,19 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
        /* Only TX_DATA builds SGLs */
 
        from = (struct work_request_hdr *)skb->data;
-       memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
+       memcpy(&d->flit[1], &from[1],
+              skb_transport_offset(skb) - sizeof(*from));
 
-       flits = (skb->h.raw - skb->data) / 8;
+       flits = skb_transport_offset(skb) / 8;
        sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-       sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
+       sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+                            skb->tail - skb->transport_header,
                             adap->pdev);
        if (need_skb_unmap()) {
                setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
                skb->destructor = deferred_unmap_destructor;
-               ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+               ((struct unmap_info *)skb->cb)->len = (skb->tail -
+                                                      skb->transport_header);
        }
 
        write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
@@ -1349,8 +1353,8 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
        if (skb->len <= WR_LEN && cnt == 0)
                return 1;       /* packet fits as immediate data */
 
-       flits = (skb->h.raw - skb->data) / 8;   /* headers */
-       if (skb->tail != skb->h.raw)
+       flits = skb_transport_offset(skb) / 8;  /* headers */
+       if (skb->tail != skb->transport_header)
                cnt++;
        return flits_to_desc(flits + sgl_len(cnt));
 }
@@ -1620,7 +1624,9 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
                             unsigned int gather_idx)
 {
        rq->offload_pkts++;
-       skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
 
        if (rq->polling) {
                rx_gather[gather_idx++] = skb;
@@ -1684,9 +1690,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
        struct port_info *pi;
 
        skb_pull(skb, sizeof(*p) + pad);
-       skb->dev = adap->port[p->iff];
        skb->dev->last_rx = jiffies;
-       skb->protocol = eth_type_trans(skb, skb->dev);
+       skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
        pi = netdev_priv(skb->dev);
        if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
            !p->fragment) {
@@ -1717,11 +1722,11 @@ static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
 {
        skb->len = len;
        if (len <= SKB_DATA_SIZE) {
-               memcpy(skb->data, p->va, len);
+               skb_copy_to_linear_data(skb, p->va, len);
                skb->tail += len;
                put_page(p->frag.page);
        } else {
-               memcpy(skb->data, p->va, SKB_DATA_SIZE);
+               skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE);
                skb_shinfo(skb)->frags[0].page = p->frag.page;
                skb_shinfo(skb)->frags[0].page_offset =
                    p->frag.page_offset + SKB_DATA_SIZE;
@@ -1767,7 +1772,7 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
                        __skb_put(skb, len);
                        pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
                                                    PCI_DMA_FROMDEVICE);
-                       memcpy(skb->data, sd->t.skb->data, len);
+                       skb_copy_from_linear_data(sd->t.skb, skb->data, len);
                        pci_dma_sync_single_for_device(adap->pdev, mapping, len,
                                                       PCI_DMA_FROMDEVICE);
                } else if (!drop_thres)
index d83f075..fb485d0 100644 (file)
@@ -1523,19 +1523,25 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
  */
 int t3_phy_intr_handler(struct adapter *adapter)
 {
-       static const int intr_gpio_bits[] = { 8, 0x20 };
-
+       u32 mask, gpi = adapter_info(adapter)->gpio_intr;
        u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
 
        for_each_port(adapter, i) {
-               if (cause & intr_gpio_bits[i]) {
-                       struct cphy *phy = &adap2pinfo(adapter, i)->phy;
-                       int phy_cause = phy->ops->intr_handler(phy);
+               struct port_info *p = adap2pinfo(adapter, i);
+
+               mask = gpi - (gpi & (gpi - 1));
+               gpi -= mask;
+
+               if (!(p->port_type->caps & SUPPORTED_IRQ))
+                       continue;
+
+               if (cause & mask) {
+                       int phy_cause = p->phy.ops->intr_handler(&p->phy);
 
                        if (phy_cause & cphy_cause_link_change)
                                t3_link_changed(adapter, i);
                        if (phy_cause & cphy_cause_fifo_error)
-                               phy->fifo_errors++;
+                               p->phy.fifo_errors++;
                }
        }
 
index e547ce1..dae97b8 100644 (file)
@@ -359,7 +359,6 @@ static void de600_rx_intr(struct net_device *dev)
        }
        /* else */
 
-       skb->dev = dev;
        skb_reserve(skb,2);     /* Align */
 
        /* 'skb->data' points to the start of sk_buff data area. */
index b6ad0cb..dc48924 100644 (file)
@@ -697,7 +697,6 @@ static int de620_rx_intr(struct net_device *dev)
                }
                else { /* Yep! Go get it! */
                        skb_reserve(skb,2);     /* Align */
-                       skb->dev = dev;
                        /* skb->data points to the start of sk_buff data area */
                        buffer = skb_put(skb,size);
                        /* copy the packet into the buffer */
index 9f7e1db..95d854e 100644 (file)
@@ -616,7 +616,6 @@ static int lance_rx(struct net_device *dev)
                        }
                        lp->stats.rx_bytes += len;
 
-                       skb->dev = dev;
                        skb_reserve(skb, 2);    /* 16 byte align */
                        skb_put(skb, len);      /* make room */
 
index 07d2731..571d82f 100644 (file)
@@ -3091,13 +3091,13 @@ static void dfx_rcv_queue_process(
                                        {
                                                /* Receive buffer allocated, pass receive packet up */
 
-                                               memcpy(skb->data, p_buff + RCV_BUFF_K_PADDING, pkt_len+3);
+                                               skb_copy_to_linear_data(skb,
+                                                              p_buff + RCV_BUFF_K_PADDING,
+                                                              pkt_len + 3);
                                        }
 
                                        skb_reserve(skb,3);             /* adjust data field so that it points to FC byte */
                                        skb_put(skb, pkt_len);          /* pass up packet length, NOT including CRC */
-                                       skb->dev = bp->dev;             /* pass up device pointer */
-
                                        skb->protocol = fddi_type_trans(skb, bp->dev);
                                        bp->rcv_total_bytes += skb->len;
                                        netif_rx(skb);
index 5113eef..1834970 100644 (file)
@@ -1044,7 +1044,6 @@ static int depca_rx(struct net_device *dev)
                                        unsigned char *buf;
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        buf = skb_put(skb, pkt_len);
-                                       skb->dev = dev;
                                        if (entry < lp->rx_old) {       /* Wrapped buffer */
                                                len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
                                                memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len);
@@ -1491,8 +1490,9 @@ static void __init depca_platform_probe (void)
                depca_io_ports[i].device = pldev;
 
                if (platform_device_add(pldev)) {
-                       platform_device_put(pldev);
                        depca_io_ports[i].device = NULL;
+                       pldev->dev.platform_data = NULL;
+                       platform_device_put(pldev);
                        continue;
                }
 
index a795202..df62c02 100644 (file)
@@ -503,7 +503,6 @@ dgrs_rcv_frame(
                /* discarding the frame */
                goto out;
        }
-       skb->dev = devN;
        skb_reserve(skb, 2);    /* Align IP header */
 
 again:
@@ -742,7 +741,7 @@ static int dgrs_start_xmit(struct sk_buff *skb, struct net_device *devN)
                }
 
                amt = min_t(unsigned int, len, rbdp->size - count);
-               memcpy( (char *) S2H(rbdp->buf) + count, skb->data + i, amt);
+               skb_copy_from_linear_data_offset(skb, i, S2H(rbdp->buf) + count, amt);
                i += amt;
                count += amt;
                len -= amt;
index 9d446a0..74ec64a 100644 (file)
@@ -504,7 +504,6 @@ rio_timer (unsigned long data)
                                        break;
                                }
                                np->rx_skbuff[entry] = skb;
-                               skb->dev = dev;
                                /* 16 byte align the IP header */
                                skb_reserve (skb, 2);
                                np->rx_ring[entry].fraginfo =
@@ -575,7 +574,6 @@ alloc_list (struct net_device *dev)
                                dev->name);
                        break;
                }
-               skb->dev = dev; /* Mark as being used by this device. */
                skb_reserve (skb, 2);   /* 16 byte align the IP header. */
                /* Rubicon now supports 40 bits of addressing space. */
                np->rx_ring[i].fraginfo =
@@ -866,7 +864,6 @@ receive_packet (struct net_device *dev)
                                                                DMA_48BIT_MASK,
                                                            np->rx_buf_sz,
                                                            PCI_DMA_FROMDEVICE);
-                               skb->dev = dev;
                                /* 16 byte align the IP header */
                                skb_reserve (skb, 2);
                                eth_copy_and_sum (skb,
@@ -910,7 +907,6 @@ receive_packet (struct net_device *dev)
                                break;
                        }
                        np->rx_skbuff[entry] = skb;
-                       skb->dev = dev;
                        /* 16 byte align the IP header */
                        skb_reserve (skb, 2);
                        np->rx_ring[entry].fraginfo =
index 615d2b1..8cc1174 100644 (file)
@@ -954,7 +954,6 @@ dm9000_rx(struct net_device *dev)
                /* Move data from DM9000 */
                if (GoodPacket
                    && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
-                       skb->dev = dev;
                        skb_reserve(skb, 2);
                        rdptr = (u8 *) skb_put(skb, RxLen - 4);
 
index 0cefef5..4d0e0ae 100644 (file)
@@ -1769,7 +1769,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 
        /* Align, init, and map the RFD. */
        skb_reserve(rx->skb, NET_IP_ALIGN);
-       memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
+       skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
        rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
                RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
 
index 1d08e93..48e2ade 100644 (file)
@@ -2887,33 +2887,30 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                                return err;
                }
 
-               hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                mss = skb_shinfo(skb)->gso_size;
                if (skb->protocol == htons(ETH_P_IP)) {
-                       skb->nh.iph->tot_len = 0;
-                       skb->nh.iph->check = 0;
-                       skb->h.th->check =
-                               ~csum_tcpudp_magic(skb->nh.iph->saddr,
-                                                  skb->nh.iph->daddr,
-                                                  0,
-                                                  IPPROTO_TCP,
-                                                  0);
+                       struct iphdr *iph = ip_hdr(skb);
+                       iph->tot_len = 0;
+                       iph->check = 0;
+                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                                iph->daddr, 0,
+                                                                IPPROTO_TCP,
+                                                                0);
                        cmd_length = E1000_TXD_CMD_IP;
-                       ipcse = skb->h.raw - skb->data - 1;
+                       ipcse = skb_transport_offset(skb) - 1;
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
-                       skb->nh.ipv6h->payload_len = 0;
-                       skb->h.th->check =
-                               ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
-                                                &skb->nh.ipv6h->daddr,
-                                                0,
-                                                IPPROTO_TCP,
-                                                0);
+                       ipv6_hdr(skb)->payload_len = 0;
+                       tcp_hdr(skb)->check =
+                               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                &ipv6_hdr(skb)->daddr,
+                                                0, IPPROTO_TCP, 0);
                        ipcse = 0;
                }
-               ipcss = skb->nh.raw - skb->data;
-               ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
-               tucss = skb->h.raw - skb->data;
-               tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+               ipcss = skb_network_offset(skb);
+               ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+               tucss = skb_transport_offset(skb);
+               tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
                tucse = 0;
 
                cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
@@ -2954,7 +2951,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
        uint8_t css;
 
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-               css = skb->h.raw - skb->data;
+               css = skb_transport_offset(skb);
 
                i = tx_ring->next_to_use;
                buffer_info = &tx_ring->buffer_info[i];
@@ -2962,7 +2959,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
                context_desc->lower_setup.ip_config = 0;
                context_desc->upper_setup.tcp_fields.tucss = css;
-               context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
+               context_desc->upper_setup.tcp_fields.tucso =
+                       css + skb->csum_offset;
                context_desc->upper_setup.tcp_fields.tucse = 0;
                context_desc->tcp_seg_setup.data = 0;
                context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
@@ -3296,7 +3294,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
                * points to just header, pull a few bytes of payload from
                * frags into skb->data */
-               hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
                        switch (adapter->hw.mac_type) {
                                unsigned int pull_size;
@@ -3307,7 +3305,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                                 * NOTE: this is a TSO only workaround
                                 * if end byte alignment not correct move us
                                 * into the next dword */
-                               if ((unsigned long)(skb->tail - 1) & 4)
+                               if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
                                        break;
                                /* fall through */
                        case e1000_82571:
@@ -3796,7 +3794,7 @@ e1000_intr_msi(int irq, void *data)
 
        for (i = 0; i < E1000_MAX_INTR; i++)
                if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
-                  e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+                  !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
                        break;
 
        if (likely(adapter->itr_setting & 3))
@@ -3899,7 +3897,7 @@ e1000_intr(int irq, void *data)
 
        for (i = 0; i < E1000_MAX_INTR; i++)
                if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
-                  e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+                  !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
                        break;
 
        if (likely(adapter->itr_setting & 3))
@@ -3949,7 +3947,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
        poll_dev->quota -= work_done;
 
        /* If no Tx and not enough Rx work done, exit the polling mode */
-       if ((tx_cleaned && (work_done < work_to_do)) ||
+       if ((!tx_cleaned && (work_done == 0)) ||
           !netif_running(poll_dev)) {
 quit_polling:
                if (likely(adapter->itr_setting & 3))
@@ -3979,7 +3977,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 #ifdef CONFIG_E1000_NAPI
        unsigned int count = 0;
 #endif
-       boolean_t cleaned = TRUE;
+       boolean_t cleaned = FALSE;
        unsigned int total_tx_bytes=0, total_tx_packets=0;
 
        i = tx_ring->next_to_clean;
@@ -4013,10 +4011,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 #ifdef CONFIG_E1000_NAPI
 #define E1000_TX_WEIGHT 64
                /* weight of a sort for tx, to avoid endless transmit cleanup */
-               if (count++ == E1000_TX_WEIGHT) {
-                       cleaned = FALSE;
-                       break;
-               }
+               if (count++ == E1000_TX_WEIGHT) break;
 #endif
        }
 
@@ -4230,9 +4225,12 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                            netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
                        if (new_skb) {
                                skb_reserve(new_skb, NET_IP_ALIGN);
-                               memcpy(new_skb->data - NET_IP_ALIGN,
-                                      skb->data - NET_IP_ALIGN,
-                                      length + NET_IP_ALIGN);
+                               skb_copy_to_linear_data_offset(new_skb,
+                                                              -NET_IP_ALIGN,
+                                                              (skb->data -
+                                                               NET_IP_ALIGN),
+                                                              (length +
+                                                               NET_IP_ALIGN));
                                /* save the skb in buffer_info as good */
                                buffer_info->skb = skb;
                                skb = new_skb;
@@ -4394,7 +4392,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                PCI_DMA_FROMDEVICE);
                        vaddr = kmap_atomic(ps_page->ps_page[0],
                                            KM_SKB_DATA_SOFTIRQ);
-                       memcpy(skb->tail, vaddr, l1);
+                       memcpy(skb_tail_pointer(skb), vaddr, l1);
                        kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
                        pci_dma_sync_single_for_device(pdev,
                                ps_page_dma->ps_page_dma[0],
index b446309..39654e1 100644 (file)
@@ -1591,7 +1591,6 @@ eepro_rx(struct net_device *dev)
 
                                break;
                        }
-                       skb->dev = dev;
                        skb_reserve(skb,2);
 
                        if (lp->version == LAN595)
index e28bb1e..6c267c3 100644 (file)
@@ -1793,7 +1793,6 @@ speedo_rx(struct net_device *dev)
                           copying to a properly sized skbuff. */
                        if (pkt_len < rx_copybreak
                                && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                                /* 'skb_put()' points to the start of sk_buff data area. */
                                pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
@@ -1805,8 +1804,9 @@ speedo_rx(struct net_device *dev)
                                eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
                                skb_put(skb, pkt_len);
 #else
-                               memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data,
-                                          pkt_len);
+                               skb_copy_from_linear_data(sp->rx_skbuff[entry],
+                                                         skb_put(skb, pkt_len),
+                                                         pkt_len);
 #endif
                                pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
                                                                                           sizeof(struct RxFD) + pkt_len,
index 3868b80..8aaf5ec 100644 (file)
@@ -976,7 +976,6 @@ static void eexp_hw_rx_pio(struct net_device *dev)
                                        lp->stats.rx_dropped++;
                                        break;
                                }
-                               skb->dev = dev;
                                skb_reserve(skb, 2);
                                outw(pbuf+10, ioaddr+READ_PTR);
                                insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1);
index 0e4042b..58364a0 100644 (file)
@@ -391,8 +391,8 @@ static int ehea_poll(struct net_device *dev, int *budget)
                                        if (!skb)
                                                break;
                                }
-                               memcpy(skb->data, ((char*)cqe) + 64,
-                                      cqe->num_bytes_transfered - 4);
+                               skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
+                                              cqe->num_bytes_transfered - 4);
                                ehea_fill_skb(dev, skb, cqe);
                        } else if (rq == 2) {  /* RQ2 */
                                skb = get_skb_by_index(skb_arr_rq2,
@@ -1262,8 +1262,8 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
 static inline void write_ip_start_end(struct ehea_swqe *swqe,
                                      const struct sk_buff *skb)
 {
-       swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data));
-       swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1);
+       swqe->ip_start = skb_network_offset(skb);
+       swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
 }
 
 static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
@@ -1300,13 +1300,13 @@ static void write_swqe2_TSO(struct sk_buff *skb,
        /* copy only eth/ip/tcp headers to immediate data and
         * the rest of skb->data to sg1entry
         */
-       headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4);
+       headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
 
        skb_data_size = skb->len - skb->data_len;
 
        if (skb_data_size >= headersize) {
                /* copy immediate data */
-               memcpy(imm_data, skb->data, headersize);
+               skb_copy_from_linear_data(skb, imm_data, headersize);
                swqe->immediate_data_length = headersize;
 
                if (skb_data_size > headersize) {
@@ -1337,7 +1337,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
         */
        if (skb_data_size >= SWQE2_MAX_IMM) {
                /* copy immediate data */
-               memcpy(imm_data, skb->data, SWQE2_MAX_IMM);
+               skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
 
                swqe->immediate_data_length = SWQE2_MAX_IMM;
 
@@ -1350,7 +1350,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
                        swqe->descriptors++;
                }
        } else {
-               memcpy(imm_data, skb->data, skb_data_size);
+               skb_copy_from_linear_data(skb, imm_data, skb_data_size);
                swqe->immediate_data_length = skb_data_size;
        }
 }
@@ -1688,6 +1688,7 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
                       struct ehea_swqe *swqe, u32 lkey)
 {
        if (skb->protocol == htons(ETH_P_IP)) {
+               const struct iphdr *iph = ip_hdr(skb);
                /* IPv4 */
                swqe->tx_control |= EHEA_SWQE_CRC
                                 | EHEA_SWQE_IP_CHECKSUM
@@ -1697,15 +1698,15 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
 
                write_ip_start_end(swqe, skb);
 
-               if (skb->nh.iph->protocol == IPPROTO_UDP) {
-                       if ((skb->nh.iph->frag_off & IP_MF) ||
-                           (skb->nh.iph->frag_off & IP_OFFSET))
+               if (iph->protocol == IPPROTO_UDP) {
+                       if ((iph->frag_off & IP_MF) ||
+                           (iph->frag_off & IP_OFFSET))
                                /* IP fragment, so don't change cs */
                                swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
                        else
                                write_udp_offset_end(swqe, skb);
 
-               } else if (skb->nh.iph->protocol == IPPROTO_TCP) {
+               } else if (iph->protocol == IPPROTO_TCP) {
                        write_tcp_offset_end(swqe, skb);
                }
 
@@ -1731,10 +1732,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
        int i;
 
        if (skb->protocol == htons(ETH_P_IP)) {
+               const struct iphdr *iph = ip_hdr(skb);
                /* IPv4 */
                write_ip_start_end(swqe, skb);
 
-               if (skb->nh.iph->protocol == IPPROTO_TCP) {
+               if (iph->protocol == IPPROTO_TCP) {
                        swqe->tx_control |= EHEA_SWQE_CRC
                                         | EHEA_SWQE_IP_CHECKSUM
                                         | EHEA_SWQE_TCP_CHECKSUM
@@ -1742,9 +1744,9 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 
                        write_tcp_offset_end(swqe, skb);
 
-               } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
-                       if ((skb->nh.iph->frag_off & IP_MF) ||
-                           (skb->nh.iph->frag_off & IP_OFFSET))
+               } else if (iph->protocol == IPPROTO_UDP) {
+                       if ((iph->frag_off & IP_MF) ||
+                           (iph->frag_off & IP_OFFSET))
                                /* IP fragment, so don't change cs */
                                swqe->tx_control |= EHEA_SWQE_CRC
                                                 | EHEA_SWQE_IMM_DATA_PRESENT;
@@ -1770,10 +1772,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
        /* copy (immediate) data */
        if (nfrags == 0) {
                /* data is in a single piece */
-               memcpy(imm_data, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, imm_data, skb->len);
        } else {
                /* first copy data from the skb->data buffer ... */
-               memcpy(imm_data, skb->data, skb->len - skb->data_len);
+               skb_copy_from_linear_data(skb, imm_data,
+                                         skb->len - skb->data_len);
                imm_data += skb->len - skb->data_len;
 
                /* ... then copy data from the fragments */
index 3a6a83d..4e3f14c 100644 (file)
@@ -934,7 +934,6 @@ static void epic_init_ring(struct net_device *dev)
                ep->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
-               skb->dev = dev;                 /* Mark as being used by this device. */
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
                        skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
@@ -1199,7 +1198,6 @@ static int epic_rx(struct net_device *dev, int budget)
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak
                                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(ep->pci_dev,
                                                            ep->rx_ring[entry].bufaddr,
@@ -1236,7 +1234,6 @@ static int epic_rx(struct net_device *dev, int budget)
                        skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
                        if (skb == NULL)
                                break;
-                       skb->dev = dev;                 /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
                                skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
index 93283e3..04abf59 100644 (file)
@@ -1175,7 +1175,6 @@ static void eth16i_rx(struct net_device *dev)
                                break;
                        }
 
-                       skb->dev = dev;
                        skb_reserve(skb,2);
 
                        /*
index 714ea11..cb0792c 100644 (file)
@@ -993,7 +993,6 @@ static int ewrk3_rx(struct net_device *dev)
 
                                        if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                                unsigned char *p;
-                                               skb->dev = dev;
                                                skb_reserve(skb, 2);    /* Align to 16 bytes */
                                                p = skb_put(skb, pkt_len);
 
index 38a13f4..abe9b08 100644 (file)
@@ -1719,7 +1719,6 @@ static int netdev_rx(struct net_device *dev)
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
                            (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(np->pci_dev,
                                                            np->cur_rx->buffer,
index 6764281..255b091 100644 (file)
@@ -647,7 +647,6 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
                printk("%s: Memory squeeze, dropping packet.\n", dev->name);
                fep->stats.rx_dropped++;
        } else {
-               skb->dev = dev;
                skb_put(skb,pkt_len-4); /* Make room */
                eth_copy_and_sum(skb, data, pkt_len-4, 0);
                skb->protocol=eth_type_trans(skb,dev);
index 77f747a..e824d5d 100644 (file)
@@ -551,7 +551,9 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget)
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
-                                       memcpy(skbn->data, skb->data, pkt_len);
+                                       skb_copy_from_linear_data(skb,
+                                                                 skbn->data,
+                                                                 pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
@@ -561,7 +563,6 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget)
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);
 
                        if (skbn != NULL) {
-                               skb->dev = dev;
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
index d04214e..7a01802 100644 (file)
@@ -1385,11 +1385,12 @@ static int nv_alloc_rx(struct net_device *dev)
        while (np->put_rx.orig != less_rx) {
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
                if (skb) {
-                       skb->dev = dev;
                        np->put_rx_ctx->skb = skb;
-                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
-                                                            skb->end-skb->data, PCI_DMA_FROMDEVICE);
-                       np->put_rx_ctx->dma_len = skb->end-skb->data;
+                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
+                                                            skb->data,
+                                                            skb_tailroom(skb),
+                                                            PCI_DMA_FROMDEVICE);
+                       np->put_rx_ctx->dma_len = skb_tailroom(skb);
                        np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
                        wmb();
                        np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
@@ -1416,11 +1417,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
        while (np->put_rx.ex != less_rx) {
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
                if (skb) {
-                       skb->dev = dev;
                        np->put_rx_ctx->skb = skb;
-                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
-                                                            skb->end-skb->data, PCI_DMA_FROMDEVICE);
-                       np->put_rx_ctx->dma_len = skb->end-skb->data;
+                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
+                                                            skb->data,
+                                                            skb_tailroom(skb),
+                                                            PCI_DMA_FROMDEVICE);
+                       np->put_rx_ctx->dma_len = skb_tailroom(skb);
                        np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
                        np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
                        wmb();
@@ -1604,8 +1606,9 @@ static void nv_drain_rx(struct net_device *dev)
                wmb();
                if (np->rx_skb[i].skb) {
                        pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
-                                               np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
-                                               PCI_DMA_FROMDEVICE);
+                                        (skb_end_pointer(np->rx_skb[i].skb) -
+                                         np->rx_skb[i].skb->data),
+                                        PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(np->rx_skb[i].skb);
                        np->rx_skb[i].skb = NULL;
                }
@@ -4376,11 +4379,12 @@ static int nv_loopback_test(struct net_device *dev)
                ret = 0;
                goto out;
        }
+       test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+                                      skb_tailroom(tx_skb),
+                                      PCI_DMA_FROMDEVICE);
        pkt_data = skb_put(tx_skb, pkt_len);
        for (i = 0; i < pkt_len; i++)
                pkt_data[i] = (u8)(i & 0xff);
-       test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
-                                      tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
 
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
                np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
@@ -4437,7 +4441,7 @@ static int nv_loopback_test(struct net_device *dev)
        }
 
        pci_unmap_page(np->pci_dev, test_dma_addr,
-                      tx_skb->end-tx_skb->data,
+                      (skb_end_pointer(tx_skb) - tx_skb->data),
                       PCI_DMA_TODEVICE);
        dev_kfree_skb_any(tx_skb);
  out:
index 4a05c14..e2ddd61 100644 (file)
@@ -160,7 +160,8 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget)
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
-                                       memcpy(skbn->data, skb->data, pkt_len);
+                                       skb_copy_from_linear_data(skb,
+                                                     skbn->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
@@ -170,7 +171,6 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget)
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);
 
                        if (skbn != NULL) {
-                               skb->dev = dev;
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
@@ -294,7 +294,8 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
-                                       memcpy(skbn->data, skb->data, pkt_len);
+                                       skb_copy_from_linear_data(skb,
+                                                     skbn->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
@@ -304,7 +305,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);
 
                        if (skbn != NULL) {
-                               skb->dev = dev;
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
@@ -516,7 +516,6 @@ void fs_init_bds(struct net_device *dev)
                        break;
                }
                fep->rx_skbuff[i] = skb;
-               skb->dev = dev;
                CBDW_BUFADDR(bdp,
                        dma_map_single(fep->dev, skb->data,
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
index d981d4c..b666a0c 100644 (file)
@@ -942,18 +942,18 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
 
        /* Tell the controller what the protocol is */
        /* And provide the already calculated phcs */
-       if (skb->nh.iph->protocol == IPPROTO_UDP) {
+       if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
                flags |= TXFCB_UDP;
-               fcb->phcs = skb->h.uh->check;
+               fcb->phcs = udp_hdr(skb)->check;
        } else
-               fcb->phcs = skb->h.th->check;
+               fcb->phcs = tcp_hdr(skb)->check;
 
        /* l3os is the distance between the start of the
         * frame (skb->data) and the start of the IP hdr.
         * l4os is the distance between the start of the
         * l3 hdr and the l4 hdr */
-       fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
-       fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);
+       fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
+       fcb->l4os = skb_network_header_len(skb);
 
        fcb->flags = flags;
 }
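Editorial note, not part of the patch: for a concrete feel of the two offsets the comment above describes, consider an untagged Ethernet/IPv4/TCP frame after the Gianfar frame-control block has been pushed in front of it (assuming GMAC_FCB_LEN is 8 bytes). skb_network_offset(skb) is then 8 + 14 = 22, so l3os = 22 - 8 = 14, i.e. the Ethernet header; and skb_network_header_len(skb) gives l4os = 20 for an IPv4 header without options (larger with options, since it is the distance from the start of the IP header to the TCP/UDP header).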
@@ -1295,8 +1295,6 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
         */
        skb_reserve(skb, alignamount);
 
-       skb->dev = dev;
-
        bdp->bufPtr = dma_map_single(NULL, skb->data,
                        priv->rx_buffer_size, DMA_FROM_DEVICE);
 
index c3c0d67..2521b11 100644 (file)
@@ -1568,7 +1568,6 @@ static int hamachi_rx(struct net_device *dev)
                                printk(KERN_ERR "%s: rx_copybreak non-zero "
                                  "not good with RX_CHECKSUM\n", dev->name);
 #endif
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(hmp->pci_dev,
                                                            hmp->rx_ring[entry].addr,
index 59214e7..30baf6e 100644 (file)
 #include <linux/ioport.h>
 #include <linux/string.h>
 #include <linux/init.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
 #include <linux/hdlcdrv.h>
 #include <linux/baycom.h>
 #include <linux/jiffies.h>
 
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
 /* --------------------------------------------------------------------- */
 
 #define BAYCOM_DEBUG
index d254269..656f278 100644 (file)
@@ -282,7 +282,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        skb->protocol = ax25_type_trans(skb, dev);
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
        dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
        bpq->stats.tx_packets++;
        bpq->stats.tx_bytes+=skb->len;
index 0fbb414..3be8c50 100644 (file)
@@ -930,7 +930,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
 
        /* Transfer data to DMA buffer */
        i = priv->tx_head;
-       memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1);
+       skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
        priv->tx_len[i] = skb->len - 1;
 
        /* Clear interrupts while we touch our circular buffers */
index f5a17ad..b33adc6 100644 (file)
@@ -317,7 +317,9 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s)
                                dev_kfree_skb_irq(skb);
                                break;
                        }
-                       memcpy(s->hdlctx.buffer, skb->data+1, pkt_len);
+                       skb_copy_from_linear_data_offset(skb, 1,
+                                                        s->hdlctx.buffer,
+                                                        pkt_len);
                        dev_kfree_skb_irq(skb);
                        s->hdlctx.bp = s->hdlctx.buffer;
                        append_crc_ccitt(s->hdlctx.buffer, pkt_len);
index ee3ea4f..467559d 100644 (file)
@@ -638,7 +638,9 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
                                dev_kfree_skb_any(skb);
                                break;
                        }
-                       memcpy(yp->tx_buf, skb->data + 1, yp->tx_len);
+                       skb_copy_from_linear_data_offset(skb, 1,
+                                                        yp->tx_buf,
+                                                        yp->tx_len);
                        dev_kfree_skb_any(skb);
                        yp->tx_count = 0;
                        yp->tx_crcl = 0x21;
index 7dc5185..8118a67 100644 (file)
@@ -1816,7 +1816,6 @@ static void hp100_rx(struct net_device *dev)
                        u_char *ptr;
 
                        skb_reserve(skb,2);
-                       skb->dev = dev;
 
                        /* ptr to start of the sk_buff data area */
                        skb_put(skb, pkt_len);
index dd8ad87..3d82d46 100644 (file)
@@ -1338,7 +1338,7 @@ static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                } else {
-                       cacheable_memcpy(dev->rx_sg_skb->tail,
+                       cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
                                         dev->rx_skb[slot]->data, len);
                        skb_put(dev->rx_sg_skb, len);
                        emac_recycle_rx_skb(dev, slot, len);
@@ -1398,7 +1398,6 @@ static int emac_poll_rx(void *param, int budget)
 
                skb_put(skb, len);
              push_packet:
-               skb->dev = dev->ndev;
                skb->protocol = eth_type_trans(skb, dev->ndev);
                emac_rx_csum(dev, skb, ctrl);
 
index 3f946c8..fe85d6f 100644 (file)
@@ -601,7 +601,6 @@ static void irqrx_handler(struct net_device *dev)
 
                                /* set up skb fields */
 
-                               skb->dev = dev;
                                skb->protocol = eth_type_trans(skb, dev);
                                skb->ip_summed = CHECKSUM_NONE;
 
index 458db05..0573fcf 100644 (file)
@@ -798,7 +798,6 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
 
                                skb_reserve(skb, offset);
                                skb_put(skb, length);
-                               skb->dev = netdev;
                                skb->protocol = eth_type_trans(skb, netdev);
 
                                netif_receive_skb(skb); /* send it up */
index 4ad7807..f749e07 100644 (file)
@@ -633,8 +633,6 @@ static inline void ioc3_rx(struct ioc3_private *ip)
 
                        ip->rx_skbs[rx_entry] = NULL;   /* Poison  */
 
-                       new_skb->dev = priv_netdev(ip);
-
                        /* Because we reserve afterwards. */
                        skb_put(new_skb, (1664 + RX_OFFSET));
                        rxb = (struct ioc3_erxbuf *) new_skb->data;
@@ -940,7 +938,6 @@ static void ioc3_alloc_rings(struct net_device *dev)
                        }
 
                        ip->rx_skbs[i] = skb;
-                       skb->dev = dev;
 
                        /* Because we reserve afterwards. */
                        skb_put(skb, (1664 + RX_OFFSET));
@@ -1396,9 +1393,9 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * manually.
         */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               int proto = ntohs(skb->nh.iph->protocol);
+               const struct iphdr *ih = ip_hdr(skb);
+               const int proto = ntohs(ih->protocol);
                unsigned int csoff;
-               struct iphdr *ih = skb->nh.iph;
                uint32_t csum, ehsum;
                uint16_t *eh;
 
@@ -1425,11 +1422,11 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                csoff = ETH_HLEN + (ih->ihl << 2);
                if (proto == IPPROTO_UDP) {
                        csoff += offsetof(struct udphdr, check);
-                       skb->h.uh->check = csum;
+                       udp_hdr(skb)->check = csum;
                }
                if (proto == IPPROTO_TCP) {
                        csoff += offsetof(struct tcphdr, check);
-                       skb->h.th->check = csum;
+                       tcp_hdr(skb)->check = csum;
                }
 
                w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
@@ -1446,7 +1443,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (len <= 104) {
                /* Short packet, let's copy it directly into the ring.  */
-               memcpy(desc->data, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, desc->data, skb->len);
                if (len < ETH_ZLEN) {
                        /* Very short packet, pad with zeros at the end. */
                        memset(desc->data + len, 0, ETH_ZLEN - len);
index cebf8c3..f9c889c 100644 (file)
@@ -1472,9 +1472,8 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
 
        self->stats.tx_bytes += skb->len;
 
-       memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, 
-              skb->len);
-       
+       skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
+                     skb->len);
        self->tx_fifo.len++;
        self->tx_fifo.free++;
 
@@ -1924,7 +1923,7 @@ static int  ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
                        
                        /* Copy frame without CRC, CRC is removed by hardware*/
                        skb_put(skb, len);
-                       memcpy(skb->data, self->rx_buff.data, len);
+                       skb_copy_to_linear_data(skb, self->rx_buff.data, len);
 
                        /* Move to next frame */
                        self->rx_buff.data += len;
@@ -1932,7 +1931,7 @@ static int  ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
                        self->stats.rx_packets++;
 
                        skb->dev = self->netdev;
-                       skb->mac.raw  = skb->data;
+                       skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                        self->netdev->last_rx = jiffies;
index 37914dc..4dbdfaa 100644 (file)
@@ -526,7 +526,7 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
        
        if (aup->speed == 4000000) {
                /* FIR */
-               memcpy((void *)pDB->vaddr, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
                ptxd->count_0 = skb->len & 0xff;
                ptxd->count_1 = (skb->len >> 8) & 0xff;
 
@@ -604,9 +604,9 @@ static int au1k_irda_rx(struct net_device *dev)
                                skb_put(skb, count);
                        else
                                skb_put(skb, count-2);
-                       memcpy(skb->data, (void *)pDB->vaddr, count-2);
+                       skb_copy_to_linear_data(skb, pDB->vaddr, count - 2);
                        skb->dev = dev;
-                       skb->mac.raw = skb->data;
+                       skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                        prxd->count_0 = 0;
index 11af0ae..3ca47bf 100644 (file)
@@ -1119,7 +1119,7 @@ dumpbufs(skb->data,skb->len,'>');
   else
     {
       len = skb->len;
-      memcpy (self->tx_bufs[self->txs], skb->data, len);
+      skb_copy_from_linear_data(skb, self->tx_bufs[self->txs], len);
     }
   self->ring->tx[self->txs].len = len & 0x0fff;
 
@@ -1282,11 +1282,11 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
                       skb_reserve (skb, 1);
 
                       skb_put (skb, len);
-                      memcpy (skb->data, self->rx_bufs[self->rxs], len);
-
+                      skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs],
+                                             len);
                       self->stats.rx_packets++;
                       skb->dev = self->netdev;
-                      skb->mac.raw = skb->data;
+                      skb_reset_mac_header(skb);
                       skb->protocol = htons (ETH_P_IRDA);
                     }
                   else
index 1d510bd..0ac240c 100644 (file)
@@ -441,7 +441,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
                goto drop;
        }
 
-       memcpy(self->tx_buff + self->header_length, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, self->tx_buff + self->header_length, skb->len);
 
        /* Change setting for next frame */
        if (self->capability & IUC_STIR421X) {
@@ -902,7 +902,7 @@ static void irda_usb_receive(struct urb *urb)
 
        if(docopy) {
                /* Copy packet, so we can recycle the original */
-               memcpy(newskb->data, skb->data, urb->actual_length);
+               skb_copy_from_linear_data(skb, newskb->data, urb->actual_length);
                /* Deliver this new skb */
                dataskb = newskb;
                /* And hook the old skb to the URB
@@ -921,7 +921,7 @@ static void irda_usb_receive(struct urb *urb)
 
        /* Ask the networking layer to queue the packet for the IrDA stack */
        dataskb->dev = self->netdev;
-       dataskb->mac.raw  = dataskb->data;
+       skb_reset_mac_header(dataskb);
        dataskb->protocol = htons(ETH_P_IRDA);
        len = dataskb->len;
        netif_rx(dataskb);
index f0c61f3..0de8672 100644 (file)
@@ -200,14 +200,14 @@ static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
 /* Setup a communication between mcs7780 and agilent chip. */
 static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs)
 {
-       IRDA_WARNING("This transceiver type is not supported yet.");
+       IRDA_WARNING("This transceiver type is not supported yet.\n");
        return 1;
 }
 
 /* Setup a communication between mcs7780 and sharp chip. */
 static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs)
 {
-       IRDA_WARNING("This transceiver type is not supported yet.");
+       IRDA_WARNING("This transceiver type is not supported yet.\n");
        return 1;
 }
 
@@ -279,7 +279,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
                break;
 
        default:
-               IRDA_WARNING("Unknown transceiver type: %d",
+               IRDA_WARNING("Unknown transceiver type: %d\n",
                             mcs->transceiver_type);
                ret = 1;
        }
@@ -318,7 +318,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
                return ret;
 
 error:
-       IRDA_ERROR("%s", msg);
+       IRDA_ERROR("%s\n", msg);
        return ret;
 }
 
@@ -353,7 +353,7 @@ static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
        buf[0] = len & 0xff;
        buf[1] = (len >> 8) & 0xff;
        /* copy the data into the tx buffer. */
-       memcpy(buf+2, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, buf + 2, skb->len);
        /* put the fcs in the last four bytes in little endian order. */
        buf[len - 4] = fcs & 0xff;
        buf[len - 3] = (fcs >> 8) & 0xff;
@@ -377,7 +377,7 @@ static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf)
        buf[0] = len & 0xff;
        buf[1] = (len >> 8) & 0xff;
        /* copy the data */
-       memcpy(buf+2, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, buf + 2, skb->len);
        /* put the fcs in last two bytes in little endian order. */
        buf[len - 2] = fcs & 0xff;
        buf[len - 1] = (fcs >> 8) & 0xff;
@@ -426,9 +426,9 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
        }
 
        skb_reserve(skb, 1);
-       memcpy(skb->data, buf, new_len);
+       skb_copy_to_linear_data(skb, buf, new_len);
        skb_put(skb, new_len);
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb->protocol = htons(ETH_P_IRDA);
        skb->dev = mcs->netdev;
 
@@ -479,9 +479,9 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
        }
 
        skb_reserve(skb, 1);
-       memcpy(skb->data, buf, new_len);
+       skb_copy_to_linear_data(skb, buf, new_len);
        skb_put(skb, new_len);
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb->protocol = htons(ETH_P_IRDA);
        skb->dev = mcs->netdev;
 
@@ -587,7 +587,7 @@ static int mcs_speed_change(struct mcs_cb *mcs)
        } while(cnt++ < 100 && (rval & MCS_IRINTX));
 
        if(cnt >= 100) {
-               IRDA_ERROR("unable to change speed");
+               IRDA_ERROR("unable to change speed\n");
                ret = -EIO;
                goto error;
        }
@@ -638,7 +638,7 @@ static int mcs_speed_change(struct mcs_cb *mcs)
 
                default:
                        ret = 1;
-                       IRDA_WARNING("Unknown transceiver type: %d",
+                       IRDA_WARNING("Unknown transceiver type: %d\n",
                                     mcs->transceiver_type);
                }
        if (unlikely(ret))
@@ -733,7 +733,7 @@ static int mcs_net_open(struct net_device *netdev)
        sprintf(hwname, "usb#%d", mcs->usbdev->devnum);
        mcs->irlap = irlap_open(netdev, &mcs->qos, hwname);
        if (!mcs->irlap) {
-               IRDA_ERROR("mcs7780: irlap_open failed");
+               IRDA_ERROR("mcs7780: irlap_open failed\n");
                goto error2;
        }
 
@@ -862,7 +862,7 @@ static int mcs_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
                          mcs->out_buf, wraplen, mcs_send_irq, mcs);
 
        if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) {
-               IRDA_ERROR("failed tx_urb: %d", ret);
+               IRDA_ERROR("failed tx_urb: %d\n", ret);
                switch (ret) {
                case -ENODEV:
                case -EPIPE:
@@ -897,7 +897,7 @@ static int mcs_probe(struct usb_interface *intf,
        if (!ndev)
                goto error1;
 
-       IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.", udev->devnum);
+       IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum);
 
        /* what is it really for? */
        SET_MODULE_OWNER(ndev);
@@ -905,7 +905,7 @@ static int mcs_probe(struct usb_interface *intf,
 
        ret = usb_reset_configuration(udev);
        if (ret != 0) {
-               IRDA_ERROR("mcs7780: usb reset configuration failed");
+               IRDA_ERROR("mcs7780: usb reset configuration failed\n");
                goto error2;
        }
 
@@ -950,7 +950,7 @@ static int mcs_probe(struct usb_interface *intf,
        if (ret != 0)
                goto error2;
 
-       IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s",
+       IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s\n",
                   ndev->name);
 
        mcs->transceiver_type = transceiver_type;
@@ -981,7 +981,7 @@ static void mcs_disconnect(struct usb_interface *intf)
        free_netdev(mcs->netdev);
 
        usb_set_intfdata(intf, NULL);
-       IRDA_DEBUG(0, "MCS7780 now disconnected.");
+       IRDA_DEBUG(0, "MCS7780 now disconnected.\n");
 }
 
 /* Module insertion */
@@ -992,7 +992,7 @@ static int __init mcs_init(void)
        /* register this driver with the USB subsystem */
        result = usb_register(&mcs_driver);
        if (result)
-               IRDA_ERROR("usb_register failed. Error number %d", result);
+               IRDA_ERROR("usb_register failed. Error number %d\n", result);
 
        return result;
 }
index 29b5ccd..d96c897 100644 (file)
@@ -1466,9 +1466,8 @@ static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
 
        self->stats.tx_bytes += skb->len;
 
-       memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, 
-              skb->len);
-       
+       skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
+                     skb->len);
        self->tx_fifo.len++;
        self->tx_fifo.free++;
 
@@ -1869,10 +1868,14 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
                        /* Copy frame without CRC */
                        if (self->io.speed < 4000000) {
                                skb_put(skb, len-2);
-                               memcpy(skb->data, self->rx_buff.data, len-2);
+                               skb_copy_to_linear_data(skb,
+                                                       self->rx_buff.data,
+                                                       len - 2);
                        } else {
                                skb_put(skb, len-4);
-                               memcpy(skb->data, self->rx_buff.data, len-4);
+                               skb_copy_to_linear_data(skb,
+                                                       self->rx_buff.data,
+                                                       len - 4);
                        }
 
                        /* Move to next frame */
@@ -1881,7 +1884,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
                        self->stats.rx_packets++;
 
                        skb->dev = self->netdev;
-                       skb->mac.raw  = skb->data;
+                       skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                        self->netdev->last_rx = jiffies;
index 2272156..fb196fd 100644 (file)
@@ -386,12 +386,12 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in
 
                /* Align IP header to 20 bytes  */
                skb_reserve(skb, 1);
-               memcpy(skb->data, si->dma_rx_buff, len);
+               skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
                skb_put(skb, len);
 
                /* Feed it to IrLAP  */
                skb->dev = dev;
-               skb->mac.raw  = skb->data;
+               skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                netif_rx(skb);
 
@@ -484,7 +484,7 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
                unsigned long mtt = irda_get_mtt(skb);
 
                si->dma_tx_buff_len = skb->len;
-               memcpy(si->dma_tx_buff, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);
 
                if (mtt)
                        while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
index 937372d..056639f 100644 (file)
@@ -504,7 +504,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
 
                skb_put(skb, len);
                skb->dev = dev;
-               skb->mac.raw = skb->data;
+               skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                si->stats.rx_packets++;
                si->stats.rx_bytes += len;
index 31c6233..198bf3b 100644 (file)
@@ -315,6 +315,7 @@ static struct smsc_chip __initdata lpc_chips_flat[] =
 {
        /* Base address 0x2E or 0x4E */
        { "47N227",     KEY55_1|FIR|SERx4,      0x5a, 0x00 },
+       { "47N227",     KEY55_1|FIR|SERx4,      0x7a, 0x00 },
        { "47N267",     KEY55_1|FIR|SERx4,      0x5e, 0x00 },
        { NULL }
 };
@@ -1161,7 +1162,7 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
                self->new_speed = speed;
        }
 
-       memcpy(self->tx_buff.head, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, self->tx_buff.head, skb->len);
 
        self->tx_buff.len = skb->len;
        self->tx_buff.data = self->tx_buff.head;
@@ -1412,7 +1413,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
        self->stats.rx_bytes += len;
 
        skb->dev = self->netdev;
-       skb->mac.raw  = skb->data;
+       skb_reset_mac_header(skb);
        skb->protocol = htons(ETH_P_IRDA);
        netif_rx(skb);
 }
index 20d306f..755aa44 100644 (file)
@@ -52,7 +52,6 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <net/irda/irda.h>
-#include <net/irda/irlap.h>
 #include <net/irda/irda_device.h>
 #include <net/irda/wrapper.h>
 #include <net/irda/crc.h>
@@ -349,7 +348,7 @@ static void fir_eof(struct stir_cb *stir)
                }
                skb_reserve(nskb, 1);
                skb = nskb;
-               memcpy(nskb->data, rx_buff->data, len);
+               skb_copy_to_linear_data(nskb, rx_buff->data, len);
        } else {
                nskb = dev_alloc_skb(rx_buff->truesize);
                if (unlikely(!nskb)) {
@@ -364,7 +363,7 @@ static void fir_eof(struct stir_cb *stir)
 
        skb_put(skb, len);
 
-       skb->mac.raw  = skb->data;
+       skb_reset_mac_header(skb);
        skb->protocol = htons(ETH_P_IRDA);
        skb->dev = stir->netdev;
 
index c3ed9b3..ff53585 100644 (file)
@@ -925,8 +925,8 @@ static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
 
        self->tx_fifo.tail += skb->len;
        self->stats.tx_bytes += skb->len;
-       memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data,
-              skb->len);
+       skb_copy_from_linear_data(skb,
+                     self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
        self->tx_fifo.len++;
        self->tx_fifo.free++;
 //F01   if (self->tx_fifo.len == 1) {
@@ -1125,7 +1125,7 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
                self->stats.rx_bytes += len;
                self->stats.rx_packets++;
                skb->dev = self->netdev;
-               skb->mac.raw = skb->data;
+               skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                netif_rx(skb);
                return TRUE;
@@ -1189,7 +1189,7 @@ F01_E */
                skb_reserve(skb, 1);
                skb_put(skb, len - 4);
 
-               memcpy(skb->data, self->rx_buff.data, len - 4);
+               skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
                IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__,
                           len - 4, self->rx_buff.data);
 
@@ -1198,7 +1198,7 @@ F01_E */
                self->stats.rx_bytes += len;
                self->stats.rx_packets++;
                skb->dev = self->netdev;
-               skb->mac.raw = skb->data;
+               skb_reset_mac_header(skb);
                skb->protocol = htons(ETH_P_IRDA);
                netif_rx(skb);
 
@@ -1234,7 +1234,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
        }
        skb_reserve(skb, 1);
        skb_put(skb, len - 4 + 1);
-       memcpy(skb->data, self->rx_buff.data, len - 4 + 1);
+       skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
        st_fifo->tail++;
        st_fifo->len++;
        if (st_fifo->tail > MAX_RX_WINDOW)
@@ -1244,7 +1244,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
        self->stats.rx_bytes += len;
        self->stats.rx_packets++;
        skb->dev = self->netdev;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb->protocol = htons(ETH_P_IRDA);
        netif_rx(skb);
        if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
@@ -1303,7 +1303,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
                        }
                        skb_reserve(skb, 1);
                        skb_put(skb, len - 4);
-                       memcpy(skb->data, self->rx_buff.data, len - 4);
+                       skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
 
                        IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__,
                                   len - 4, st_fifo->head);
@@ -1313,7 +1313,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
                        self->stats.rx_bytes += len;
                        self->stats.rx_packets++;
                        skb->dev = self->netdev;
-                       skb->mac.raw = skb->data;
+                       skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                }               //while
index 3457e9d..c4be973 100644 (file)
@@ -595,7 +595,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
        rd->skb = NULL;
        skb->dev = ndev;
        memcpy(skb_put(skb,len), rd->buf, len);
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        if (in_interrupt())
                netif_rx(skb);
        else
@@ -993,7 +993,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        goto drop;
                }
                else
-                       memcpy(rd->buf, skb->data, len);
+                       skb_copy_from_linear_data(skb, rd->buf, len);
        }
 
        rd->skb = skb;                  /* remember skb for tx-complete stats */
index 4212657..5182e80 100644 (file)
@@ -529,7 +529,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Decide if we should use PIO or DMA transfer */
        if (self->io.speed > PIO_MAX_SPEED) {
                self->tx_buff.data = self->tx_buff.head;
-               memcpy(self->tx_buff.data, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
                self->tx_buff.len = skb->len;
                
                mtt = irda_get_mtt(skb);
@@ -908,10 +908,14 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
                        /* Copy frame without CRC */
                        if (self->io.speed < 4000000) {
                                skb_put(skb, len-2);
-                               memcpy(skb->data, self->rx_buff.data, len-2);
+                               skb_copy_to_linear_data(skb,
+                                                       self->rx_buff.data,
+                                                       len - 2);
                        } else {
                                skb_put(skb, len-4);
-                               memcpy(skb->data, self->rx_buff.data, len-4);
+                               skb_copy_to_linear_data(skb,
+                                                       self->rx_buff.data,
+                                                       len - 4);
                        }
 
                        /* Move to next frame */
@@ -919,7 +923,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
                        self->stats.rx_packets++;
                        
                        skb->dev = self->netdev;
-                       skb->mac.raw  = skb->data;
+                       skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                        self->netdev->last_rx = jiffies;
index 0e9ba3c..347d50c 100644 (file)
@@ -1540,7 +1540,6 @@ static void veth_receive(struct veth_lpar_connection *cnx,
                }
 
                skb_put(skb, length);
-               skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
                skb->ip_summed = CHECKSUM_NONE;
                netif_rx(skb);  /* send it up */
index afc2ec7..dfde80e 100644 (file)
@@ -1182,24 +1182,27 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 
        if (likely(skb_is_gso(skb))) {
                struct ixgb_buffer *buffer_info;
+               struct iphdr *iph;
+
                if (skb_header_cloned(skb)) {
                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                        if (err)
                                return err;
                }
 
-               hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                mss = skb_shinfo(skb)->gso_size;
-               skb->nh.iph->tot_len = 0;
-               skb->nh.iph->check = 0;
-               skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
-                                                     skb->nh.iph->daddr,
-                                                     0, IPPROTO_TCP, 0);
-               ipcss = skb->nh.raw - skb->data;
-               ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
-               ipcse = skb->h.raw - skb->data - 1;
-               tucss = skb->h.raw - skb->data;
-               tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+               iph = ip_hdr(skb);
+               iph->tot_len = 0;
+               iph->check = 0;
+               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                        iph->daddr, 0,
+                                                        IPPROTO_TCP, 0);
+               ipcss = skb_network_offset(skb);
+               ipcso = (void *)&(iph->check) - (void *)skb->data;
+               ipcse = skb_transport_offset(skb) - 1;
+               tucss = skb_transport_offset(skb);
+               tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
                tucse = 0;
 
                i = adapter->tx_ring.next_to_use;
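
The hdr_len computation above is plain offset arithmetic; assuming an ordinary Ethernet + IPv4 + TCP frame with no options (values chosen for illustration, not taken from the driver), the new helpers give the same byte count as the old raw-pointer expression:

	/* Worked example under the assumed frame layout:
	 *   skb_transport_offset(skb) = ETH_HLEN + sizeof(struct iphdr) = 14 + 20 = 34
	 *   tcp_hdrlen(skb)           = tcp_hdr(skb)->doff * 4          = 5 * 4   = 20
	 *   hdr_len                   = 34 + 20                         = 54
	 * which equals the old (skb->h.raw - skb->data) + (skb->h.th->doff << 2). */
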
@@ -1243,7 +1246,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 
        if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                struct ixgb_buffer *buffer_info;
-               css = skb->h.raw - skb->data;
+               css = skb_transport_offset(skb);
                cso = css + skb->csum_offset;
 
                i = adapter->tx_ring.next_to_use;
@@ -2014,9 +2017,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
                            netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
                        if (new_skb) {
                                skb_reserve(new_skb, NET_IP_ALIGN);
-                               memcpy(new_skb->data - NET_IP_ALIGN,
-                                      skb->data - NET_IP_ALIGN,
-                                      length + NET_IP_ALIGN);
+                               skb_copy_to_linear_data_offset(new_skb,
+                                                              -NET_IP_ALIGN,
+                                                              (skb->data -
+                                                               NET_IP_ALIGN),
+                                                              (length +
+                                                               NET_IP_ALIGN));
                                /* save the skb in buffer_info as good */
                                buffer_info->skb = skb;
                                skb = new_skb;
index a4eccb1..6683afc 100644 (file)
@@ -110,11 +110,10 @@ static int ixpdev_rx(struct net_device *dev, int *budget)
 
                skb = dev_alloc_skb(desc->pkt_length + 2);
                if (likely(skb != NULL)) {
-                       skb->dev = nds[desc->channel];
                        skb_reserve(skb, 2);
                        eth_copy_and_sum(skb, buf, desc->pkt_length, 0);
                        skb_put(skb, desc->pkt_length);
-                       skb->protocol = eth_type_trans(skb, skb->dev);
+                       skb->protocol = eth_type_trans(skb, nds[desc->channel]);
 
                        skb->dev->last_rx = jiffies;
 
index a384332..0fe96c8 100644 (file)
@@ -988,7 +988,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
                if (lance_debug > 5)
                        printk("%s: bouncing a high-memory packet (%#x).\n",
                                   dev->name, (u32)isa_virt_to_bus(skb->data));
-               memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
+               skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
                lp->tx_ring[entry].base =
                        ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
                dev_kfree_skb(skb);
@@ -1184,7 +1184,6 @@ lance_rx(struct net_device *dev)
                                        }
                                        break;
                                }
-                               skb->dev = dev;
                                skb_reserve(skb,2);     /* 16 byte align */
                                skb_put(skb,pkt_len);   /* Make room */
                                eth_copy_and_sum(skb,
index 452863d..0edcd12 100644 (file)
@@ -801,7 +801,6 @@ memory_squeeze:
                                lp->stats.rx_dropped++;
                        }
                        else {
-                               skb->dev = dev;
                                if (!rx_in_place) {
                                        /* 16 byte align the data fields */
                                        dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
index e726c06..5c86e73 100644 (file)
@@ -722,7 +722,6 @@ static void ei_receive(struct net_device *dev)
                        else
                        {
                                skb_reserve(skb,2);     /* IP headers on 16 byte boundaries */
-                               skb->dev = dev;
                                skb_put(skb, pkt_len);  /* Make room */
                                ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
                                skb->protocol=eth_type_trans(skb,dev);
index 2b739fd..6ba6ed2 100644 (file)
@@ -75,8 +75,9 @@ static DEFINE_PER_CPU(struct pcpu_lstats, pcpu_lstats);
 #ifdef LOOPBACK_TSO
 static void emulate_large_send_offload(struct sk_buff *skb)
 {
-       struct iphdr *iph = skb->nh.iph;
-       struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4));
+       struct iphdr *iph = ip_hdr(skb);
+       struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) +
+                                             (iph->ihl * 4));
        unsigned int doffset = (iph->ihl + th->doff) * 4;
        unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
        unsigned int offset = 0;
@@ -90,10 +91,11 @@ static void emulate_large_send_offload(struct sk_buff *skb)
                if (!nskb)
                        break;
                skb_reserve(nskb, 32);
-               nskb->mac.raw = nskb->data - 14;
-               nskb->nh.raw = nskb->data;
-               iph = nskb->nh.iph;
-               memcpy(nskb->data, skb->nh.raw, doffset);
+               skb_set_mac_header(nskb, -ETH_HLEN);
+               skb_reset_network_header(nskb);
+               iph = ip_hdr(nskb);
+               skb_copy_to_linear_data(nskb, skb_network_header(skb),
+                                       doffset);
                if (skb_copy_bits(skb,
                                  doffset + offset,
                                  nskb->data + doffset,
@@ -108,7 +110,7 @@ static void emulate_large_send_offload(struct sk_buff *skb)
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                nskb->pkt_type = skb->pkt_type;
 
-               th = (struct tcphdr*)(nskb->nh.raw + iph->ihl*4);
+               th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4);
                iph->tot_len = htons(frag_size + doffset);
                iph->id = htons(id);
                iph->check = 0;
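
The negative offset above is the one non-obvious conversion in this hunk; assuming ETH_HLEN == 14, the two forms place the headers identically:

	/* Equivalence relied on above:
	 *   skb_set_mac_header(nskb, -ETH_HLEN);  =>  mac header at nskb->data - 14
	 *     old form: nskb->mac.raw = nskb->data - 14;
	 *   skb_reset_network_header(nskb);       =>  network header at nskb->data
	 *     old form: nskb->nh.raw = nskb->data; */
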
@@ -137,7 +139,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_orphan(skb);
 
        skb->protocol = eth_type_trans(skb,dev);
-       skb->dev = dev;
 #ifndef LOOPBACK_MUST_CHECKSUM
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 #endif
@@ -145,7 +146,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
 #ifdef LOOPBACK_TSO
        if (skb_is_gso(skb)) {
                BUG_ON(skb->protocol != htons(ETH_P_IP));
-               BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
+               BUG_ON(ip_hdr(skb)->protocol != IPPROTO_TCP);
 
                emulate_large_send_offload(skb);
                return 0;
@@ -163,11 +164,9 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
        return 0;
 }
 
-static struct net_device_stats loopback_stats;
-
 static struct net_device_stats *get_stats(struct net_device *dev)
 {
-       struct net_device_stats *stats = &loopback_stats;
+       struct net_device_stats *stats = &dev->stats;
        unsigned long bytes = 0;
        unsigned long packets = 0;
        int i;
@@ -207,7 +206,6 @@ static const struct ethtool_ops loopback_ethtool_ops = {
 struct net_device loopback_dev = {
        .name                   = "lo",
        .get_stats              = &get_stats,
-       .priv                   = &loopback_stats,
        .mtu                    = (16 * 1024) + 20 + 20 + 12,
        .hard_start_xmit        = loopback_xmit,
        .hard_header            = eth_header,
index 177c502..5fc18da 100644 (file)
@@ -676,7 +676,6 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp,
                        return 1;
                }
 
-               skb->dev = dev;
                memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len);
 
                skb->protocol = eth_type_trans(skb,dev);
index e960138..90e695d 100644 (file)
@@ -530,7 +530,6 @@ net_rx(struct net_device *dev)
                return;
        }
        skb_put(skb, length);
-       skb->dev = dev;
 
        memcpy_fromio(skb->data, dev->mem_start + PP_RxFrame, length);
 
index 2e9571b..0e04f7a 100644 (file)
@@ -357,7 +357,6 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
        }
 
        skb_reserve(skb, RX_OFFSET);
-       skb->dev = bp->dev;
        skb->ip_summed = CHECKSUM_NONE;
        skb_put(skb, len);
 
@@ -368,9 +367,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
                        BUG_ON(frag != last_frag);
                        frag_len = len - offset;
                }
-               memcpy(skb->data + offset,
-                      bp->rx_buffers + (RX_BUFFER_SIZE * frag),
-                      frag_len);
+               skb_copy_to_linear_data_offset(skb, offset,
+                                              (bp->rx_buffers +
+                                               (RX_BUFFER_SIZE * frag)),
+                                              frag_len);
                offset += RX_BUFFER_SIZE;
                bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
                wmb();
@@ -576,7 +576,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        int i;
        dev_dbg(&bp->pdev->dev,
                "start_xmit: len %u head %p data %p tail %p end %p\n",
-               skb->len, skb->head, skb->data, skb->tail, skb->end);
+               skb->len, skb->head, skb->data,
+               skb_tail_pointer(skb), skb_end_pointer(skb));
        dev_dbg(&bp->pdev->dev,
                "data:");
        for (i = 0; i < 16; i++)
index 9ec24f0..b3bd623 100644 (file)
@@ -939,7 +939,6 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
                else    /* Ethernet header; mace includes FCS */
                    nb -= 8;
                skb_put(skb, nb);
-               skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
                mp->stats.rx_bytes += skb->len;
                netif_rx(skb);
index 5d541e8..27911c0 100644 (file)
@@ -420,8 +420,7 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
        mp->stats.tx_bytes += skb->len;
 
        /* We need to copy into our xmit buffer to take care of alignment and caching issues */
-
-       memcpy((void *) mp->tx_ring, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);
 
        /* load the Tx DMA and fire it off */
 
@@ -621,7 +620,6 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
        skb_reserve(skb,2);
        memcpy(skb_put(skb, mf->len), mf->data, mf->len);
 
-       skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);
        dev->last_rx = jiffies;
index 7e69ca6..0343ea1 100644 (file)
@@ -421,7 +421,6 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
                                        /* Write metadata, and then pass to the receive level */
                                        skb_put(skb_c, len);
                                        priv->rx_skbs[priv->rx_write] = skb;
-                                       skb_c->dev = dev;
                                        skb_c->protocol = eth_type_trans(skb_c, dev);
                                        dev->last_rx = jiffies;
                                        priv->stats.rx_packets++;
@@ -609,7 +608,7 @@ static void meth_tx_short_prepare(struct meth_private *priv,
 
        desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16);
        /* maybe I should set whole thing to 0 first... */
-       memcpy(desc->data.dt + (120 - len), skb->data, skb->len);
+       skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len);
        if (skb->len < len)
                memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len);
 }
@@ -627,8 +626,8 @@ static void meth_tx_1page_prepare(struct meth_private *priv,
 
        /* unaligned part */
        if (unaligned_len) {
-               memcpy(desc->data.dt + (120 - unaligned_len),
-                      skb->data, unaligned_len);
+               skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
+                             unaligned_len);
                desc->header.raw |= (128 - unaligned_len) << 16;
        }
 
@@ -653,8 +652,8 @@ static void meth_tx_2page_prepare(struct meth_private *priv,
        desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1);
        /* unaligned part */
        if (unaligned_len){
-               memcpy(desc->data.dt + (120 - unaligned_len),
-                      skb->data, unaligned_len);
+               skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
+                             unaligned_len);
                desc->header.raw |= (128 - unaligned_len) << 16;
        }
 
index f42b9e2..403f63a 100644 (file)
@@ -101,7 +101,6 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
        if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
                return -EFAULT;
 
-       skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
index 8015a7c..ab15ecd 100644 (file)
@@ -434,7 +434,6 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
                         * received packet
                         */
                        skb_put(skb, pkt_info.byte_cnt - 4);
-                       skb->dev = dev;
 
                        if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1162,15 +1161,15 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
 
                cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
                           ETH_GEN_IP_V_4_CHECKSUM  |
-                          skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
+                          ip_hdr(skb)->ihl << ETH_TX_IHL_SHIFT;
 
-               switch (skb->nh.iph->protocol) {
+               switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        cmd_sts |= ETH_UDP_FRAME;
-                       desc->l4i_chk = skb->h.uh->check;
+                       desc->l4i_chk = udp_hdr(skb)->check;
                        break;
                case IPPROTO_TCP:
-                       desc->l4i_chk = skb->h.th->check;
+                       desc->l4i_chk = tcp_hdr(skb)->check;
                        break;
                default:
                        BUG();
index f8efe0e..16e3c43 100644 (file)
@@ -879,7 +879,7 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
         * skb_pull() (for ether_pad and eth_type_trans()) requires
         * the beginning of the packet in skb_headlen(), move it
         * manually */
-       memcpy(skb->data, va, hlen);
+       skb_copy_to_linear_data(skb, va, hlen);
        skb_shinfo(skb)->frags[0].page_offset += hlen;
        skb_shinfo(skb)->frags[0].size -= hlen;
        skb->data_len -= hlen;
@@ -1020,7 +1020,6 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
                skb_shinfo(skb)->nr_frags = 0;
        }
        skb->protocol = eth_type_trans(skb, dev);
-       skb->dev = dev;
 
        if (mgp->csum_flag) {
                if ((skb->protocol == htons(ETH_P_IP)) ||
@@ -2030,7 +2029,7 @@ again:
        odd_flag = 0;
        flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-               cksum_offset = (skb->h.raw - skb->data);
+               cksum_offset = skb_transport_offset(skb);
                pseudo_hdr_offset = cksum_offset + skb->csum_offset;
                /* If the headers are excessively large, then we must
                 * fall back to a software checksum */
@@ -2055,7 +2054,7 @@ again:
                 * send loop that we are still in the
                 * header portion of the TSO packet.
                 * TSO header must be at most 134 bytes long */
-               cum_len = -((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+               cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
 
                /* for TSO, pseudo_hdr_offset holds mss.
                 * The firmware figures out where to put
index ee26ef5..13444da 100644 (file)
@@ -368,7 +368,7 @@ static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev)
        struct ethhdr *eth;
        unsigned char *rawp;
 
-       skb->mac.raw = (((unsigned char *)skb->data) + MYRI_PAD_LEN);
+       skb_set_mac_header(skb, MYRI_PAD_LEN);
        skb_pull(skb, dev->hard_header_len);
        eth = eth_hdr(skb);
 
@@ -502,7 +502,7 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
                        copy_skb->dev = dev;
                        DRX(("resv_and_put "));
                        skb_put(copy_skb, len);
-                       memcpy(copy_skb->data, skb->data, len);
+                       skb_copy_from_linear_data(skb, copy_skb->data, len);
 
                        /* Reuse original ring buffer. */
                        DRX(("reuse "));
index 349b96a..a8d7ff2 100644 (file)
@@ -2289,7 +2289,6 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
                         * without copying to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak
                            && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
-                               skb->dev = dev;
                                /* 16 byte align the IP header */
                                skb_reserve(skb, RX_OFFSET);
                                pci_dma_sync_single_for_cpu(np->pci_dev,
index a53644f..2b8da0a 100644 (file)
@@ -168,7 +168,6 @@ static void netx_eth_receive(struct net_device *ndev)
                FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno));
 
        ndev->last_rx = jiffies;
-       skb->dev = ndev;
        skb->protocol = eth_type_trans(skb, ndev);
        netif_rx(skb);
        priv->stats.rx_packets++;
index 6537574..0fba8f1 100644 (file)
@@ -35,6 +35,8 @@
 #include "netxen_nic_hw.h"
 #include "netxen_nic_phan_reg.h"
 
+#include <net/ip.h>
+
 /*  PCI Windowing for DDR regions.  */
 
 #define ADDR_IN_RANGE(addr, low, high) \
@@ -371,22 +373,21 @@ void netxen_tso_check(struct netxen_adapter *adapter,
                      struct cmd_desc_type0 *desc, struct sk_buff *skb)
 {
        if (desc->mss) {
-               desc->total_hdr_length = sizeof(struct ethhdr) +
-                   ((skb->nh.iph)->ihl * sizeof(u32)) +
-                   ((skb->h.th)->doff * sizeof(u32));
+               desc->total_hdr_length = (sizeof(struct ethhdr) +
+                                         ip_hdrlen(skb) + tcp_hdrlen(skb));
                netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               if (skb->nh.iph->protocol == IPPROTO_TCP) {
+               if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
                        netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
-               } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
+               } else if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
                        netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT);
                } else {
                        return;
                }
        }
        adapter->stats.xmitcsummed++;
-       desc->tcp_hdr_offset = skb->h.raw - skb->data;
-       desc->ip_hdr_offset = skb->nh.raw - skb->data;
+       desc->tcp_hdr_offset = skb_transport_offset(skb);
+       desc->ip_hdr_offset = skb_network_offset(skb);
 }
 
 int netxen_is_flash_supported(struct netxen_adapter *adapter)
index eff965d..5cd4056 100644 (file)
@@ -1129,7 +1129,6 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
                port->stats.csummed++;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
-       skb->dev = netdev;
        if (desc_ctx == RCV_DESC_LRO_CTXID) {
                /* True length was only available on the last pkt */
                skb_put(skb, buffer->lro_length);
index 7d2525e..ab25c22 100644 (file)
@@ -41,6 +41,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
+#include <net/ip.h>
 
 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
 MODULE_LICENSE("GPL");
@@ -778,9 +779,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                if (skb_shinfo(skb)->gso_size > 0) {
 
                        no_of_desc++;
-                       if (((skb->nh.iph)->ihl * sizeof(u32)) +
-                           ((skb->h.th)->doff * sizeof(u32)) +
-                           sizeof(struct ethhdr) >
+                       if ((ip_hdrlen(skb) + tcp_hdrlen(skb) +
+                            sizeof(struct ethhdr)) >
                            (sizeof(struct cmd_desc_type0) - 2)) {
                                no_of_desc++;
                        }
@@ -920,8 +920,10 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                        /* copy the next 64 bytes - should be enough except
                         * for pathological case
                         */
-                       memcpy((void *)hwdesc, (void *)(skb->data) +
-                              first_hdr_len, hdr_len - first_hdr_len);
+                       skb_copy_from_linear_data_offset(skb, first_hdr_len,
+                                                        hwdesc,
+                                                        (hdr_len -
+                                                         first_hdr_len));
                        producer = get_next_index(producer, max_tx_desc_count);
                }
        }
index 8be0d03..3d5b423 100644 (file)
@@ -562,7 +562,6 @@ static void ni5010_rx(struct net_device *dev)
                return;
        }
 
-       skb->dev = dev;
        skb_reserve(skb, 2);
 
        /* Read packet into buffer */
index a6f4b24..8dbd6d1 100644 (file)
@@ -934,7 +934,6 @@ static void ni52_rcv_int(struct net_device *dev)
                                        skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
                                        if(skb != NULL)
                                        {
-                                               skb->dev = dev;
                                                skb_reserve(skb,2);
                                                skb_put(skb,totlen);
                                                eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0);
@@ -1183,7 +1182,7 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
        else
 #endif
        {
-               memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len);
+               skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);
                len = skb->len;
                if (len < ETH_ZLEN) {
                        len = ETH_ZLEN;
index 1578f4d..3818edf 100644 (file)
@@ -610,7 +610,6 @@ static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
                        printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
                        return NULL;
                }
-               skb->dev = dev;
                skb_reserve(skb,2+16);
                skb_put(skb,R_BUF_SIZE);         /* grab the whole space .. (not necessary) */
                ptr = skb->data;
@@ -1094,7 +1093,6 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
                        if(skb)
                        {
                                skb_reserve(skb,2);
-       skb->dev = dev;
 #ifdef RCV_VIA_SKB
                                if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
                                        skb_put(skb,len);
@@ -1178,8 +1176,9 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
                if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
 #endif
 
-                       memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data,
-                                                        (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len);
+                       skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
+                                     skb->len > T_BUF_SIZE ? T_BUF_SIZE :
+                                                             skb->len);
                        if (len > skb->len)
                                memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
                        dev_kfree_skb (skb);
index 9ec6e9e..6a32338 100644 (file)
@@ -607,7 +607,6 @@ static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
                res &= 0xf;
                skb_reserve(skb, res);
 
-               skb->dev = ndev;
                if (gfp != GFP_ATOMIC)
                        spin_lock_irqsave(&dev->rx_info.lock, flags);
                res = ns83820_add_rx_skb(dev, skb);
@@ -1157,9 +1156,9 @@ again:
        extsts = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                extsts |= EXTSTS_IPPKT;
-               if (IPPROTO_TCP == skb->nh.iph->protocol)
+               if (IPPROTO_TCP == ip_hdr(skb)->protocol)
                        extsts |= EXTSTS_TCPPKT;
-               else if (IPPROTO_UDP == skb->nh.iph->protocol)
+               else if (IPPROTO_UDP == ip_hdr(skb)->protocol)
                        extsts |= EXTSTS_UDPPKT;
        }
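With the header fields now stored as offsets, direct skb->nh.iph dereferences become ip_hdr(skb), as in the checksum-offload test above. A small sketch of the same classification under that assumption (the helper name is hypothetical):

	/* Sketch only: classify a CHECKSUM_PARTIAL packet by L4 protocol. */
	#include <linux/skbuff.h>
	#include <linux/types.h>
	#include <linux/ip.h>
	#include <linux/in.h>

	static bool l4_is_tcp(const struct sk_buff *skb)
	{
		return skb->ip_summed == CHECKSUM_PARTIAL &&
		       ip_hdr(skb)->protocol == IPPROTO_TCP;
	}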
 
index d670ac7..76fe9dd 100644 (file)
@@ -334,8 +334,6 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
                        break;
                }
 
-               skb->dev = dev;
-
                dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
                                     PCI_DMA_FROMDEVICE);
 
@@ -731,16 +729,18 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
        dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               switch (skb->nh.iph->protocol) {
+               const unsigned char *nh = skb_network_header(skb);
+
+               switch (ip_hdr(skb)->protocol) {
                case IPPROTO_TCP:
                        dflags |= XCT_MACTX_CSUM_TCP;
-                       dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
-                       dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+                       dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
+                       dflags |= XCT_MACTX_IPO(nh - skb->data);
                        break;
                case IPPROTO_UDP:
                        dflags |= XCT_MACTX_CSUM_UDP;
-                       dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
-                       dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
+                       dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
+                       dflags |= XCT_MACTX_IPO(nh - skb->data);
                        break;
                }
        }
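The header-offset arithmetic changes the same way: skb->h.raw - skb->nh.raw becomes skb_network_header_len(skb), and skb->nh.raw - skb->data becomes the distance of skb_network_header() from skb->data. A sketch computing the two values a tx descriptor typically wants, assuming an IPv4 frame with checksum offload (the function name is hypothetical):

	/* Sketch only: IP header length and offset for a tx descriptor. */
	#include <linux/skbuff.h>
	#include <linux/ip.h>

	static void ip_header_geometry(const struct sk_buff *skb,
				       unsigned int *iph_len,
				       unsigned int *iph_off)
	{
		*iph_len = skb_network_header_len(skb);		/* was h.raw - nh.raw */
		*iph_off = skb_network_header(skb) - skb->data;	/* was nh.raw - data  */
	}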
index 6ca4e4f..df8998b 100644 (file)
@@ -1344,7 +1344,7 @@ static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
        tp->tx_info[entry].skb = skb;
        /* tp->tx_info[entry].mapping = 0; */
-       memcpy (tp->tx_buf[entry], skb->data, skb->len);
+       skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len);
 
        /* Note: the chip doesn't have auto-pad! */
        NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)),
@@ -1565,7 +1565,6 @@ static void netdrv_rx_interrupt (struct net_device *dev,
 
                skb = dev_alloc_skb (pkt_size + 2);
                if (skb) {
-                       skb->dev = dev;
                        skb_reserve (skb, 2);   /* 16 byte align the IP fields. */
 
                        eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
index c7bd9c1..2b395ee 100644 (file)
@@ -1056,7 +1056,6 @@ static int el3_rx(struct net_device *dev, int worklimit)
                        DEBUG(3, "  Receiving packet size %d status %4.4x.\n",
                                  pkt_len, rx_status);
                        if (skb != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);
                                insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
                                                ((pkt_len+3)>>2));
index 461e827..143ae2f 100644 (file)
@@ -883,7 +883,6 @@ static int el3_rx(struct net_device *dev)
            DEBUG(3, "    Receiving packet size %d status %4.4x.\n",
                  pkt_len, rx_status);
            if (skb != NULL) {
-               skb->dev = dev;
                skb_reserve(skb, 2);
                insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
                        (pkt_len+3)>>2);
index 6139048..808fae1 100644 (file)
@@ -1136,7 +1136,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
                ei_block_output(dev, length, skb->data, output_page);
        else {
                memset(packet, 0, ETH_ZLEN);
-               memcpy(packet, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, packet, skb->len);
                ei_block_output(dev, length, packet, output_page);
        }
        
@@ -1496,7 +1496,6 @@ static void ei_receive(struct net_device *dev)
                        else
                        {
                                skb_reserve(skb,2);     /* IP headers on 16 byte boundaries */
-                               skb->dev = dev;
                                skb_put(skb, pkt_len);  /* Make room */
                                ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
                                skb->protocol=eth_type_trans(skb,dev);
index 0d7de61..3f93d49 100644 (file)
@@ -999,7 +999,6 @@ static void fjn_rx(struct net_device *dev)
                lp->stats.rx_dropped++;
                break;
            }
-           skb->dev = dev;
 
            skb_reserve(skb, 2);
            insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
index 3b70774..73da611 100644 (file)
@@ -1182,12 +1182,10 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
       skb = dev_alloc_skb(pkt_len+2);
 
       if (skb != NULL) {
-       skb->dev = dev;
-
        skb_reserve(skb, 2);
        insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
        if (pkt_len & 1)
-           *(skb->tail-1) = inb(ioaddr + AM2150_RCV);
+           *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
        skb->protocol = eth_type_trans(skb, dev);
        
        netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
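Direct uses of skb->tail become skb_tail_pointer(skb), which stays correct now that the tail may be stored as an offset rather than a pointer. Here the driver patches the final byte of a word-wise PIO copy; a sketch under that assumption (the function name is hypothetical):

	/* Sketch only: fix up the last byte after reading an odd-length
	 * frame from the device sixteen bits at a time. */
	#include <linux/skbuff.h>
	#include <asm/io.h>

	static void fixup_trailing_byte(struct sk_buff *skb, unsigned long ioaddr,
					unsigned int pkt_len)
	{
		insw(ioaddr, skb_put(skb, pkt_len), pkt_len >> 1);
		if (pkt_len & 1)			/* odd length: one byte short */
			*(skb_tail_pointer(skb) - 1) = inb(ioaddr);
	}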
index 2561f76..7912dbd 100644 (file)
@@ -1669,7 +1669,6 @@ static void smc_rx(struct net_device *dev)
             (packet_length+1)>>1);
        skb->protocol = eth_type_trans(skb, dev);
        
-       skb->dev = dev;
        netif_rx(skb);
        dev->last_rx = jiffies;
        smc->stats.rx_packets++;
index 5879e7c..809ec44 100644 (file)
@@ -1226,7 +1226,6 @@ xirc2ps_interrupt(int irq, void *dev_id)
                            (pktlen+1)>>1);
                }
                skb->protocol = eth_type_trans(skb, dev);
-               skb->dev = dev;
                netif_rx(skb);
                dev->last_rx = jiffies;
                lp->stats.rx_packets++;
index 4d94ba7..0791360 100644 (file)
@@ -1206,7 +1206,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
                                         PCI_DMA_FROMDEVICE);
                        skb_put(skb, pkt_len);
                        lp->rx_skbuff[entry] = newskb;
-                       newskb->dev = dev;
                        lp->rx_dma_addr[entry] =
                                            pci_map_single(lp->pci_dev,
                                                           newskb->data,
index 6bb085f..8754cf3 100644 (file)
@@ -546,7 +546,7 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
        struct ethhdr *eth;
        unsigned char *rawp;
 
-       skb->mac.raw=skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb,dev->hard_header_len);
        eth = eth_hdr(skb);
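Assignments of the form skb->mac.raw = skb->data are now spelled skb_reset_mac_header(skb); the same pattern appears later with skb_reset_network_header() in the pppoe changes. A minimal receive sketch in the style of the point-to-point drivers, assuming the frame has just been assembled in the skb's linear area (the function name is hypothetical):

	/* Sketch only: mark the current data pointer as the MAC header. */
	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/ip.h>
	#include <linux/if_ether.h>

	static void deliver_raw_frame(struct net_device *dev, struct sk_buff *skb)
	{
		skb->dev = dev;
		skb_reset_mac_header(skb);	/* was: skb->mac.raw = skb->data */
		skb->protocol = htons(ETH_P_IP);
		netif_rx(skb);
	}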
 
index 933e2f3..caabbc4 100644 (file)
@@ -802,9 +802,9 @@ process_input_packet(struct asyncppp *ap)
 
        /* check for address/control and protocol compression */
        p = skb->data;
-       if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+       if (p[0] == PPP_ALLSTATIONS) {
                /* chop off address/control */
-               if (skb->len < 3)
+               if (p[1] != PPP_UI || skb->len < 3)
                        goto err;
                p = skb_pull(skb, 2);
        }
index ef58e41..6d596ca 100644 (file)
@@ -88,8 +88,6 @@ struct ppp_file {
 #define PF_TO_PPP(pf)          PF_TO_X(pf, struct ppp)
 #define PF_TO_CHANNEL(pf)      PF_TO_X(pf, struct channel)
 
-#define ROUNDUP(n, x)          (((n) + (x) - 1) / (x))
-
 /*
  * Data structure describing one ppp unit.
  * A ppp unit corresponds to a ppp network interface device
@@ -1297,7 +1295,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
         */
        fragsize = len;
        if (nfree > 1)
-               fragsize = ROUNDUP(fragsize, nfree);
+               fragsize = DIV_ROUND_UP(fragsize, nfree);
        /* nbigger channels get fragsize bytes, the rest get fragsize-1,
           except if nbigger==0, then they all get fragsize. */
        nbigger = len % nfree;
@@ -1685,7 +1683,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                        skb_pull_rcsum(skb, 2);
                        skb->dev = ppp->dev;
                        skb->protocol = htons(npindex_to_ethertype[npi]);
-                       skb->mac.raw = skb->data;
+                       skb_reset_mac_header(skb);
                        netif_rx(skb);
                        ppp->dev->last_rx = jiffies;
                }
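The private ROUNDUP() macro duplicated DIV_ROUND_UP() from <linux/kernel.h>, so the multilink fragmenter now uses the shared helper; both compute the integer ceiling of n / x. A trivial sketch of the fragment-size calculation, with a hypothetical wrapper name:

	/* Sketch only: ceiling division as done by DIV_ROUND_UP(n, x). */
	#include <linux/kernel.h>

	static unsigned int fragsize_for(unsigned int len, unsigned int nfree)
	{
		/* DIV_ROUND_UP(1500, 4) == 375; DIV_ROUND_UP(1501, 4) == 376 */
		return nfree > 1 ? DIV_ROUND_UP(len, nfree) : len;
	}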
index b6f0e9a..5918fab 100644 (file)
@@ -594,7 +594,8 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
                                return NULL;
                        }
                        skb_reserve(npkt,2);
-                       memcpy(skb_put(npkt,skb->len), skb->data, skb->len);
+                       skb_copy_from_linear_data(skb,
+                                     skb_put(npkt, skb->len), skb->len);
                        kfree_skb(skb);
                        skb = npkt;
                }
index ebfa296..6f98834 100644 (file)
@@ -207,7 +207,7 @@ static inline struct pppox_sock *get_item(unsigned long sid,
 
 static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
 {
-       struct net_device *dev = NULL;
+       struct net_device *dev;
        int ifindex;
 
        dev = dev_get_by_name(sp->sa_addr.pppoe.dev);
@@ -218,20 +218,6 @@ static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
        return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex);
 }
 
-static inline int set_item(struct pppox_sock *po)
-{
-       int i;
-
-       if (!po)
-               return -EINVAL;
-
-       write_lock_bh(&pppoe_hash_lock);
-       i = __set_item(po);
-       write_unlock_bh(&pppoe_hash_lock);
-
-       return i;
-}
-
 static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex)
 {
        struct pppox_sock *ret;
@@ -255,54 +241,53 @@ static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int
 static void pppoe_flush_dev(struct net_device *dev)
 {
        int hash;
-
        BUG_ON(dev == NULL);
 
-       read_lock_bh(&pppoe_hash_lock);
+       write_lock_bh(&pppoe_hash_lock);
        for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) {
                struct pppox_sock *po = item_hash_table[hash];
 
                while (po != NULL) {
-                       if (po->pppoe_dev == dev) {
-                               struct sock *sk = sk_pppox(po);
-
-                               sock_hold(sk);
-                               po->pppoe_dev = NULL;
+                       struct sock *sk = sk_pppox(po);
+                       if (po->pppoe_dev != dev) {
+                               po = po->next;
+                               continue;
+                       }
+                       po->pppoe_dev = NULL;
+                       dev_put(dev);
 
-                               /* We hold a reference to SK, now drop the
-                                * hash table lock so that we may attempt
-                                * to lock the socket (which can sleep).
-                                */
-                               read_unlock_bh(&pppoe_hash_lock);
 
-                               lock_sock(sk);
+                       /* We always grab the socket lock, followed by the
+                        * pppoe_hash_lock, in that order.  Since we should
+                        * hold the sock lock while doing any unbinding,
+                        * we need to release the lock we're holding.
+                        * Hold a reference to the sock so it doesn't disappear
+                        * as we're jumping between locks.
+                        */
 
-                               if (sk->sk_state &
-                                   (PPPOX_CONNECTED | PPPOX_BOUND)) {
-                                       pppox_unbind_sock(sk);
-                                       dev_put(dev);
-                                       sk->sk_state = PPPOX_ZOMBIE;
-                                       sk->sk_state_change(sk);
-                               }
+                       sock_hold(sk);
 
-                               release_sock(sk);
+                       write_unlock_bh(&pppoe_hash_lock);
+                       lock_sock(sk);
 
-                               sock_put(sk);
+                       if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+                               pppox_unbind_sock(sk);
+                               sk->sk_state = PPPOX_ZOMBIE;
+                               sk->sk_state_change(sk);
+                       }
 
-                               read_lock_bh(&pppoe_hash_lock);
+                       release_sock(sk);
+                       sock_put(sk);
 
-                               /* Now restart from the beginning of this
-                                * hash chain.  We always NULL out pppoe_dev
-                                * so we are guaranteed to make forward
-                                * progress.
-                                */
-                               po = item_hash_table[hash];
-                               continue;
-                       }
-                       po = po->next;
+                       /* Restart scan at the beginning of this hash chain.
+                        * While the lock was dropped the chain contents may
+                        * have changed.
+                        */
+                       write_lock_bh(&pppoe_hash_lock);
+                       po = item_hash_table[hash];
                }
        }
-       read_unlock_bh(&pppoe_hash_lock);
+       write_unlock_bh(&pppoe_hash_lock);
 }
 
 static int pppoe_device_event(struct notifier_block *this,
@@ -344,10 +329,10 @@ static struct notifier_block pppoe_notifier = {
 static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
 {
        struct pppox_sock *po = pppox_sk(sk);
-       struct pppox_sock *relay_po = NULL;
+       struct pppox_sock *relay_po;
 
        if (sk->sk_state & PPPOX_BOUND) {
-               struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw;
+               struct pppoe_hdr *ph = pppoe_hdr(skb);
                int len = ntohs(ph->length);
                skb_pull_rcsum(skb, sizeof(struct pppoe_hdr));
                if (pskb_trim_rcsum(skb, len))
@@ -401,7 +386,7 @@ static int pppoe_rcv(struct sk_buff *skb,
        if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
                goto out;
 
-       ph = (struct pppoe_hdr *) skb->nh.raw;
+       ph = pppoe_hdr(skb);
 
        po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
        if (po != NULL)
@@ -433,7 +418,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
        if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
                goto out;
 
-       ph = (struct pppoe_hdr *) skb->nh.raw;
+       ph = pppoe_hdr(skb);
        if (ph->code != PADT_CODE)
                goto abort;
 
@@ -514,36 +499,49 @@ static int pppoe_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct pppox_sock *po;
-       int error = 0;
 
        if (!sk)
                return 0;
 
-       if (sock_flag(sk, SOCK_DEAD))
+       lock_sock(sk);
+       if (sock_flag(sk, SOCK_DEAD)){
+               release_sock(sk);
                return -EBADF;
+       }
 
        pppox_unbind_sock(sk);
 
        /* Signal the death of the socket. */
        sk->sk_state = PPPOX_DEAD;
 
+
+       /* Write lock on hash lock protects the entire "po" struct from
+        * concurrent updates via pppoe_flush_dev. The "po" struct should
+        * be considered part of the hash table contents, thus protected
+        * by the hash table lock */
+       write_lock_bh(&pppoe_hash_lock);
+
        po = pppox_sk(sk);
        if (po->pppoe_pa.sid) {
-               delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex);
+               __delete_item(po->pppoe_pa.sid,
+                             po->pppoe_pa.remote, po->pppoe_ifindex);
        }
 
-       if (po->pppoe_dev)
+       if (po->pppoe_dev) {
                dev_put(po->pppoe_dev);
+               po->pppoe_dev = NULL;
+       }
 
-       po->pppoe_dev = NULL;
+       write_unlock_bh(&pppoe_hash_lock);
 
        sock_orphan(sk);
        sock->sk = NULL;
 
        skb_queue_purge(&sk->sk_receive_queue);
+       release_sock(sk);
        sock_put(sk);
 
-       return error;
+       return 0;
 }
 
 
@@ -599,14 +597,18 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
                po->pppoe_dev = dev;
                po->pppoe_ifindex = dev->ifindex;
 
-               if (!(dev->flags & IFF_UP))
+               write_lock_bh(&pppoe_hash_lock);
+               if (!(dev->flags & IFF_UP)){
+                       write_unlock_bh(&pppoe_hash_lock);
                        goto err_put;
+               }
 
                memcpy(&po->pppoe_pa,
                       &sp->sa_addr.pppoe,
                       sizeof(struct pppoe_addr));
 
-               error = set_item(po);
+               error = __set_item(po);
+               write_unlock_bh(&pppoe_hash_lock);
                if (error < 0)
                        goto err_put;
 
@@ -762,10 +764,10 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
 static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
                  struct msghdr *m, size_t total_len)
 {
-       struct sk_buff *skb = NULL;
+       struct sk_buff *skb;
        struct sock *sk = sock->sk;
        struct pppox_sock *po = pppox_sk(sk);
-       int error = 0;
+       int error;
        struct pppoe_hdr hdr;
        struct pppoe_hdr *ph;
        struct net_device *dev;
@@ -799,7 +801,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        /* Reserve space for headers. */
        skb_reserve(skb, dev->hard_header_len);
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        skb->dev = dev;
 
@@ -869,7 +871,8 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
                        goto abort;
 
                skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr));
-               memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
+               skb_copy_from_linear_data(skb, skb_put(skb2, skb->len),
+                                         skb->len);
        } else {
                /* Make a clone so as to not disturb the original skb,
                 * give dev_queue_xmit something it can free.
@@ -884,7 +887,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
        memcpy(ph, &hdr, sizeof(struct pppoe_hdr));
        skb2->protocol = __constant_htons(ETH_P_PPP_SES);
 
-       skb2->nh.raw = skb2->data;
+       skb_reset_network_header(skb2);
 
        skb2->dev = dev;
 
@@ -929,10 +932,8 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
                  struct msghdr *m, size_t total_len, int flags)
 {
        struct sock *sk = sock->sk;
-       struct sk_buff *skb = NULL;
+       struct sk_buff *skb;
        int error = 0;
-       int len;
-       struct pppoe_hdr *ph = NULL;
 
        if (sk->sk_state & PPPOX_BOUND) {
                error = -EIO;
@@ -942,26 +943,21 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
        skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                flags & MSG_DONTWAIT, &error);
 
-       if (error < 0) {
+       if (error < 0)
                goto end;
-       }
 
        m->msg_namelen = 0;
 
        if (skb) {
-               error = 0;
-               ph = (struct pppoe_hdr *) skb->nh.raw;
-               len = ntohs(ph->length);
+               struct pppoe_hdr *ph = pppoe_hdr(skb);
+               const int len = ntohs(ph->length);
 
                error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len);
-               if (error < 0)
-                       goto do_skb_free;
-               error = len;
+               if (error == 0)
+                       error = len;
        }
 
-do_skb_free:
-       if (skb)
-               kfree_skb(skb);
+       kfree_skb(skb);
 end:
        return error;
 }
@@ -991,7 +987,7 @@ out:
 
 static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos)
 {
-       struct pppox_sock *po = NULL;
+       struct pppox_sock *po;
        int i = 0;
 
        for (; i < PPPOE_HASH_SIZE; i++) {
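The reworked pppoe_flush_dev() and pppoe_release() above establish a single lock order: the socket lock is always taken before pppoe_hash_lock. The flush path therefore holds a reference to the socket, drops the hash lock before the (sleeping) lock_sock(), and restarts the hash-chain scan after re-acquiring the hash lock, since the chain may have changed in between. A condensed, generic sketch of that pattern; every type and field name here is hypothetical and the per-socket work is elided:

	/* Sketch only: drop the outer lock before a sleeping lock, then
	 * restart the scan because the list may have changed meanwhile. */
	#include <linux/types.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <net/sock.h>

	struct hypothetical_entry {
		struct list_head node;
		struct sock *sk;
		bool needs_flush;
	};

	static DEFINE_RWLOCK(table_lock);	/* hypothetical outer lock */

	static void flush_all(struct list_head *chain)
	{
		struct hypothetical_entry *e;

	restart:
		write_lock_bh(&table_lock);
		list_for_each_entry(e, chain, node) {
			struct sock *sk = e->sk;

			if (!e->needs_flush)
				continue;
			e->needs_flush = false;		/* guarantees forward progress */
			sock_hold(sk);			/* keep sk alive across the unlock */
			write_unlock_bh(&table_lock);	/* lock_sock() may sleep */
			lock_sock(sk);
			/* ... per-socket teardown under the sock lock ... */
			release_sock(sk);
			sock_put(sk);
			goto restart;			/* chain may have changed */
		}
		write_unlock_bh(&table_lock);
	}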
index 9315046..3f8115d 100644 (file)
@@ -58,7 +58,7 @@ void pppox_unbind_sock(struct sock *sk)
 {
        /* Clear connection to ppp device, if attached. */
 
-       if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE)) {
+       if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE)) {
                ppp_unregister_channel(&pppox_sk(sk)->chan);
                sk->sk_state = PPPOX_DEAD;
        }
index a8246eb..7b80fb7 100755 (executable)
@@ -1873,7 +1873,6 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
                         pci_unmap_len(lrg_buf_cb2, maplen),
                         PCI_DMA_FROMDEVICE);
        prefetch(skb->data);
-       skb->dev = qdev->ndev;
        skb->ip_summed = CHECKSUM_NONE;
        skb->protocol = eth_type_trans(skb, qdev->ndev);
 
@@ -1928,7 +1927,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
                 * Copy the ethhdr from first buffer to second. This
                 * is necessary for 3022 IP completions.
                 */
-               memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
+               skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
+                                                skb_push(skb2, size), size);
        } else {
                u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
                if (checksum & 
@@ -1946,7 +1946,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
                        skb2->ip_summed = CHECKSUM_UNNECESSARY;
                }
        }
-       skb2->dev = qdev->ndev;
        skb2->protocol = eth_type_trans(skb2, qdev->ndev);
 
        netif_receive_skb(skb2);
index 6a77b8a..45876a8 100644 (file)
@@ -2284,7 +2284,7 @@ static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
                        return LargeSend | ((mss & MSSMask) << MSSShift);
        }
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               const struct iphdr *ip = skb->nh.iph;
+               const struct iphdr *ip = ip_hdr(skb);
 
                if (ip->protocol == IPPROTO_TCP)
                        return IPCS | TCPCS;
@@ -2586,7 +2586,6 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
                        pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
                                   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
-                       skb->dev = dev;
                        skb_put(skb, pkt_size);
                        skb->protocol = eth_type_trans(skb, dev);
 
index b7ff484..df6b738 100644 (file)
@@ -115,7 +115,6 @@ static int rionet_rx_clean(struct net_device *ndev)
 
                rnet->rx_skb[i]->data = data;
                skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
-               rnet->rx_skb[i]->dev = ndev;
                rnet->rx_skb[i]->protocol =
                    eth_type_trans(rnet->rx_skb[i], ndev);
                error = netif_rx(rnet->rx_skb[i]);
index d81536f..25c73d4 100644 (file)
@@ -1029,7 +1029,6 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
                                        goto defer;
                                }
                        }
-                       skb->dev = dev;
                        skb->protocol = hippi_type_trans(skb, dev);
 
                        netif_rx(skb);          /* send it up */
@@ -1452,7 +1451,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
                skb_reserve(new_skb, 8);
                skb_put(new_skb, len);
-               memcpy(new_skb->data, skb->data, len);
+               skb_copy_from_linear_data(skb, new_skb->data, len);
                dev_kfree_skb(skb);
                skb = new_skb;
        }
index 46ebf14..600d3ff 100644 (file)
@@ -2195,7 +2195,7 @@ static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
        frag_list->next = NULL;
        tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
        frag_list->data = tmp;
-       frag_list->tail = tmp;
+       skb_reset_tail_pointer(frag_list);
 
        /* Buffer-2 receives L4 data payload */
        ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
@@ -2349,7 +2349,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                        tmp += ALIGN_SIZE;
                        tmp &= ~ALIGN_SIZE;
                        skb->data = (void *) (unsigned long)tmp;
-                       skb->tail = (void *) (unsigned long)tmp;
+                       skb_reset_tail_pointer(skb);
 
                        if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
                                ((struct RxD3*)rxdp)->Buffer0_ptr =
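Drivers that realign skb->data by hand can no longer assign skb->tail directly; skb_reset_tail_pointer() re-derives the tail from the adjusted data pointer, as in the two hunks above. A sketch of that alignment fixup, assuming an empty, freshly allocated skb and an alignment mask of the form 2^n - 1 (the function name is hypothetical):

	/* Sketch only: realign an empty skb's data pointer and fix up tail. */
	#include <linux/skbuff.h>

	static void align_empty_skb(struct sk_buff *skb, unsigned long align_mask)
	{
		unsigned long tmp = (unsigned long)skb->data;

		tmp = (tmp + align_mask) & ~align_mask;
		skb->data = (unsigned char *)tmp;
		skb_reset_tail_pointer(skb);	/* was: skb->tail = skb->data */
	}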
index 143958f..ad94358 100644 (file)
@@ -688,7 +688,6 @@ static int lan_saa9730_rx(struct net_device *dev)
                        } else {
                                lp->stats.rx_bytes += len;
                                lp->stats.rx_packets++;
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align */
                                skb_put(skb, len);      /* make room */
                                eth_copy_and_sum(skb,
index b9fa4fb..1de3eec 100644 (file)
@@ -834,7 +834,7 @@ printk("cm0: IP identification: %02x%02x  fragment offset: %02x%02x\n", buffer[3
                        goto dropped_frame;
                }
                skb->dev = dev;
-               skb->mac.raw = skb->data;
+               skb_reset_mac_header(skb);
                skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16];
                insw(ioaddr, skb_put(skb, NewDatagramDataSize),
                        NewDatagramDataSize / 2);
index 103c317..0a3a379 100644 (file)
@@ -933,9 +933,6 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
                }
 
                sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
-
-               /* mark skbuff owned by our device */
-               sb_new->dev = d->sbdma_eth->sbm_dev;
        }
        else {
                sb_new = sb;
index c32c21a..5b7284c 100644 (file)
@@ -814,7 +814,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
                        memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
                }
 
-               skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
                dev->last_rx = jiffies;
                netif_rx(skb);
index 0d6c95c..4bce7c4 100644 (file)
@@ -550,7 +550,6 @@ static void seeq8005_rx(struct net_device *dev)
                                lp->stats.rx_dropped++;
                                break;
                        }
-                       skb->dev = dev;
                        skb_reserve(skb, 2);    /* align data on 16 byte */
                        buf = skb_put(skb,pkt_len);
 
index 52ed522..d8c9c5d 100644 (file)
@@ -318,7 +318,6 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
                        skb = dev_alloc_skb(len + 2);
 
                        if (skb) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);
                                skb_put(skb, len);
 
@@ -535,7 +534,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
         *    entry and the HPC got to the end of the chain before we
         *    added this new entry and restarted it.
         */
-       memcpy((char *)(long)td->buf_vaddr, skb->data, skblen);
+       skb_copy_from_linear_data(skb, (char *)(long)td->buf_vaddr, skblen);
        if (len != skblen)
                memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen);
        td->tdma.cntinfo = (len & HPCDMA_BCNT) |
index 34463ce..bc8de48 100644 (file)
@@ -632,7 +632,6 @@ static int sis190_rx_interrupt(struct net_device *dev,
                        pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
                                   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
-                       skb->dev = dev;
                        skb_put(skb, pkt_size);
                        skb->protocol = eth_type_trans(skb, dev);
 
index b3750f2..dea0126 100644 (file)
@@ -1160,7 +1160,6 @@ sis900_init_rx_ring(struct net_device *net_dev)
                           buffer */
                        break;
                }
-               skb->dev = net_dev;
                sis_priv->rx_skbuff[i] = skb;
                sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
                 sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
@@ -1755,6 +1754,24 @@ static int sis900_rx(struct net_device *net_dev)
                } else {
                        struct sk_buff * skb;
 
+                       pci_unmap_single(sis_priv->pci_dev,
+                               sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
+                               PCI_DMA_FROMDEVICE);
+
+                       /* refill the Rx buffer, what if there is not enought
+                        * memory for new socket buffer ?? */
+                       if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+                               /*
+                                * Not enough memory to refill the buffer
+                                * so we need to recycle the old one so
+                                * as to avoid creating a memory hole
+                                * in the rx ring
+                                */
+                               skb = sis_priv->rx_skbuff[entry];
+                               sis_priv->stats.rx_dropped++;
+                               goto refill_rx_ring;
+                       }       
+
                        /* This situation should never happen, but due to
                           some unknow bugs, it is possible that
                           we are working on NULL sk_buff :-( */
@@ -1768,9 +1785,6 @@ static int sis900_rx(struct net_device *net_dev)
                                break;
                        }
 
-                       pci_unmap_single(sis_priv->pci_dev,
-                               sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
-                               PCI_DMA_FROMDEVICE);
                        /* give the socket buffer to upper layers */
                        skb = sis_priv->rx_skbuff[entry];
                        skb_put(skb, rx_size);
@@ -1783,33 +1797,13 @@ static int sis900_rx(struct net_device *net_dev)
                        net_dev->last_rx = jiffies;
                        sis_priv->stats.rx_bytes += rx_size;
                        sis_priv->stats.rx_packets++;
-
-                       /* refill the Rx buffer, what if there is not enought
-                        * memory for new socket buffer ?? */
-                       if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
-                               /* not enough memory for skbuff, this makes a
-                                * "hole" on the buffer ring, it is not clear
-                                * how the hardware will react to this kind
-                                * of degenerated buffer */
-                               if (netif_msg_rx_status(sis_priv))
-                                       printk(KERN_INFO "%s: Memory squeeze,"
-                                               "deferring packet.\n",
-                                               net_dev->name);
-                               sis_priv->rx_skbuff[entry] = NULL;
-                               /* reset buffer descriptor state */
-                               sis_priv->rx_ring[entry].cmdsts = 0;
-                               sis_priv->rx_ring[entry].bufptr = 0;
-                               sis_priv->stats.rx_dropped++;
-                               sis_priv->cur_rx++;
-                               break;
-                       }
-                       skb->dev = net_dev;
+                       sis_priv->dirty_rx++;
+refill_rx_ring:
                        sis_priv->rx_skbuff[entry] = skb;
                        sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
                        sis_priv->rx_ring[entry].bufptr =
                                pci_map_single(sis_priv->pci_dev, skb->data,
                                        RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-                       sis_priv->dirty_rx++;
                }
                sis_priv->cur_rx++;
                entry = sis_priv->cur_rx % NUM_RX_DESC;
@@ -1836,7 +1830,6 @@ static int sis900_rx(struct net_device *net_dev)
                                sis_priv->stats.rx_dropped++;
                                break;
                        }
-                       skb->dev = net_dev;
                        sis_priv->rx_skbuff[entry] = skb;
                        sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
                        sis_priv->rx_ring[entry].bufptr =
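The reordered receive path above unmaps the old buffer and attempts the replacement allocation first; if dev_alloc_skb() fails, the just-received skb is recycled back into the ring instead of being passed up, so the ring never develops a hole of NULL buffers. A condensed sketch of that refill policy with hypothetical ring fields and function names; DMA mapping and statistics are elided:

	/* Sketch only: recycle the received skb when a replacement cannot
	 * be allocated, so every ring slot always owns a buffer. */
	#include <linux/types.h>
	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	struct hypothetical_ring_slot {
		struct sk_buff *skb;
	};

	static bool rx_refill_slot(struct net_device *dev,
				   struct hypothetical_ring_slot *slot,
				   unsigned int pkt_len, unsigned int buf_size)
	{
		struct sk_buff *rx = slot->skb;		/* holds the received frame */
		struct sk_buff *fresh = dev_alloc_skb(buf_size);

		if (!fresh)
			return false;	/* recycle: leave rx in the ring, drop the frame */

		skb_put(rx, pkt_len);			/* rx was empty until now */
		rx->protocol = eth_type_trans(rx, dev);
		netif_rx(rx);				/* hand the old buffer up ... */
		slot->skb = fresh;			/* ... park the fresh one in the ring */
		return true;
	}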
index e94ab25..e0a9300 100644 (file)
@@ -1562,10 +1562,10 @@ struct sk_buff  *pMessage)      /* pointer to send-message              */
        pTxd->pMBuf     = pMessage;
 
        if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
-               u16 hdrlen = pMessage->h.raw - pMessage->data;
+               u16 hdrlen = skb_transport_offset(pMessage);
                u16 offset = hdrlen + pMessage->csum_offset;
 
-               if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) &&
+               if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) &&
                        (pAC->GIni.GIChipRev == 0) &&
                        (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
                        pTxd->TBControl = BMU_TCP_CHECK;
@@ -1681,7 +1681,7 @@ struct sk_buff    *pMessage)      /* pointer to send-message              */
        ** Does the HW need to evaluate checksum for TCP or UDP packets? 
        */
        if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
-               u16 hdrlen = pMessage->h.raw - pMessage->data;
+               u16 hdrlen = skb_transport_offset(pMessage);
                u16 offset = hdrlen + pMessage->csum_offset;
 
                Control = BMU_STFWD;
@@ -1691,7 +1691,7 @@ struct sk_buff    *pMessage)      /* pointer to send-message              */
                ** opcode for udp is not working in the hardware yet 
                ** (Revision 2.0)
                */
-               if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) &&
+               if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) &&
                        (pAC->GIni.GIChipRev == 0) &&
                        (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
                        Control |= BMU_TCP_CHECK;
@@ -2127,7 +2127,7 @@ rx_start:
                                                    (dma_addr_t) PhysAddr,
                                                    FrameLength,
                                                    PCI_DMA_FROMDEVICE);
-                       memcpy(pNewMsg->data, pMsg, FrameLength);
+                       skb_copy_to_linear_data(pNewMsg, pMsg, FrameLength);
 
                        pci_dma_sync_single_for_device(pAC->PciDev,
                                                       (dma_addr_t) PhysAddr,
@@ -2193,7 +2193,6 @@ rx_start:
                                SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC,
                                        FrameLength, pRxPort->PortIndex);
 
-                               pMsg->dev = pAC->dev[pRxPort->PortIndex];
                                pMsg->protocol = eth_type_trans(pMsg,
                                        pAC->dev[pRxPort->PortIndex]);
                                netif_rx(pMsg);
@@ -2246,7 +2245,6 @@ rx_start:
                                (IFF_PROMISC | IFF_ALLMULTI)) != 0 ||
                                (ForRlmt & SK_RLMT_RX_PROTOCOL) ==
                                SK_RLMT_RX_PROTOCOL) {
-                               pMsg->dev = pAC->dev[pRxPort->PortIndex];
                                pMsg->protocol = eth_type_trans(pMsg,
                                        pAC->dev[pRxPort->PortIndex]);
                                netif_rx(pMsg);
index 9733a11..a7ef6c8 100644 (file)
@@ -1680,7 +1680,6 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
        rxd->rxd_os.skb = NULL;
        skb_trim(skb, len);
        skb->protocol = fddi_type_trans(skb, bp->dev);
-       skb->dev = bp->dev;     /* pass up device pointer */
 
        netif_rx(skb);
        bp->dev->last_rx = jiffies;
@@ -1938,7 +1937,7 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
        }
        skb_reserve(skb, 3);
        skb_put(skb, len);
-       memcpy(skb->data, look_ahead, len);
+       skb_copy_to_linear_data(skb, look_ahead, len);
 
        // deliver frame to system
        skb->protocol = fddi_type_trans(skb, smc->os.dev);
index d476a3c..f1a0e6c 100644 (file)
@@ -2654,12 +2654,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
        td->dma_hi = map >> 32;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               int offset = skb->h.raw - skb->data;
+               const int offset = skb_transport_offset(skb);
 
                /* This seems backwards, but it is what the sk98lin
                 * does.  Looks like hardware is wrong?
                 */
-               if (skb->h.ipiph->protocol == IPPROTO_UDP
+               if (ipip_hdr(skb)->protocol == IPPROTO_UDP
                    && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
                        control = BMU_TCP_CHECK;
                else
@@ -2950,7 +2950,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                pci_dma_sync_single_for_cpu(skge->hw->pdev,
                                            pci_unmap_addr(e, mapaddr),
                                            len, PCI_DMA_FROMDEVICE);
-               memcpy(skb->data, e->skb->data, len);
+               skb_copy_from_linear_data(e->skb, skb->data, len);
                pci_dma_sync_single_for_device(skge->hw->pdev,
                                               pci_unmap_addr(e, mapaddr),
                                               len, PCI_DMA_FROMDEVICE);
index 4a009b7..238c2ca 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/ethtool.h>
 #include <linux/pci.h>
 #include <linux/ip.h>
+#include <net/ip.h>
 #include <linux/tcp.h>
 #include <linux/in.h>
 #include <linux/delay.h>
@@ -49,7 +50,7 @@
 #include "sky2.h"
 
 #define DRV_NAME               "sky2"
-#define DRV_VERSION            "1.13"
+#define DRV_VERSION            "1.14"
 #define PFX                    DRV_NAME " "
 
 /*
@@ -123,7 +124,10 @@ static const struct pci_device_id sky2_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
+#ifdef broken
+       /* This device causes data corruption problems that are not resolved */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
+#endif
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
@@ -740,12 +744,17 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
        if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
                sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
                sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
-               if (hw->dev[port]->mtu > ETH_DATA_LEN) {
-                       /* set Tx GMAC FIFO Almost Empty Threshold */
-                       sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180);
-                       /* Disable Store & Forward mode for TX */
-                       sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
-               }
+
+               /* set Tx GMAC FIFO Almost Empty Threshold */
+               sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+                            (ECU_JUMBO_WM << 16) | ECU_AE_THR);
+
+               if (hw->dev[port]->mtu > ETH_DATA_LEN)
+                       sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+                                    TX_JUMBO_ENA | TX_STFW_DIS);
+               else
+                       sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+                                    TX_JUMBO_DIS | TX_STFW_ENA);
        }
 
 }
@@ -1278,7 +1287,7 @@ static int sky2_up(struct net_device *dev)
        /* Set almost empty threshold */
        if (hw->chip_id == CHIP_ID_YUKON_EC_U
            && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
-               sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
+               sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
 
        sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
                           TX_RING_SIZE - 1);
@@ -1383,8 +1392,8 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
        /* Check for TCP Segmentation Offload */
        mss = skb_shinfo(skb)->gso_size;
        if (mss != 0) {
-               mss += ((skb->h.th->doff - 5) * 4);     /* TCP options */
-               mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
+               mss += tcp_optlen(skb); /* TCP options */
+               mss += ip_hdrlen(skb) + sizeof(struct tcphdr);
                mss += ETH_HLEN;
 
                if (mss != sky2->tx_last_mss) {
@@ -1412,14 +1421,14 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 
        /* Handle TCP checksum offload */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               unsigned offset = skb->h.raw - skb->data;
+               const unsigned offset = skb_transport_offset(skb);
                u32 tcpsum;
 
                tcpsum = offset << 16;          /* sum start */
                tcpsum |= offset + skb->csum_offset;    /* sum write */
 
                ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
-               if (skb->nh.iph->protocol == IPPROTO_UDP)
+               if (ip_hdr(skb)->protocol == IPPROTO_UDP)
                        ctrl |= UDPTCP;
 
                if (tcpsum != sky2->tx_tcpsum) {
@@ -1584,13 +1593,6 @@ static int sky2_down(struct net_device *dev)
        sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
                     RB_RST_SET | RB_DIS_OP_MD);
 
-       /* WA for dev. #4.209 */
-       if (hw->chip_id == CHIP_ID_YUKON_EC_U
-           && (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
-               sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-                            sky2->speed != SPEED_1000 ?
-                            TX_STFW_ENA : TX_STFW_DIS);
-
        ctrl = gma_read16(hw, port, GM_GP_CTRL);
        ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
        gma_write16(hw, port, GM_GP_CTRL, ctrl);
@@ -1890,6 +1892,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
+       unsigned port = sky2->port;
        int err;
        u16 ctl, mode;
        u32 imask;
@@ -1897,9 +1900,8 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
        if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
                return -EINVAL;
 
-       /* TSO on Yukon Ultra and MTU > 1500 not supported */
-       if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
-               dev->features &= ~NETIF_F_TSO;
+       if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_FE)
+               return -EINVAL;
 
        if (!netif_running(dev)) {
                dev->mtu = new_mtu;
@@ -1915,8 +1917,18 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 
        synchronize_irq(hw->pdev->irq);
 
-       ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
-       gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
+       if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
+               if (new_mtu > ETH_DATA_LEN) {
+                       sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+                                    TX_JUMBO_ENA | TX_STFW_DIS);
+                       dev->features &= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
+               } else
+                       sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+                                    TX_JUMBO_DIS | TX_STFW_ENA);
+       }
+
+       ctl = gma_read16(hw, port, GM_GP_CTRL);
+       gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
        sky2_rx_stop(sky2);
        sky2_rx_clean(sky2);
 
@@ -1928,9 +1940,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
        if (dev->mtu > ETH_DATA_LEN)
                mode |= GM_SMOD_JUMBO_ENA;
 
-       gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode);
+       gma_write16(hw, port, GM_SERIAL_MODE, mode);
 
-       sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
+       sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
 
        err = sky2_rx_start(sky2);
        sky2_write32(hw, B0_IMSK, imask);
@@ -1938,7 +1950,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
        if (err)
                dev_close(dev);
        else {
-               gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);
+               gma_write16(hw, port, GM_GP_CTRL, ctl);
 
                netif_poll_enable(hw->dev[0]);
                netif_wake_queue(dev);
@@ -1959,7 +1971,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
                skb_reserve(skb, 2);
                pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
                                            length, PCI_DMA_FROMDEVICE);
-               memcpy(skb->data, re->skb->data, length);
+               skb_copy_from_linear_data(re->skb, skb->data, length);
                skb->ip_summed = re->skb->ip_summed;
                skb->csum = re->skb->csum;
                pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
@@ -2340,26 +2352,22 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
        }
 }
 
-/* This should never happen it is a fatal situation */
-static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
-                                 const char *rxtx, u32 mask)
+/* This should never happen it is a bug. */
+static void sky2_le_error(struct sky2_hw *hw, unsigned port,
+                         u16 q, unsigned ring_size)
 {
        struct net_device *dev = hw->dev[port];
        struct sky2_port *sky2 = netdev_priv(dev);
-       u32 imask;
-
-       printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n",
-              dev ? dev->name : "<not registered>", rxtx);
+       unsigned idx;
+       const u64 *le = (q == Q_R1 || q == Q_R2)
+               ? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le;
 
-       imask = sky2_read32(hw, B0_IMSK);
-       imask &= ~mask;
-       sky2_write32(hw, B0_IMSK, imask);
+       idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
+       printk(KERN_ERR PFX "%s: descriptor error q=%#x get=%u [%llx] put=%u\n",
+              dev->name, (unsigned) q, idx, (unsigned long long) le[idx],
+              (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
 
-       if (dev) {
-               spin_lock(&sky2->phy_lock);
-               sky2_link_down(sky2);
-               spin_unlock(&sky2->phy_lock);
-       }
+       sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
 }
 
 /* If idle then force a fake soft NAPI poll once a second
@@ -2383,23 +2391,15 @@ static void sky2_idle(unsigned long arg)
        mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
 }
 
-
-static int sky2_poll(struct net_device *dev0, int *budget)
+/* Hardware/software error handling */
+static void sky2_err_intr(struct sky2_hw *hw, u32 status)
 {
-       struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
-       int work_limit = min(dev0->quota, *budget);
-       int work_done = 0;
-       u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+       if (net_ratelimit())
+               dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
 
        if (status & Y2_IS_HW_ERR)
                sky2_hw_intr(hw);
 
-       if (status & Y2_IS_IRQ_PHY1)
-               sky2_phy_intr(hw, 0);
-
-       if (status & Y2_IS_IRQ_PHY2)
-               sky2_phy_intr(hw, 1);
-
        if (status & Y2_IS_IRQ_MAC1)
                sky2_mac_intr(hw, 0);
 
@@ -2407,16 +2407,33 @@ static int sky2_poll(struct net_device *dev0, int *budget)
                sky2_mac_intr(hw, 1);
 
        if (status & Y2_IS_CHK_RX1)
-               sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
+               sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE);
 
        if (status & Y2_IS_CHK_RX2)
-               sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
+               sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE);
 
        if (status & Y2_IS_CHK_TXA1)
-               sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
+               sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE);
 
        if (status & Y2_IS_CHK_TXA2)
-               sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
+               sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE);
+}
+
+static int sky2_poll(struct net_device *dev0, int *budget)
+{
+       struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
+       int work_limit = min(dev0->quota, *budget);
+       int work_done = 0;
+       u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+
+       if (unlikely(status & Y2_IS_ERROR))
+               sky2_err_intr(hw, status);
+
+       if (status & Y2_IS_IRQ_PHY1)
+               sky2_phy_intr(hw, 0);
+
+       if (status & Y2_IS_IRQ_PHY2)
+               sky2_phy_intr(hw, 1);
 
        work_done = sky2_status_intr(hw, work_limit);
        if (work_done < work_limit) {
@@ -2534,16 +2551,14 @@ static void sky2_reset(struct sky2_hw *hw)
        int i;
 
        /* disable ASF */
-       if (hw->chip_id <= CHIP_ID_YUKON_EC) {
-               if (hw->chip_id == CHIP_ID_YUKON_EX) {
-                       status = sky2_read16(hw, HCU_CCSR);
-                       status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
-                                   HCU_CCSR_UC_STATE_MSK);
-                       sky2_write16(hw, HCU_CCSR, status);
-               } else
-                       sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
-               sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
-       }
+       if (hw->chip_id == CHIP_ID_YUKON_EX) {
+               status = sky2_read16(hw, HCU_CCSR);
+               status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
+                           HCU_CCSR_UC_STATE_MSK);
+               sky2_write16(hw, HCU_CCSR, status);
+       } else
+               sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
+       sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
 
        /* do a SW reset */
        sky2_write8(hw, B0_CTST, CS_RST_SET);
@@ -3328,6 +3343,36 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                      regs->len - B3_RI_WTO_R1);
 }
 
+/* In order to do Jumbo packets on these chips, need to turn off the
+ * transmit store/forward. Therefore checksum offload won't work.
+ */
+static int no_tx_offload(struct net_device *dev)
+{
+       const struct sky2_port *sky2 = netdev_priv(dev);
+       const struct sky2_hw *hw = sky2->hw;
+
+       return dev->mtu > ETH_DATA_LEN &&
+               (hw->chip_id == CHIP_ID_YUKON_EX
+                || hw->chip_id == CHIP_ID_YUKON_EC_U);
+}
+
+static int sky2_set_tx_csum(struct net_device *dev, u32 data)
+{
+       if (data && no_tx_offload(dev))
+               return -EINVAL;
+
+       return ethtool_op_set_tx_csum(dev, data);
+}
+
+
+static int sky2_set_tso(struct net_device *dev, u32 data)
+{
+       if (data && no_tx_offload(dev))
+               return -EINVAL;
+
+       return ethtool_op_set_tso(dev, data);
+}
+
 static const struct ethtool_ops sky2_ethtool_ops = {
        .get_settings = sky2_get_settings,
        .set_settings = sky2_set_settings,
@@ -3343,9 +3388,9 @@ static const struct ethtool_ops sky2_ethtool_ops = {
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_tx_csum = ethtool_op_get_tx_csum,
-       .set_tx_csum = ethtool_op_set_tx_csum,
+       .set_tx_csum = sky2_set_tx_csum,
        .get_tso = ethtool_op_get_tso,
-       .set_tso = ethtool_op_set_tso,
+       .set_tso = sky2_set_tso,
        .get_rx_csum = sky2_get_rx_csum,
        .set_rx_csum = sky2_set_rx_csum,
        .get_strings = sky2_get_strings,
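On the transmit side the driver now derives every offset from the accessor helpers: skb_transport_offset() replaces skb->h.raw - skb->data, ip_hdrlen() and tcp_optlen() replace the open-coded ihl/doff arithmetic, and ip_hdr() replaces skb->nh.iph. A sketch of the MSS and checksum parameters as the new code computes them, assuming an IPv4 TCP frame with GSO and CHECKSUM_PARTIAL set (the function and output names are hypothetical):

	/* Sketch only: offload parameters derived with the header accessors. */
	#include <linux/skbuff.h>
	#include <linux/ip.h>
	#include <linux/tcp.h>
	#include <linux/if_ether.h>

	static void offload_params(struct sk_buff *skb,
				   unsigned int *wire_mss, u32 *csum_word)
	{
		unsigned int mss = skb_shinfo(skb)->gso_size;
		unsigned int off = skb_transport_offset(skb);	/* was h.raw - data */

		/* Wire-level segment size: payload MSS plus all headers. */
		*wire_mss = mss + tcp_optlen(skb)		/* TCP options */
			       + ip_hdrlen(skb) + sizeof(struct tcphdr)
			       + ETH_HLEN;

		/* Checksum start in the high half, write position in the low half. */
		*csum_word = (off << 16) | (off + skb->csum_offset);
	}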
index ac24bdc..5efb5af 100644 (file)
@@ -288,6 +288,9 @@ enum {
                          | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
        Y2_IS_PORT_2    = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
                          | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
+       Y2_IS_ERROR     = Y2_IS_HW_ERR |
+                         Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 |
+                         Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
 };
 
 /*     B2_IRQM_HWE_MSK 32 bit  IRQ Moderation HW Error Mask */
@@ -738,6 +741,11 @@ enum {
        TX_GMF_RP       = 0x0d70,/* 32 bit      Tx GMAC FIFO Read Pointer */
        TX_GMF_RSTP     = 0x0d74,/* 32 bit      Tx GMAC FIFO Restart Pointer */
        TX_GMF_RLEV     = 0x0d78,/* 32 bit      Tx GMAC FIFO Read Level */
+
+       /* Threshold values for Yukon-EC Ultra and Extreme */
+       ECU_AE_THR      = 0x0070, /* Almost Empty Threshold */
+       ECU_TXFF_LEV    = 0x01a0, /* Tx BMU FIFO Level */
+       ECU_JUMBO_WM    = 0x0080, /* Jumbo Mode Watermark */
 };
 
 /* Descriptor Poll Timer Registers */
@@ -1631,6 +1639,9 @@ enum {
        TX_VLAN_TAG_ON  = 1<<25,/* enable  VLAN tagging */
        TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
 
+       TX_JUMBO_ENA    = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
+       TX_JUMBO_DIS    = 1<<22,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
+
        GMF_WSP_TST_ON  = 1<<18,/* Write Shadow Pointer Test On */
        GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
        GMF_WSP_STEP    = 1<<16,/* Write Shadow Pointer Step/Increment */
index 2f4b1de..65bd20f 100644 (file)
@@ -363,7 +363,7 @@ sl_bump(struct slip *sl)
        }
        skb->dev = sl->dev;
        memcpy(skb_put(skb,count), sl->rbuff, count);
-       skb->mac.raw=skb->data;
+       skb_reset_mac_header(skb);
        skb->protocol=htons(ETH_P_IP);
        netif_rx(skb);
        sl->dev->last_rx = jiffies;
index c956141..8a2109a 100644 (file)
@@ -502,7 +502,6 @@ static inline void   smc911x_rcv(struct net_device *dev)
                DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name,);
                PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
                dev->last_rx = jiffies;
-               skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
                netif_rx(skb);
                lp->stats.rx_packets++;
@@ -1307,7 +1306,6 @@ smc911x_rx_dma_irq(int dma, void *data)
        lp->current_rx_skb = NULL;
        PRINT_PKT(skb->data, skb->len);
        dev->last_rx = jiffies;
-       skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);
        lp->stats.rx_packets++;
index bd6e845..36c1eba 100644 (file)
@@ -1262,7 +1262,6 @@ static void smc_rcv(struct net_device *dev)
 
                skb_reserve( skb, 2 );   /* 16 bit alignment */
 
-               skb->dev = dev;
                data = skb_put( skb, packet_length);
 
 #ifdef USE_32_BIT
index 49f4b77..01cc3c7 100644 (file)
@@ -568,7 +568,6 @@ static inline void  smc_rcv(struct net_device *dev)
                PRINT_PKT(data, packet_len - 4);
 
                dev->last_rx = jiffies;
-               skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
                netif_rx(skb);
                lp->stats.rx_packets++;
index ed7aa0a..c6320c7 100644 (file)
@@ -85,7 +85,6 @@ static int sonic_open(struct net_device *dev)
                               dev->name);
                        return -ENOMEM;
                }
-               skb->dev = dev;
                /* align IP header unless DMA requires otherwise */
                if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
                        skb_reserve(skb, 2);
@@ -451,7 +450,6 @@ static void sonic_rx(struct net_device *dev)
                                lp->stats.rx_dropped++;
                                break;
                        }
-                       new_skb->dev = dev;
                        /* provide 16 byte IP header alignment unless DMA requires otherwise */
                        if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
                                skb_reserve(new_skb, 2);
index 3b91af8..230da14 100644 (file)
@@ -719,8 +719,8 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
                        SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
        spin_unlock_irqrestore(&chain->lock, flags);
 
-       if (skb->protocol == htons(ETH_P_IP))
-               switch (skb->nh.iph->protocol) {
+       if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == CHECKSUM_PARTIAL)
+               switch (ip_hdr(skb)->protocol) {
                case IPPROTO_TCP:
                        hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
                        break;
@@ -990,7 +990,6 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
        netdev = card->netdev;
 
        skb = descr->skb;
-       skb->dev = netdev;
        skb_put(skb, hwdescr->valid_size);
 
        /* the card seems to add 2 bytes of junk in front
index 8bba2e3..9d6e454 100644 (file)
@@ -1452,7 +1452,6 @@ static int __netdev_rx(struct net_device *dev, int *quota)
                   to a minimally-sized skbuff. */
                if (pkt_len < rx_copybreak
                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                       skb->dev = dev;
                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                        pci_dma_sync_single_for_cpu(np->pci_dev,
                                                    np->rx_info[entry].mapping,
index 4757aa6..396c3d9 100644 (file)
@@ -775,7 +775,6 @@ static void sun3_82586_rcv_int(struct net_device *dev)
                                        skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
                                        if(skb != NULL)
                                        {
-                                               skb->dev = dev;
                                                skb_reserve(skb,2);
                                                skb_put(skb,totlen);
                                                eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0);
@@ -1027,7 +1026,7 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
                        memset((char *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
                        len = ETH_ZLEN;
                }
-               memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len);
+               skb_copy_from_linear_data(skb, p->xmit_cbuffs[p->xmit_count], skb->len);
 
 #if (NUM_XMIT_BUFFS == 1)
 #      ifdef NO_NOPCOMMANDS
index 7bee45b..791e081 100644 (file)
@@ -629,7 +629,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
        head->length = (-len) | 0xf000;
        head->misc = 0;
 
-       memcpy( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
+       skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len);
        if (len != skb->len)
                memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len);
 
@@ -851,10 +851,9 @@ static int lance_rx( struct net_device *dev )
                                }
 
 
-                               skb->dev = dev;
                                skb_reserve( skb, 2 );  /* 16 byte align */
                                skb_put( skb, pkt_len );        /* Make room */
-//                             memcpy( skb->data, PKTBUF_ADDR(head), pkt_len );
+//                             skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len);
                                eth_copy_and_sum(skb,
                                                 PKTBUF_ADDR(head),
                                                 pkt_len, 0);
index 18f8885..2ad8d58 100644 (file)
@@ -855,7 +855,6 @@ static void bigmac_rx(struct bigmac *bp)
                                drops++;
                                goto drop_it;
                        }
-                       copy_skb->dev = bp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        sbus_dma_sync_single_for_cpu(bp->bigmac_sdev,
index c06ecc8..f51ba31 100644 (file)
@@ -1308,7 +1308,6 @@ static void rx_poll(unsigned long data)
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak
                                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(np->pci_dev,
                                                            desc->frag[0].addr,
index 08ea61d..5da7321 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/irq.h>
 
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
 #include <asm/idprom.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-#include <asm/pbm.h>
+#include <asm/prom.h>
 #endif
 
 #ifdef CONFIG_PPC_PMAC
@@ -845,11 +843,10 @@ static int gem_rx(struct gem *gp, int work_to_do)
                                goto drop_it;
                        }
 
-                       copy_skb->dev = gp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-                       memcpy(copy_skb->data, skb->data, len);
+                       skb_copy_from_linear_data(skb, copy_skb->data, len);
                        pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 
                        /* We'll reuse the original ring buffer. */
@@ -1029,10 +1026,8 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        ctrl = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               u64 csum_start_off, csum_stuff_off;
-
-               csum_start_off = (u64) (skb->h.raw - skb->data);
-               csum_stuff_off = csum_start_off + skb->csum_offset;
+               const u64 csum_start_off = skb_transport_offset(skb);
+               const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
 
                ctrl = (TXDCTRL_CENAB |
                        (csum_start_off << 15) |
@@ -2849,7 +2844,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return rc;
 }
 
-#if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC))
+#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
 /* Fetch MAC address from vital product data of PCI ROM. */
 static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
 {
@@ -2904,36 +2899,19 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
 
 static int __devinit gem_get_device_address(struct gem *gp)
 {
-#if defined(__sparc__) || defined(CONFIG_PPC_PMAC)
+#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
        struct net_device *dev = gp->dev;
-#endif
-
-#if defined(__sparc__)
-       struct pci_dev *pdev = gp->pdev;
-       struct pcidev_cookie *pcp = pdev->sysdata;
-       int use_idprom = 1;
-
-       if (pcp != NULL) {
-               unsigned char *addr;
-               int len;
-
-               addr = of_get_property(pcp->prom_node, "local-mac-address",
-                                      &len);
-               if (addr && len == 6) {
-                       use_idprom = 0;
-                       memcpy(dev->dev_addr, addr, 6);
-               }
-       }
-       if (use_idprom)
-               memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
-#elif defined(CONFIG_PPC_PMAC)
        const unsigned char *addr;
 
        addr = get_property(gp->of_node, "local-mac-address", NULL);
        if (addr == NULL) {
+#ifdef CONFIG_SPARC
+               addr = idprom->id_ethaddr;
+#else
                printk("\n");
                printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
                return -1;
+#endif
        }
        memcpy(dev->dev_addr, addr, 6);
 #else
@@ -3091,7 +3069,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
        /* On Apple, we want a reference to the Open Firmware device-tree
         * node. We use it for clock control.
         */
-#ifdef CONFIG_PPC_PMAC
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
        gp->of_node = pci_device_to_OF_node(pdev);
 #endif
 
index a70067c..58cf87c 100644 (file)
@@ -1025,7 +1025,7 @@ struct gem {
 
        struct pci_dev          *pdev;
        struct net_device       *dev;
-#ifdef CONFIG_PPC_PMAC
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
        struct device_node      *of_node;
 #endif
 };
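
Two helper conversions recur in the sungem hunks above and throughout the rest of this merge; a rough sketch of the 2.6.21-era equivalences they rely on (an illustration, not code from the patch):

    #include <linux/skbuff.h>

    /* Rough equivalences:
     *   skb_copy_from_linear_data(skb, to, len)  ~  memcpy(to, skb->data, len)
     *   skb_copy_to_linear_data(skb, from, len)  ~  memcpy(skb->data, from, len)
     *   skb_transport_offset(skb)                ~  old skb->h.raw - skb->data
     */
    static u64 csum_stuff_offset_sketch(const struct sk_buff *skb)
    {
            /* Checksum "stuff" offset as the converted Tx paths compute it. */
            return skb_transport_offset(skb) + skb->csum_offset;
    }
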
index ef67173..51c3fe2 100644 (file)
@@ -55,9 +55,6 @@
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
-#ifdef CONFIG_SPARC
-#include <asm/pbm.h>
-#endif
 #endif
 
 #include "sunhme.h"
@@ -2058,11 +2055,10 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
                                goto drop_it;
                        }
 
-                       copy_skb->dev = dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE);
-                       memcpy(copy_skb->data, skb->data, len);
+                       skb_copy_from_linear_data(skb, copy_skb->data, len);
                        hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE);
 
                        /* Reuse original ring buffer. */
@@ -2270,10 +2266,8 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_flags = TXFLAG_OWN;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               u32 csum_start_off, csum_stuff_off;
-
-               csum_start_off = (u32) (skb->h.raw - skb->data);
-               csum_stuff_off = csum_start_off + skb->csum_offset;
+               const u32 csum_start_off = skb_transport_offset(skb);
+               const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
 
                tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
                            ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
@@ -2704,7 +2698,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe
                        dev->dev_addr[i] = macaddr[i];
                macaddr[5]++;
        } else {
-               unsigned char *addr;
+               const unsigned char *addr;
                int len;
 
                addr = of_get_property(dp, "local-mac-address", &len);
@@ -2986,7 +2980,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
 {
        struct quattro *qp = NULL;
 #ifdef CONFIG_SPARC
-       struct pcidev_cookie *pcp;
+       struct device_node *dp;
 #endif
        struct happy_meal *hp;
        struct net_device *dev;
@@ -2998,13 +2992,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
 
        /* Now make sure pci_dev cookie is there. */
 #ifdef CONFIG_SPARC
-       pcp = pdev->sysdata;
-       if (pcp == NULL) {
-               printk(KERN_ERR "happymeal(PCI): Some PCI device info missing\n");
-               return -ENODEV;
-       }
-
-       strcpy(prom_name, pcp->prom_node->name);
+       dp = pci_device_to_OF_node(pdev);
+       strcpy(prom_name, dp->name);
 #else
        if (is_quattro_p(pdev))
                strcpy(prom_name, "SUNW,qfe");
@@ -3081,11 +3070,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
                macaddr[5]++;
        } else {
 #ifdef CONFIG_SPARC
-               unsigned char *addr;
+               const unsigned char *addr;
                int len;
 
                if (qfe_slot != -1 &&
-                   (addr = of_get_property(pcp->prom_node,
+                   (addr = of_get_property(dp,
                                            "local-mac-address", &len)) != NULL
                    && len == 6) {
                        memcpy(dev->dev_addr, addr, 6);
@@ -3105,7 +3094,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
        hp->tcvregs    = (hpreg_base + 0x7000UL);
 
 #ifdef CONFIG_SPARC
-       hp->hm_revision = of_getintprop_default(pcp->prom_node, "hm-rev", 0xff);
+       hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
        if (hp->hm_revision == 0xff) {
                unsigned char prev;
 
@@ -3300,7 +3289,7 @@ static int __devinit hme_sbus_probe(struct of_device *dev, const struct of_devic
 {
        struct sbus_dev *sdev = to_sbus_device(&dev->dev);
        struct device_node *dp = dev->node;
-       char *model = of_get_property(dp, "model", NULL);
+       const char *model = of_get_property(dp, "model", NULL);
        int is_qfe = (match->data != NULL);
 
        if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
@@ -3314,7 +3303,7 @@ static int __devexit hme_sbus_remove(struct of_device *dev)
        struct happy_meal *hp = dev_get_drvdata(&dev->dev);
        struct net_device *net_dev = hp->dev;
 
-       unregister_netdevice(net_dev);
+       unregister_netdev(net_dev);
 
        /* XXX qfe parent interrupt... */
 
index 5b00d79..4272253 100644 (file)
@@ -547,7 +547,6 @@ static void lance_rx_dvma(struct net_device *dev)
 
                        lp->stats.rx_bytes += len;
 
-                       skb->dev = dev;
                        skb_reserve(skb, 2);            /* 16 byte align */
                        skb_put(skb, len);              /* make room */
                        eth_copy_and_sum(skb,
@@ -721,7 +720,6 @@ static void lance_rx_pio(struct net_device *dev)
 
                        lp->stats.rx_bytes += len;
 
-                       skb->dev = dev;
                        skb_reserve (skb, 2);           /* 16 byte align */
                        skb_put(skb, len);              /* make room */
                        lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
@@ -1145,7 +1143,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
                struct lance_init_block *ib = lp->init_block_mem;
                ib->btx_ring [entry].length = (-len) | 0xf000;
                ib->btx_ring [entry].misc = 0;
-               memcpy((char *)&ib->tx_buf [entry][0], skb->data, skblen);
+               skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen);
                if (len != skblen)
                        memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
                ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
@@ -1550,7 +1548,7 @@ static int __exit sunlance_sun4_remove(void)
        struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
        struct net_device *net_dev = lp->dev;
 
-       unregister_netdevice(net_dev);
+       unregister_netdev(net_dev);
 
        lance_free_hwresources(lp);
 
@@ -1590,7 +1588,7 @@ static int __devexit sunlance_sbus_remove(struct of_device *dev)
        struct lance_private *lp = dev_get_drvdata(&dev->dev);
        struct net_device *net_dev = lp->dev;
 
-       unregister_netdevice(net_dev);
+       unregister_netdev(net_dev);
 
        lance_free_hwresources(lp);
 
index 7874eb1..fa70e0b 100644 (file)
@@ -437,7 +437,6 @@ static void qe_rx(struct sunqe *qep)
                                drops++;
                                qep->net_stats.rx_dropped++;
                        } else {
-                               skb->dev = qep->dev;
                                skb_reserve(skb, 2);
                                skb_put(skb, len);
                                eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
@@ -593,7 +592,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Avoid a race... */
        qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
 
-       memcpy(txbuf, skb->data, len);
+       skb_copy_from_linear_data(skb, txbuf, len);
 
        qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
        qep->qe_block->qe_txd[entry].tx_flags =
@@ -845,6 +844,8 @@ static int __init qec_ether_init(struct sbus_dev *sdev)
        if (!dev)
                return -ENOMEM;
 
+       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+
        qe = netdev_priv(dev);
 
        i = of_getintprop_default(sdev->ofdev.node, "channel#", -1);
@@ -960,7 +961,7 @@ static int __devexit qec_sbus_remove(struct of_device *dev)
        struct sunqe *qp = dev_get_drvdata(&dev->dev);
        struct net_device *net_dev = qp->dev;
 
-       unregister_netdevice(net_dev);
+       unregister_netdev(net_dev);
 
        sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
        sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
index e3a7e3c..d7741e2 100644 (file)
@@ -1145,7 +1145,6 @@ tc35815_rx(struct net_device *dev)
                                break;
                        }
                        skb_reserve(skb, 2);   /* 16 bit alignment */
-                       skb->dev = dev;
 
                        data = skb_put(skb, pkt_len);
 
index 256969e..9488f49 100644 (file)
 #include <linux/dma-mapping.h>
 
 #include <net/checksum.h>
+#include <net/ip.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/byteorder.h>
 #include <asm/uaccess.h>
 
-#ifdef CONFIG_SPARC64
+#ifdef CONFIG_SPARC
 #include <asm/idprom.h>
-#include <asm/oplib.h>
-#include <asm/pbm.h>
+#include <asm/prom.h>
 #endif
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -3349,7 +3349,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-                       memcpy(copy_skb->data, skb->data, len);
+                       skb_copy_from_linear_data(skb, copy_skb->data, len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 
                        /* We'll reuse the original ring buffer. */
@@ -3908,20 +3908,20 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
-                       tcp_opt_len = ((skb->h.th->doff - 5) * 4);
-                       ip_tcp_len = (skb->nh.iph->ihl * 4) +
-                                    sizeof(struct tcphdr);
+                       struct iphdr *iph = ip_hdr(skb);
+
+                       tcp_opt_len = tcp_optlen(skb);
+                       ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
 
-                       skb->nh.iph->check = 0;
-                       skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
-                                                    tcp_opt_len);
+                       iph->check = 0;
+                       iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }
 
                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);
 
-               skb->h.th->check = 0;
+               tcp_hdr(skb)->check = 0;
 
        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -4055,6 +4055,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
        mss = 0;
        if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
            (mss = skb_shinfo(skb)->gso_size) != 0) {
+               struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;
 
                if (skb_header_cloned(skb) &&
@@ -4063,8 +4064,8 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
                        goto out_unlock;
                }
 
-               tcp_opt_len = ((skb->h.th->doff - 5) * 4);
-               ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
+               tcp_opt_len = tcp_optlen(skb);
+               ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
 
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
@@ -4074,34 +4075,31 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);
 
-               skb->nh.iph->check = 0;
-               skb->nh.iph->tot_len = htons(mss + hdr_len);
+               iph = ip_hdr(skb);
+               iph->check = 0;
+               iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
-                       skb->h.th->check = 0;
+                       tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
-               }
-               else {
-                       skb->h.th->check =
-                               ~csum_tcpudp_magic(skb->nh.iph->saddr,
-                                                  skb->nh.iph->daddr,
-                                                  0, IPPROTO_TCP, 0);
-               }
+               } else
+                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                                iph->daddr, 0,
+                                                                IPPROTO_TCP,
+                                                                0);
 
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
-                       if (tcp_opt_len || skb->nh.iph->ihl > 5) {
+                       if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;
 
-                               tsflags = ((skb->nh.iph->ihl - 5) +
-                                          (tcp_opt_len >> 2));
+                               tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
-                       if (tcp_opt_len || skb->nh.iph->ihl > 5) {
+                       if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;
 
-                               tsflags = ((skb->nh.iph->ihl - 5) +
-                                          (tcp_opt_len >> 2));
+                               tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
@@ -10988,24 +10986,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        return err;
 }
 
-#ifdef CONFIG_SPARC64
+#ifdef CONFIG_SPARC
 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
 {
        struct net_device *dev = tp->dev;
        struct pci_dev *pdev = tp->pdev;
-       struct pcidev_cookie *pcp = pdev->sysdata;
-
-       if (pcp != NULL) {
-               unsigned char *addr;
-               int len;
-
-               addr = of_get_property(pcp->prom_node, "local-mac-address",
-                                       &len);
-               if (addr && len == 6) {
-                       memcpy(dev->dev_addr, addr, 6);
-                       memcpy(dev->perm_addr, dev->dev_addr, 6);
-                       return 0;
-               }
+       struct device_node *dp = pci_device_to_OF_node(pdev);
+       const unsigned char *addr;
+       int len;
+
+       addr = of_get_property(dp, "local-mac-address", &len);
+       if (addr && len == 6) {
+               memcpy(dev->dev_addr, addr, 6);
+               memcpy(dev->perm_addr, dev->dev_addr, 6);
+               return 0;
        }
        return -ENODEV;
 }
@@ -11026,7 +11020,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
        u32 hi, lo, mac_offset;
        int addr_ok = 0;
 
-#ifdef CONFIG_SPARC64
+#ifdef CONFIG_SPARC
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
 #endif
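
The tg3 TSO hunks above replace direct skb->nh/skb->h field access with the header accessor helpers; a short sketch of the equivalences the conversion depends on (illustration only, not from the patch):

    #include <net/ip.h>
    #include <net/tcp.h>

    /* ip_hdr(skb)/tcp_hdr(skb) return what the old code reached through
     * skb->nh.iph and skb->h.th; ip_hdrlen() is ihl * 4 and tcp_optlen()
     * is (doff - 5) * 4, matching the open-coded arithmetic removed above. */
    static unsigned int tso_header_len_sketch(const struct sk_buff *skb)
    {
            return ip_hdrlen(skb) + sizeof(struct tcphdr) + tcp_optlen(skb);
    }
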
index f85f002..106dc1e 100644 (file)
@@ -1112,7 +1112,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
 
        if ( bbuf ) {
                tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
-               memcpy( tail_buffer, skb->data, skb->len );
+               skb_copy_from_linear_data(skb, tail_buffer, skb->len);
        } else {
                tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE);
                TLan_StoreSKB(tail_list, skb);
@@ -1577,7 +1577,6 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
                                printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n");
                        else {
                                head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE);
-                               skb->dev = dev;
                                skb_reserve(skb, 2);
                                t = (void *) skb_put(skb, frameSize);
 
@@ -1608,7 +1607,6 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
                                skb->protocol = eth_type_trans( skb, dev );
                                netif_rx( skb );
 
-                               new_skb->dev = dev;
                                skb_reserve( new_skb, 2 );
                                t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE );
                                head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
index 7580bde..e22a3f5 100644 (file)
@@ -933,20 +933,21 @@ static void xl_rx(struct net_device *dev)
                                return ;                                
                        }
        
-                       skb->dev = dev ; 
-
                        while (xl_priv->rx_ring_tail != temp_ring_loc) { 
                                copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ; 
                                frame_length -= copy_len ;  
                                pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
-                               memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, copy_len) ; 
+                               skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
+                                                         skb_put(skb, copy_len),
+                                                         copy_len);
                                pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
                                adv_rx_ring(dev) ; 
                        } 
 
                        /* Now we have found the last fragment */
                        pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
-                       memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, frame_length) ; 
+                       skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
+                                     skb_put(skb,copy_len), frame_length);
 /*                     memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
                        pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
                        adv_rx_ring(dev) ; 
@@ -967,8 +968,6 @@ static void xl_rx(struct net_device *dev)
                                return ; 
                        }
 
-                       skb->dev = dev ; 
-
                        skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ; 
                        pci_unmap_single(xl_priv->pdev, xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr, xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 
                        skb_put(skb2, frame_length) ; 
index 01d5531..1e8958e 100644 (file)
@@ -1771,7 +1771,6 @@ static void tr_rx(struct net_device *dev)
        /*BMS again, if she comes in with few but leaves with many */
        skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len);
        skb_put(skb, length);
-       skb->dev = dev;
        data = skb->data;
        rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len)));
        rbufdata = rbuf + offsetof(struct rec_buf, data);
index e999feb..5d849c0 100644 (file)
@@ -944,8 +944,6 @@ static void streamer_rx(struct net_device *dev)
                                printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
                                streamer_priv->streamer_stats.rx_dropped++;
                        } else {        /* we allocated an skb OK */
-                               skb->dev = dev;
-
                                if (buffer_cnt == 1) {
                                        /* release the DMA mapping */
                                        pci_unmap_single(streamer_priv->pci_dev, 
@@ -1607,10 +1605,11 @@ static void streamer_arb_cmd(struct net_device *dev)
                                      frame_data, buffer_len);
                } while (next_ptr && (buff_off = next_ptr));
 
+               mac_frame->protocol = tr_type_trans(mac_frame, dev);
 #if STREAMER_NETWORK_MONITOR
                printk(KERN_WARNING "%s: Received MAC Frame, details: \n",
                       dev->name);
-               mac_hdr = (struct trh_hdr *) mac_frame->data;
+               mac_hdr = tr_hdr(mac_frame);
                printk(KERN_WARNING
                       "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n",
                       dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1],
@@ -1622,8 +1621,6 @@ static void streamer_arb_cmd(struct net_device *dev)
                       mac_hdr->saddr[2], mac_hdr->saddr[3],
                       mac_hdr->saddr[4], mac_hdr->saddr[5]);
 #endif
-               mac_frame->dev = dev;
-               mac_frame->protocol = tr_type_trans(mac_frame, dev);
                netif_rx(mac_frame);
 
                /* Now tell the card we have dealt with the received frame */
index 8f4ecc1..09b3cfb 100644 (file)
@@ -814,8 +814,6 @@ static void olympic_rx(struct net_device *dev)
                                        olympic_priv->rx_ring_last_received += i ; 
                                        olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;  
                                } else  {
-                                       skb->dev = dev ; 
-
                                        /* Optimise based upon number of buffers used. 
                                           If only one buffer is used we can simply swap the buffers around.
                                           If more than one then we must use the new buffer and copy the information
@@ -847,7 +845,9 @@ static void olympic_rx(struct net_device *dev)
                                                        pci_dma_sync_single_for_cpu(olympic_priv->pdev,
                                                                le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
                                                                olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 
-                                                       memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ; 
+                                                       skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
+                                                                     skb_put(skb,length - 4),
+                                                                     length - 4);
                                                        pci_dma_sync_single_for_device(olympic_priv->pdev,
                                                                le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
                                                                olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
@@ -864,7 +864,9 @@ static void olympic_rx(struct net_device *dev)
                                                                olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 
                                                        rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
                                                        cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length)); 
-                                                       memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
+                                                       skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
+                                                                     skb_put(skb, cpy_length),
+                                                                     cpy_length);
                                                        pci_dma_sync_single_for_device(olympic_priv->pdev,
                                                                le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
                                                                olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
@@ -1440,16 +1442,16 @@ static void olympic_arb_cmd(struct net_device *dev)
                        next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next)); 
                } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr)));
 
+               mac_frame->protocol = tr_type_trans(mac_frame, dev);
+
                if (olympic_priv->olympic_network_monitor) { 
                        struct trh_hdr *mac_hdr ; 
                        printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ;
-                       mac_hdr = (struct trh_hdr *)mac_frame->data ; 
+                       mac_hdr = tr_hdr(mac_frame);
                        printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ; 
                        printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ; 
                }
-               mac_frame->dev = dev ; 
-               mac_frame->protocol = tr_type_trans(mac_frame,dev);
-               netif_rx(mac_frame) ;   
+               netif_rx(mac_frame);
                dev->last_rx = jiffies;
 
 drop_frame:
index cec282a..9bbea5c 100644 (file)
@@ -3889,14 +3889,13 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 
                 /* Slide data into a sleek skb. */
                 skb_put(skb, skb->len);
-                memcpy(skb->data, rmf, skb->len);
+                skb_copy_to_linear_data(skb, rmf, skb->len);
 
                 /* Update Counters */
                 tp->MacStat.rx_packets++;
                 tp->MacStat.rx_bytes += skb->len;
 
                 /* Kick the packet on up. */
-                skb->dev = dev;
                 skb->protocol = tr_type_trans(skb, dev);
                 netif_rx(skb);
                dev->last_rx = jiffies;
@@ -4476,14 +4475,13 @@ static int smctr_rx_frame(struct net_device *dev)
                                if (skb) {
                                        skb_put(skb, rx_size);
 
-                                       memcpy(skb->data, pbuff, rx_size);
+                                       skb_copy_to_linear_data(skb, pbuff, rx_size);
 
                                        /* Update Counters */
                                        tp->MacStat.rx_packets++;
                                        tp->MacStat.rx_bytes += skb->len;
 
                                        /* Kick the packet on up. */
-                                       skb->dev = dev;
                                        skb->protocol = tr_type_trans(skb, dev);
                                        netif_rx(skb);
                                        dev->last_rx = jiffies;
index ea797ca..12bd294 100644 (file)
@@ -644,7 +644,7 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device
                dmabuf  = 0;
                i       = tp->TplFree->TPLIndex;
                buf     = tp->LocalTxBuffers[i];
-               memcpy(buf, skb->data, length);
+               skb_copy_from_linear_data(skb, buf, length);
                newbuf  = ((char *)buf - (char *)tp) + tp->dmabuffer;
        }
        else {
@@ -2168,7 +2168,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
                                }
                                else
                                {
-                                       skb->dev        = dev;
                                        skb_put(skb, tp->MaxPacketSize);
                                        rpl->SkbStat    = SKB_DATA_COPY;
                                        ReceiveDataPtr  = rpl->MData;
@@ -2179,7 +2178,8 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
                                || rpl->SkbStat == SKB_DMA_DIRECT))
                        {
                                if(rpl->SkbStat == SKB_DATA_COPY)
-                                       memcpy(skb->data, ReceiveDataPtr, Length);
+                                       skb_copy_to_linear_data(skb, ReceiveDataPtr,
+                                                      Length);
 
                                /* Deliver frame to system */
                                rpl->Skb = NULL;
index d92c5c5..0bfc2c9 100644 (file)
@@ -788,7 +788,6 @@ static int tsi108_complete_rx(struct net_device *dev, int budget)
                        printk(".\n");
                }
 
-               skb->dev = dev;
                skb_put(skb, data->rxring[rx].len);
                skb->protocol = eth_type_trans(skb, dev);
                netif_receive_skb(skb);
index c82befa..8617298 100644 (file)
@@ -63,7 +63,7 @@ MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
 
 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
-        || defined(__sparc__) || defined(__ia64__) \
+        || defined(CONFIG_SPARC) || defined(__ia64__) \
         || defined(__sh__) || defined(__mips__)
 static int rx_copybreak = 1518;
 #else
@@ -435,7 +435,6 @@ static void de_rx (struct de_private *de)
                        rx_work = 100;
                        goto rx_next;
                }
-               copy_skb->dev = de->dev;
 
                if (!copying_skb) {
                        pci_unmap_single(de->pdev, mapping,
@@ -450,8 +449,8 @@ static void de_rx (struct de_private *de)
                } else {
                        pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
                        skb_reserve(copy_skb, RX_OFFSET);
-                       memcpy(skb_put(copy_skb, len), skb->data, len);
-
+                       skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
+                                                 len);
                        pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
 
                        /* We'll reuse the original ring buffer. */
index 4b3cd3d..62143f9 100644 (file)
@@ -1160,7 +1160,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
        sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id);
 
        lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
-#if defined(__alpha__) || defined(__powerpc__) || defined(__sparc_v9__) || defined(DE4X5_DO_MEMCPY)
+#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
        lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
 #endif
        lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
@@ -1175,7 +1175,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
        ** Set up the RX descriptor ring (Intels)
        ** Allocate contiguous receive buffers, long word aligned (Alphas)
        */
-#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
+#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
        for (i=0; i<NUM_RX_DESC; i++) {
            lp->rx_ring[i].status = 0;
            lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
@@ -1252,11 +1252,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
            mii_get_phy(dev);
        }
 
-#ifndef __sparc_v9__
        printk("      and requires IRQ%d (provided by %s).\n", dev->irq,
-#else
-       printk("      and requires IRQ%x (provided by %s).\n", dev->irq,
-#endif
               ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
     }
 
@@ -3627,14 +3623,13 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
     struct de4x5_private *lp = netdev_priv(dev);
     struct sk_buff *p;
 
-#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
+#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
     struct sk_buff *ret;
     u_long i=0, tmp;
 
     p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2);
     if (!p) return NULL;
 
-    p->dev = dev;
     tmp = virt_to_bus(p->data);
     i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
     skb_reserve(p, i);
@@ -3655,7 +3650,6 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
     p = dev_alloc_skb(len + 2);
     if (!p) return NULL;
 
-    p->dev = dev;
     skb_reserve(p, 2);                                /* Align */
     if (index < lp->rx_old) {                          /* Wrapped buffer */
        short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
index 9aeac76..b3a64ca 100644 (file)
@@ -682,7 +682,7 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
 
        /* transmit this packet */
        txptr = db->tx_insert_ptr;
-       memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
        txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
 
        /* Point to next transmit free descriptor */
@@ -988,14 +988,14 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
 
                                                skb = newskb;
                                                /* size less than COPY_SIZE, allocate a rxlen SKB */
-                                               skb->dev = dev;
                                                skb_reserve(skb, 2); /* 16byte align */
-                                               memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
+                                               skb_copy_from_linear_data(rxptr->rx_skb_ptr,
+                                                         skb_put(skb, rxlen),
+                                                                         rxlen);
                                                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
-                                       } else {
-                                               skb->dev = dev;
+                                       } else
                                                skb_put(skb, rxlen);
-                                       }
+
                                        skb->protocol = eth_type_trans(skb, dev);
                                        netif_rx(skb);
                                        dev->last_rx = jiffies;
index e3488d7..e86df07 100644 (file)
@@ -192,7 +192,6 @@ int tulip_poll(struct net_device *dev, int *budget)
                                   to a minimally-sized skbuff. */
                                if (pkt_len < tulip_rx_copybreak
                                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                                       skb->dev = dev;
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
                                                                   tp->rx_buffers[entry].mapping,
@@ -416,7 +415,6 @@ static int tulip_rx(struct net_device *dev)
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak
                                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
index e3774a5..e9bf526 100644 (file)
@@ -36,8 +36,8 @@
 #include <asm/unaligned.h>
 #include <asm/uaccess.h>
 
-#ifdef __sparc__
-#include <asm/pbm.h>
+#ifdef CONFIG_SPARC
+#include <asm/prom.h>
 #endif
 
 static char version[] __devinitdata =
@@ -67,7 +67,7 @@ const char * const medianame[32] = {
 
 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
-       || defined(__sparc__) || defined(__ia64__) \
+       || defined(CONFIG_SPARC) || defined(__ia64__) \
        || defined(__sh__) || defined(__mips__)
 static int rx_copybreak = 1518;
 #else
@@ -91,7 +91,7 @@ static int rx_copybreak = 100;
 static int csr0 = 0x01A00000 | 0xE000;
 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
 static int csr0 = 0x01A00000 | 0x8000;
-#elif defined(__sparc__) || defined(__hppa__)
+#elif defined(CONFIG_SPARC) || defined(__hppa__)
 /* The UltraSparc PCI controllers will disconnect at every 64-byte
  * crossing anyways so it makes no sense to tell Tulip to burst
  * any more than that.
@@ -1315,7 +1315,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
        /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
        if (tulip_uli_dm_quirk(pdev)) {
                csr0 &= ~0x01f100ff;
-#if defined(__sparc__)
+#if defined(CONFIG_SPARC)
                 csr0 = (csr0 & ~0xff00) | 0xe000;
 #endif
        }
@@ -1535,23 +1535,19 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
           Many PCI BIOSes also incorrectly report the IRQ line, so we correct
           that here as well. */
        if (sum == 0  || sum == 6*0xff) {
-#if defined(__sparc__)
-               struct pcidev_cookie *pcp = pdev->sysdata;
+#if defined(CONFIG_SPARC)
+               struct device_node *dp = pci_device_to_OF_node(pdev);
+               const unsigned char *addr;
+               int len;
 #endif
                eeprom_missing = 1;
                for (i = 0; i < 5; i++)
                        dev->dev_addr[i] = last_phys_addr[i];
                dev->dev_addr[i] = last_phys_addr[i] + 1;
-#if defined(__sparc__)
-               if (pcp) {
-                       unsigned char *addr;
-                       int len;
-
-                       addr = of_get_property(pcp->prom_node,
-                                              "local-mac-address", &len);
-                       if (addr && len == 6)
-                               memcpy(dev->dev_addr, addr, 6);
-               }
+#if defined(CONFIG_SPARC)
+               addr = of_get_property(dp, "local-mac-address", &len);
+               if (addr && len == 6)
+                       memcpy(dev->dev_addr, addr, 6);
 #endif
 #if defined(__i386__) || defined(__x86_64__)   /* Patch up x86 BIOS bug. */
                if (last_irq)
index 229158e..ca2548e 100644 (file)
@@ -583,7 +583,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* transmit this packet */
        txptr = db->tx_insert_ptr;
-       memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
        txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
 
        /* Point to next transmit free descriptor */
@@ -828,14 +828,14 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
                                        ( (skb = dev_alloc_skb(rxlen + 2) )
                                        != NULL) ) {
                                        /* size less than COPY_SIZE, allocate a rxlen SKB */
-                                       skb->dev = dev;
                                        skb_reserve(skb, 2); /* 16byte align */
-                                       memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
+                                       memcpy(skb_put(skb, rxlen),
+                                              skb_tail_pointer(rxptr->rx_skb_ptr),
+                                              rxlen);
                                        uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
-                               } else {
-                                       skb->dev = dev;
+                               } else
                                        skb_put(skb, rxlen);
-                               }
+
                                skb->protocol = eth_type_trans(skb, dev);
                                netif_rx(skb);
                                dev->last_rx = jiffies;
@@ -1177,7 +1177,10 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
 
        if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
                rxptr->rx_skb_ptr = skb;
-               rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+               rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
+                                                         skb_tail_pointer(skb),
+                                                         RX_ALLOC_SIZE,
+                                                         PCI_DMA_FROMDEVICE));
                wmb();
                rxptr->rdes0 = cpu_to_le32(0x80000000);
                db->rx_avail_cnt++;
@@ -1341,7 +1344,10 @@ static void allocate_rx_buffer(struct uli526x_board_info *db)
                if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
                        break;
                rxptr->rx_skb_ptr = skb; /* FIXME (?) */
-               rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+               rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
+                                                         skb_tail_pointer(skb),
+                                                         RX_ALLOC_SIZE,
+                                                         PCI_DMA_FROMDEVICE));
                wmb();
                rxptr->rdes0 = cpu_to_le32(0x80000000);
                rxptr = rxptr->next_rx_desc;
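
The uli526x receive-buffer mapping now goes through skb_tail_pointer() instead of dereferencing skb->tail directly; a minimal sketch of the idea (names are illustrative, not from the driver):

    #include <linux/pci.h>
    #include <linux/skbuff.h>

    /* skb_tail_pointer(skb) yields the tail of the linear data area; with
     * the offset-based skb layout on 64-bit builds, skb->tail is no longer
     * a plain pointer, so DMA mappings must use the accessor. */
    static dma_addr_t map_rx_tail_sketch(struct pci_dev *pdev,
                                         struct sk_buff *skb, size_t size)
    {
            return pci_map_single(pdev, skb_tail_pointer(skb), size,
                                  PCI_DMA_FROMDEVICE);
    }
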
index 002a05e..5b71ac7 100644 (file)
@@ -813,7 +813,6 @@ static void init_rxtx_rings(struct net_device *dev)
                np->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
-               skb->dev = dev;                 /* Mark as being used by this device. */
                np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
                                        np->rx_buf_sz,PCI_DMA_FROMDEVICE);
 
@@ -903,7 +902,7 @@ static void init_registers(struct net_device *dev)
        }
 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
        i |= 0xE000;
-#elif defined(__sparc__) || defined (CONFIG_PARISC)
+#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
        i |= 0x4800;
 #else
 #warning Processor architecture undefined
@@ -1229,7 +1228,6 @@ static int netdev_rx(struct net_device *dev)
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak
                                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
                                                            np->rx_skbuff[entry]->len,
@@ -1278,7 +1276,6 @@ static int netdev_rx(struct net_device *dev)
                        np->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;                  /* Better luck next round. */
-                       skb->dev = dev;                 /* Mark as being used by this device. */
                        np->rx_addr[entry] = pci_map_single(np->pci_dev,
                                                        skb->data,
                                                        np->rx_buf_sz, PCI_DMA_FROMDEVICE);
index 61d3130..985a181 100644 (file)
@@ -411,9 +411,9 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
                           sometimes sends more than you ask it to. */
 
                        memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
-                       memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len);
-
-
+                       skb_copy_from_linear_data(skb,
+                                 &(card->tx_buffer[bufferoffsets[desc] / 4]),
+                                                 skb->len);
                        /* FIXME: The specification tells us that the length we send HAS to be a multiple of
                           4 bytes. */
 
@@ -1207,7 +1207,6 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
                                card->stats.rx_dropped++;
                                goto out;
                        }
-                       skb->dev = dev;
                        skb_reserve(skb, 2);
                        eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0);
                        skb_put(skb, pkt_len);
index a998c5d..f641729 100644 (file)
@@ -65,7 +65,7 @@ static int rx_copybreak = 100;
 static int csr0 = 0x01A00000 | 0xE000;
 #elif defined(__powerpc__)
 static int csr0 = 0x01B00000 | 0x8000;
-#elif defined(__sparc__)
+#elif defined(CONFIG_SPARC)
 static int csr0 = 0x01B00080 | 0x8000;
 #elif defined(__i386__)
 static int csr0 = 0x01A00000 | 0x8000;
@@ -915,7 +915,9 @@ xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tp->tx_skbuff[entry] = skb;
        if (tp->chip_id == X3201_3) {
-               memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len);
+               skb_copy_from_linear_data(skb,
+                                         tp->tx_aligned_skbuff[entry]->data,
+                                         skb->len);
                tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
        } else
                tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
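The skb_copy_from_linear_data() call added above, like the many similar conversions in the hunks that follow, stands in for a plain memcpy() on the skb's linear data area, as the replaced lines themselves show. A behavioural sketch of the helpers as this series uses them (names prefixed "sketch_" to mark them as illustrations, not the kernel's definitions):

	#include <linux/skbuff.h>
	#include <linux/string.h>

	/* Copy out of the linear area, optionally starting at an offset. */
	static inline void sketch_copy_from_linear_data(const struct sk_buff *skb,
							void *to, unsigned int len)
	{
		memcpy(to, skb->data, len);
	}

	static inline void sketch_copy_from_linear_data_offset(const struct sk_buff *skb,
								int offset, void *to,
								unsigned int len)
	{
		memcpy(to, skb->data + offset, len);
	}

	/* Copy into the linear area. */
	static inline void sketch_copy_to_linear_data(struct sk_buff *skb,
						      const void *from,
						      unsigned int len)
	{
		memcpy(skb->data, from, len);
	}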
@@ -1238,7 +1240,6 @@ xircom_rx(struct net_device *dev)
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak
                                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
 #if ! defined(__alpha__)
                                eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
index 5643d1e..a2c6caa 100644 (file)
 /*
  *  Changes:
  *
+ *  Brian Braunstein <linuxkernel@bristyle.com> 2007/03/23
+ *    Fixed hw address handling.  Now net_device.dev_addr is kept consistent
+ *    with tun.dev_addr when the address is set by this module.
+ *
  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
  *    Add TUNSETLINK ioctl to set the link encapsulation
  *
@@ -196,7 +200,10 @@ static void tun_net_init(struct net_device *dev)
                dev->set_multicast_list = tun_net_mclist;
 
                ether_setup(dev);
-               random_ether_addr(dev->dev_addr);
+
+               /* random address already created for us by tun_set_iff, use it */
+               memcpy(dev->dev_addr, tun->dev_addr, min(sizeof(tun->dev_addr), sizeof(dev->dev_addr)) );
+
                dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
                break;
        }
@@ -254,11 +261,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
                return -EFAULT;
        }
 
-       skb->dev = tun->dev;
        switch (tun->flags & TUN_TYPE_MASK) {
        case TUN_TUN_DEV:
-               skb->mac.raw = skb->data;
+               skb_reset_mac_header(skb);
                skb->protocol = pi.proto;
+               skb->dev = tun->dev;
                break;
        case TUN_TAP_DEV:
                skb->protocol = eth_type_trans(skb, tun->dev);
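The hunk above shows both receive-side idioms this series standardises on: for raw (TUN) frames the MAC header position is recorded explicitly with skb_reset_mac_header(), replacing the old skb->mac.raw = skb->data assignment, while for Ethernet (TAP) frames eth_type_trans() is left to fill in skb->dev and skb->protocol, which is consistent with the explicit skb->dev assignments being dropped throughout these drivers. A condensed sketch of the pattern (a generic receive path, not any particular driver):

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	static void sketch_deliver(struct net_device *dev, struct sk_buff *skb,
				   bool raw_ip_frame, __be16 proto)
	{
		if (raw_ip_frame) {
			/* was: skb->mac.raw = skb->data; */
			skb_reset_mac_header(skb);
			skb->protocol = proto;
			skb->dev = dev;
		} else {
			/* eth_type_trans() sets skb->dev and skb->protocol */
			skb->protocol = eth_type_trans(skb, dev);
		}
		netif_rx(skb);
	}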
@@ -386,8 +393,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
                 *   - we are multicast promiscuous.
                 *   - we belong to the multicast group.
                 */
-               memcpy(addr, skb->data,
-                      min_t(size_t, sizeof addr, skb->len));
+               skb_copy_from_linear_data(skb, addr, min_t(size_t, sizeof addr,
+                                                                  skb->len));
                bit_nr = ether_crc(sizeof addr, addr) >> 26;
                if ((tun->if_flags & IFF_PROMISC) ||
                                memcmp(addr, tun->dev_addr, sizeof addr) == 0 ||
@@ -636,6 +643,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
                return 0;
 
        case SIOCGIFHWADDR:
+               /* Note: the actual net device's address may be different */
                memcpy(ifr.ifr_hwaddr.sa_data, tun->dev_addr,
                                min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr));
                if (copy_to_user( argp, &ifr, sizeof ifr))
@@ -643,16 +651,24 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
                return 0;
 
        case SIOCSIFHWADDR:
-               /** Set the character device's hardware address. This is used when
-                * filtering packets being sent from the network device to the character
-                * device. */
-               memcpy(tun->dev_addr, ifr.ifr_hwaddr.sa_data,
-                               min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr));
-               DBG(KERN_DEBUG "%s: set hardware address: %x:%x:%x:%x:%x:%x\n",
-                               tun->dev->name,
-                               tun->dev_addr[0], tun->dev_addr[1], tun->dev_addr[2],
-                               tun->dev_addr[3], tun->dev_addr[4], tun->dev_addr[5]);
-               return 0;
+       {
+               /* try to set the actual net device's hw address */
+               int ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
+
+               if (ret == 0) {
+                       /** Set the character device's hardware address. This is used when
+                        * filtering packets being sent from the network device to the character
+                        * device. */
+                       memcpy(tun->dev_addr, ifr.ifr_hwaddr.sa_data,
+                                       min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr));
+                       DBG(KERN_DEBUG "%s: set hardware address: %x:%x:%x:%x:%x:%x\n",
+                                       tun->dev->name,
+                                       tun->dev_addr[0], tun->dev_addr[1], tun->dev_addr[2],
+                                       tun->dev_addr[3], tun->dev_addr[4], tun->dev_addr[5]);
+               }
+
+               return  ret;
+       }
 
        case SIOCADDMULTI:
                /** Add the specified group to the character device's multicast filter
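With the SIOCSIFHWADDR change above, setting the hardware address through the tun character device now goes through dev_set_mac_address() first, so tun->dev_addr and net_device.dev_addr stay consistent, as the 2007/03/23 changelog entry at the top of the file describes. A hypothetical userspace sketch of that path (interface name and error handling invented for illustration):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/if.h>
	#include <linux/if_arp.h>
	#include <linux/if_tun.h>

	/* Create a tap interface and set its MAC through the same fd; the
	 * patched ioctl handler forwards the address to the net device. */
	static int sketch_tap_set_mac(const unsigned char mac[6])
	{
		struct ifreq ifr;
		int fd = open("/dev/net/tun", O_RDWR);

		if (fd < 0)
			return -1;

		memset(&ifr, 0, sizeof(ifr));
		ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
		strncpy(ifr.ifr_name, "tap-demo0", IFNAMSIZ);	/* hypothetical name */
		if (ioctl(fd, TUNSETIFF, &ifr) < 0)
			goto err;

		memset(&ifr, 0, sizeof(ifr));
		ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
		memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);
		if (ioctl(fd, SIOCSIFHWADDR, &ifr) < 0)
			goto err;

		return fd;	/* keep it open: closing tears down the interface */
	err:
		close(fd);
		return -1;
	}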
index 0d91d09..f2dd776 100644 (file)
@@ -1708,7 +1708,6 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
 
                if(pkt_len < rx_copybreak &&
                   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                       new_skb->dev = tp->dev;
                        skb_reserve(new_skb, 2);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
                                                    PKT_BUF_SZ,
index f3a972e..adea290 100644 (file)
@@ -1486,7 +1486,6 @@ static int rhine_rx(struct net_device *dev, int limit)
                           copying to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
                                (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(rp->pdev,
                                                            rp->rx_skbuff_dma[entry],
index 8e5d820..25b75b6 100644 (file)
@@ -1339,7 +1339,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
                        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
                                skb_reserve(new_skb, 2);
 
-                       memcpy(new_skb->data, rx_skb[0]->data, pkt_size);
+                       skb_copy_from_linear_data(rx_skb[0], new_skb->data,
+                                                 pkt_size);
                        *rx_skb = new_skb;
                        ret = 0;
                }
@@ -1398,7 +1399,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
                vptr->stats.multicast++;
 
        skb = rd_info->skb;
-       skb->dev = vptr->dev;
 
        pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
                                    vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
@@ -1428,7 +1428,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
                   PCI_DMA_FROMDEVICE);
 
        skb_put(skb, pkt_len - 4);
-       skb->protocol = eth_type_trans(skb, skb->dev);
+       skb->protocol = eth_type_trans(skb, vptr->dev);
 
        stats->rx_bytes += pkt_len;
        netif_rx(skb);
@@ -1928,7 +1928,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
        if (pktlen < ETH_ZLEN) {
                /* Cannot occur until ZC support */
                pktlen = ETH_ZLEN;
-               memcpy(tdinfo->buf, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
                memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
                tdinfo->skb = skb;
                tdinfo->skb_dma[0] = tdinfo->buf_dma;
@@ -1944,7 +1944,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
                int nfrags = skb_shinfo(skb)->nr_frags;
                tdinfo->skb = skb;
                if (nfrags > 6) {
-                       memcpy(tdinfo->buf, skb->data, skb->len);
+                       skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
                        tdinfo->skb_dma[0] = tdinfo->buf_dma;
                        td_ptr->tdesc0.pktsize =
                        td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
@@ -2007,7 +2007,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
                                 && (skb->ip_summed == CHECKSUM_PARTIAL)) {
-               struct iphdr *ip = skb->nh.iph;
+               const struct iphdr *ip = ip_hdr(skb);
                if (ip->protocol == IPPROTO_TCP)
                        td_ptr->tdesc1.TCR |= TCR0_TCPCK;
                else if (ip->protocol == IPPROTO_UDP)
index 5b82e4f..2346473 100644 (file)
@@ -773,7 +773,7 @@ static int sppp_rx_done(struct channel_data *chan)
        }
        chan->rx_skb->protocol = htons(ETH_P_WAN_PPP);
        chan->rx_skb->dev = chan->pppdev.dev;
-       chan->rx_skb->mac.raw = chan->rx_skb->data;
+       skb_reset_mac_header(chan->rx_skb);
        chan->stats.rx_packets++;
        chan->stats.rx_bytes += chan->cosa->rxsize;
        netif_rx(chan->rx_skb);
index a631d1c..016b3ff 100644 (file)
@@ -834,7 +834,7 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
        ++chan->ifstats.rx_packets;
        chan->ifstats.rx_bytes += pktlen;
 
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        netif_rx(skb);
        dev->last_rx = jiffies;         /* timestamp */
 }
index 7369875..66be20c 100644 (file)
@@ -176,7 +176,7 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
        if (process)
        {
                /* we've set up the protocol, so discard the header */
-               skb->mac.raw = skb->data; 
+               skb_reset_mac_header(skb);
                skb_pull(skb, header);
                dlp->stats.rx_bytes += skb->len;
                netif_rx(skb);
index 25021a7..dca0244 100644 (file)
@@ -1904,7 +1904,8 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
                struct TxFD *tx_fd = dpriv->tx_fd + last;
 
                skb->len = DUMMY_SKB_SIZE;
-               memcpy(skb->data, version, strlen(version)%DUMMY_SKB_SIZE);
+               skb_copy_to_linear_data(skb, version,
+                                       strlen(version) % DUMMY_SKB_SIZE);
                tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
                tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
                                             DUMMY_SKB_SIZE, PCI_DMA_TODEVICE);
index c45d6a8..58a53b6 100644 (file)
@@ -864,7 +864,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
 static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev)
 {
        skb->dev = dev;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb->pkt_type = PACKET_HOST;
        return htons(ETH_P_CUST);
 }
index c9664fd..00e0aaa 100644 (file)
@@ -124,7 +124,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
        skb_put(skb, sizeof(struct cisco_packet));
        skb->priority = TC_PRIO_CONTROL;
        skb->dev = dev;
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        dev_queue_xmit(skb);
 }
index c6c3c75..aeb2789 100644 (file)
@@ -533,7 +533,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
                skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI);
                fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
        }
-       data = skb->tail;
+       data = skb_tail_pointer(skb);
        data[i++] = LMI_CALLREF;
        data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
        if (lmi == LMI_ANSI)
@@ -590,7 +590,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
        skb_put(skb, i);
        skb->priority = TC_PRIO_CONTROL;
        skb->dev = dev;
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        dev_queue_xmit(skb);
 }
@@ -1011,7 +1011,6 @@ static int fr_rx(struct sk_buff *skb)
                stats->rx_bytes += skb->len;
                if (pvc->state.becn)
                        stats->rx_compressed++;
-               skb->dev = dev;
                netif_rx(skb);
                return NET_RX_SUCCESS;
        } else {
index a02c5fb..9ba3e4e 100644 (file)
@@ -59,7 +59,7 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
        /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
        skb_trim(skb, skb->len-2);
        skb->protocol=__constant_htons(ETH_P_WAN_PPP);
-       skb->mac.raw=skb->data;
+       skb_reset_mac_header(skb);
        skb->dev=c->netdevice;
        /*
         *      Send it to the PPP layer. We don't have time to process
index 2b54f1b..ae132c1 100644 (file)
@@ -1636,7 +1636,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
             if (nsb) {
                 sc->lmc_rxq[i] = nsb;
                 nsb->dev = dev;
-                sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail);
+                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
             }
             sc->failed_recv_alloc = 1;
             goto skip_packet;
@@ -1667,8 +1667,8 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
             skb_put (skb, len);
             skb->protocol = lmc_proto_type(sc, skb);
             skb->protocol = htons(ETH_P_WAN_PPP);
-            skb->mac.raw = skb->data;
-//            skb->nh.raw = skb->data;
+            skb_reset_mac_header(skb);
+            /* skb_reset_network_header(skb); */
             skb->dev = dev;
             lmc_proto_netif(sc, skb);
 
@@ -1679,7 +1679,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
             if (nsb) {
                 sc->lmc_rxq[i] = nsb;
                 nsb->dev = dev;
-                sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail);
+                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
                 /* Transferred to 21140 below */
             }
             else {
@@ -1702,11 +1702,11 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
             if(!nsb) {
                 goto give_it_anyways;
             }
-            memcpy(skb_put(nsb, len), skb->data, len);
+            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
             
             nsb->protocol = lmc_proto_type(sc, skb);
-            nsb->mac.raw = nsb->data;
-//            nsb->nh.raw = nsb->data;
+            skb_reset_mac_header(nsb);
+            /* skb_reset_network_header(nsb); */
             nsb->dev = dev;
             lmc_proto_netif(sc, nsb);
         }
@@ -1932,7 +1932,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
         sc->lmc_rxring[i].status = 0x80000000;
 
         /* used to be PKT_BUF_SZ; now uses skb since we lose some to headroom */
-        sc->lmc_rxring[i].length = skb->end - skb->data;
+        sc->lmc_rxring[i].length = skb_tailroom(skb);
 
         /* used to be tail, which is dumb since you're thinking why write
          * to the end of the packet, but since there's nothing there tail == data
index 62184de..999bf71 100644 (file)
@@ -1755,17 +1755,17 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
 
        skb->dev = dev;
        skb->protocol = htons(ETH_P_CUST);
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb->pkt_type = PACKET_HOST;
        skb->len = 10 + skb_main->len;
 
-       memcpy(skb->data, dev->name, 5);
+       skb_copy_to_linear_data(skb, dev->name, 5);
        skb->data[5] = '[';
        skb->data[6] = rx_tx;
        skb->data[7] = ']';
        skb->data[8] = ':';
        skb->data[9] = ' ';
-       memcpy(&skb->data[10], skb_main->data, skb_main->len);
+       skb_copy_from_linear_data(skb_main, &skb->data[10], skb_main->len);
 
        netif_rx(skb);
 }
index 5873c34..07dbdfb 100644 (file)
@@ -1003,17 +1003,17 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx)
        skb_put (skb, 10 + len); 
        skb->dev = dev->dev; 
        skb->protocol = htons(ETH_P_CUST); 
-       skb->mac.raw = skb->data; 
+       skb_reset_mac_header(skb);
        skb->pkt_type = PACKET_HOST; 
        skb->len = 10 + len; 
 
-       memcpy(skb->data,dev->dev->name,5);
+       skb_copy_to_linear_data(skb, dev->dev->name, 5);
        skb->data[5] = '['; 
        skb->data[6] = rxtx; 
        skb->data[7] = ']'; 
        skb->data[8] = ':'; 
        skb->data[9] = ' '; 
-       memcpy(&skb->data[10], buf, len); 
+       skb_copy_to_linear_data_offset(skb, 10, buf, len);
        netif_rx(skb); 
 }      
 
index fc5c0c6..35eded7 100644 (file)
@@ -999,11 +999,6 @@ get_rx_buf( struct net_device  *dev )
        if( !skb )
                return  NULL;
 
-#ifdef CONFIG_SBNI_MULTILINE
-       skb->dev = ((struct net_local *) dev->priv)->master;
-#else
-       skb->dev = dev;
-#endif
        skb_reserve( skb, 2 );          /* Align IP on longword boundaries */
        return  skb;
 }
index 70fb1b9..1313581 100644 (file)
@@ -61,7 +61,7 @@ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
        /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
        skb_trim(skb, skb->len-2);
        skb->protocol=htons(ETH_P_WAN_PPP);
-       skb->mac.raw=skb->data;
+       skb_reset_mac_header(skb);
        skb->dev=c->netdevice;
        /*
         *      Send it to the PPP layer. We don't have time to process
index 218f7b5..67fc67c 100644 (file)
@@ -227,7 +227,7 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb)
        unsigned long flags;
 
        skb->dev=dev;
-       skb->mac.raw=skb->data;
+       skb_reset_mac_header(skb);
 
        if (dev->flags & IFF_RUNNING)
        {
index 8b4540b..98ef400 100644 (file)
@@ -1656,7 +1656,7 @@ static void z8530_rx_done(struct z8530_channel *c)
                else
                {
                        skb_put(skb, ct);
-                       memcpy(skb->data, rxb, ct);
+                       skb_copy_to_linear_data(skb, rxb, ct);
                        c->stats.rx_packets++;
                        c->stats.rx_bytes+=ct;
                }
@@ -1782,7 +1782,7 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
                 */
                c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
                c->tx_dma_used^=1;      /* Flip temp buffer */
-               memcpy(c->tx_next_ptr, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
        }
        else
                c->tx_next_ptr=skb->data;       
index ece3d9c..4426841 100644 (file)
@@ -2,47 +2,21 @@
 # Wireless LAN device configuration
 #
 
-menu "Wireless LAN (non-hamradio)"
-       depends on NETDEVICES
-
-config NET_RADIO
-       bool "Wireless LAN drivers (non-hamradio) & Wireless Extensions"
-       select WIRELESS_EXT
-       ---help---
-         Support for wireless LANs and everything having to do with radio,
-         but not with amateur radio or FM broadcasting.
-
-         Saying Y here also enables the Wireless Extensions (creates
-         /proc/net/wireless and enables iwconfig access). The Wireless
-         Extension is a generic API allowing a driver to expose to the user
-         space configuration and statistics specific to common Wireless LANs.
-         The beauty of it is that a single set of tool can support all the
-         variations of Wireless LANs, regardless of their type (as long as
-         the driver supports Wireless Extension). Another advantage is that
-         these parameters may be changed on the fly without restarting the
-         driver (or Linux). If you wish to use Wireless Extensions with
-         wireless PCMCIA (PC-) cards, you need to say Y here; you can fetch
-         the tools from
-         <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+menu "Wireless LAN"
 
-config NET_WIRELESS_RTNETLINK
-       bool "Wireless Extension API over RtNetlink"
-       depends on NET_RADIO
+config WLAN_PRE80211
+       bool "Wireless LAN (pre-802.11)"
+       depends on NETDEVICES
        ---help---
-         Support the Wireless Extension API over the RtNetlink socket
-         in addition to the traditional ioctl interface (selected above).
+         Say Y if you have any pre-802.11 wireless LAN hardware.
 
-         For now, few tools use this facility, but it might grow in the
-         future. The only downside is that it adds 4.5 kB to your kernel.
-
-# Note : the cards are obsolete (can't buy them anymore), but the drivers
-# are not, as people are still using them...
-comment "Obsolete Wireless cards support (pre-802.11)"
-       depends on NET_RADIO && (INET || ISA || PCMCIA)
+         This option does not affect the kernel build; it only
+         lets you choose drivers.
 
 config STRIP
        tristate "STRIP (Metricom starmode radio IP)"
-       depends on NET_RADIO && INET
+       depends on INET && WLAN_PRE80211
+       select WIRELESS_EXT
        ---help---
          Say Y if you have a Metricom radio and intend to use Starmode Radio
          IP. STRIP is a radio protocol developed for the MosquitoNet project
@@ -65,7 +39,8 @@ config STRIP
 
 config ARLAN
        tristate "Aironet Arlan 655 & IC2200 DS support"
-       depends on NET_RADIO && ISA && !64BIT
+       depends on ISA && !64BIT && WLAN_PRE80211
+       select WIRELESS_EXT
        ---help---
          Aironet makes Arlan, a class of wireless LAN adapters. These use the
          www.Telxon.com chip, which is also used on several similar cards.
@@ -80,7 +55,8 @@ config ARLAN
 
 config WAVELAN
        tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
-       depends on NET_RADIO && ISA
+       depends on ISA && WLAN_PRE80211
+       select WIRELESS_EXT
        ---help---
          The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
          a Radio LAN (wireless Ethernet-like Local Area Network) using the
@@ -107,7 +83,8 @@ config WAVELAN
 
 config PCMCIA_WAVELAN
        tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
-       depends on NET_RADIO && PCMCIA
+       depends on PCMCIA && WLAN_PRE80211
+       select WIRELESS_EXT
        help
          Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
          (PC-card) wireless Ethernet networking card to your computer.  This
@@ -118,7 +95,8 @@ config PCMCIA_WAVELAN
 
 config PCMCIA_NETWAVE
        tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
-       depends on NET_RADIO && PCMCIA
+       depends on PCMCIA && WLAN_PRE80211
+       select WIRELESS_EXT
        help
          Say Y here if you intend to attach this type of PCMCIA (PC-card)
          wireless Ethernet networking card to your computer.
@@ -126,12 +104,20 @@ config PCMCIA_NETWAVE
          To compile this driver as a module, choose M here: the module will be
          called netwave_cs.  If unsure, say N.
 
-comment "Wireless 802.11 Frequency Hopping cards support"
-       depends on NET_RADIO && PCMCIA
+
+config WLAN_80211
+       bool "Wireless LAN (IEEE 802.11)"
+       depends on NETDEVICES
+       ---help---
+         Say Y if you have any 802.11 wireless LAN hardware.
+
+         This option does not affect the kernel build; it only
+         lets you choose drivers.
 
 config PCMCIA_RAYCS
        tristate "Aviator/Raytheon 2.4MHz wireless support"
-       depends on NET_RADIO && PCMCIA
+       depends on PCMCIA && WLAN_80211
+       select WIRELESS_EXT
        ---help---
          Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
          (PC-card) wireless Ethernet networking card to your computer.
@@ -141,12 +127,10 @@ config PCMCIA_RAYCS
          To compile this driver as a module, choose M here: the module will be
          called ray_cs.  If unsure, say N.
 
-comment "Wireless 802.11b ISA/PCI cards support"
-       depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA)
-
 config IPW2100
        tristate "Intel PRO/Wireless 2100 Network Connection"
-       depends on NET_RADIO && PCI
+       depends on PCI && WLAN_80211
+       select WIRELESS_EXT
        select FW_LOADER
        select IEEE80211
        ---help---
@@ -200,7 +184,8 @@ config IPW2100_DEBUG
 
 config IPW2200
        tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
-       depends on NET_RADIO && PCI
+       depends on PCI && WLAN_80211
+       select WIRELESS_EXT
        select FW_LOADER
        select IEEE80211
        ---help---
@@ -282,7 +267,8 @@ config IPW2200_DEBUG
 
 config AIRO
        tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
-       depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN)
+       depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN)
+       select WIRELESS_EXT
        select CRYPTO
        ---help---
          This is the standard Linux driver to support Cisco/Aironet ISA and
@@ -299,7 +285,8 @@ config AIRO
 
 config HERMES
        tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
-       depends on NET_RADIO && (PPC_PMAC || PCI || PCMCIA)
+       depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211
+       select WIRELESS_EXT
        ---help---
          A driver for 802.11b wireless cards based on the "Hermes" or
          Intersil HFA384x (Prism 2) MAC controller.  This includes the vast
@@ -373,7 +360,8 @@ config PCI_HERMES
 
 config ATMEL
       tristate "Atmel at76c50x chipset  802.11b support"
-      depends on NET_RADIO && (PCI || PCMCIA)
+      depends on (PCI || PCMCIA) && WLAN_80211
+      select WIRELESS_EXT
       select FW_LOADER
       select CRC32
        ---help---
@@ -394,13 +382,9 @@ config PCI_ATMEL
         Enable support for PCI and mini-PCI cards containing the
         Atmel at76c506 chip.
 
-# If Pcmcia is compiled in, offer Pcmcia cards...
-comment "Wireless 802.11b Pcmcia/Cardbus cards support"
-       depends on NET_RADIO && PCMCIA
-
 config PCMCIA_HERMES
        tristate "Hermes PCMCIA card support"
-       depends on NET_RADIO && PCMCIA && HERMES
+       depends on PCMCIA && HERMES
        ---help---
          A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
          as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
@@ -420,7 +404,7 @@ config PCMCIA_HERMES
 
 config PCMCIA_SPECTRUM
        tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
-       depends on NET_RADIO && PCMCIA && HERMES
+       depends on PCMCIA && HERMES
        select FW_LOADER
        ---help---
 
@@ -434,7 +418,8 @@ config PCMCIA_SPECTRUM
 
 config AIRO_CS
        tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
-       depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
+       depends on PCMCIA && (BROKEN || !M32R) && WLAN_80211
+       select WIRELESS_EXT
        select CRYPTO
        select CRYPTO_AES
        ---help---
@@ -458,7 +443,8 @@ config AIRO_CS
 
 config PCMCIA_ATMEL
        tristate "Atmel at76c502/at76c504 PCMCIA cards"
-       depends on NET_RADIO && ATMEL && PCMCIA
+       depends on ATMEL && PCMCIA
+       select WIRELESS_EXT
        select FW_LOADER
        select CRC32
        ---help---
@@ -467,17 +453,17 @@ config PCMCIA_ATMEL
 
 config PCMCIA_WL3501
       tristate "Planet WL3501 PCMCIA cards"
-      depends on NET_RADIO && EXPERIMENTAL && PCMCIA
+      depends on EXPERIMENTAL && PCMCIA && WLAN_80211
+      select WIRELESS_EXT
        ---help---
          A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet.
         It has basic support for Linux wireless extensions and initial
         micro support for ethtool.
 
-comment "Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support"
-       depends on NET_RADIO && PCI
 config PRISM54
        tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus' 
-       depends on PCI && NET_RADIO && EXPERIMENTAL
+       depends on PCI && EXPERIMENTAL && WLAN_80211
+       select WIRELESS_EXT
        select FW_LOADER
        ---help---
          Enable PCI and Cardbus support for the following chipset based cards:
@@ -523,7 +509,8 @@ config PRISM54
 
 config USB_ZD1201
        tristate "USB ZD1201 based Wireless device support"
-       depends on USB && NET_RADIO
+       depends on USB && WLAN_80211
+       select WIRELESS_EXT
        select FW_LOADER
        ---help---
          Say Y if you want to use wireless LAN adapters based on the ZyDAS
@@ -542,11 +529,4 @@ source "drivers/net/wireless/hostap/Kconfig"
 source "drivers/net/wireless/bcm43xx/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
 
-# yes, this works even when no drivers are selected
-config NET_WIRELESS
-       bool
-       depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA)
-       default y
-
 endmenu
-
index 2ada76a..7fe0a61 100644 (file)
@@ -2444,7 +2444,7 @@ static int add_airo_dev( struct net_device *dev );
 
 static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr)
 {
-       memcpy(haddr, skb->mac.raw + 10, ETH_ALEN);
+       memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN);
        return ETH_ALEN;
 }
 
@@ -3411,14 +3411,12 @@ badrx:
                        OUT4500( apriv, EVACK, EV_RX);
 
                        if (test_bit(FLAG_802_11, &apriv->flags)) {
-                               skb->mac.raw = skb->data;
+                               skb_reset_mac_header(skb);
                                skb->pkt_type = PACKET_OTHERHOST;
                                skb->dev = apriv->wifidev;
                                skb->protocol = htons(ETH_P_802_2);
-                       } else {
-                               skb->dev = dev;
+                       } else
                                skb->protocol = eth_type_trans(skb,dev);
-                       }
                        skb->dev->last_rx = jiffies;
                        skb->ip_summed = CHECKSUM_NONE;
 
@@ -3641,7 +3639,6 @@ badmic:
                }
 #endif /* WIRELESS_SPY */
 
-               skb->dev = ai->dev;
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, ai->dev);
                skb->dev->last_rx = jiffies;
@@ -3749,7 +3746,7 @@ void mpi_receive_802_11 (struct airo_info *ai)
                wireless_spy_update(ai->dev, sa, &wstats);
        }
 #endif /* IW_WIRELESS_SPY */
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb->pkt_type = PACKET_OTHERHOST;
        skb->dev = ai->wifidev;
        skb->protocol = htons(ETH_P_802_2);
index 4688e56..498e848 100644 (file)
@@ -1500,7 +1500,6 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
                                break;
                        }
                        skb_reserve(skb, 2);
-                       skb->dev = dev;
                        skbtmp = skb_put(skb, pkt_len);
 
                        memcpy_fromio(skbtmp + ARLAN_FAKE_HDR_LEN, ((char __iomem *) arlan) + rxOffset, pkt_len - ARLAN_FAKE_HDR_LEN);
index 23eba69..51a7db5 100644 (file)
@@ -827,14 +827,14 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
        if (priv->wep_is_on)
                frame_ctl |= IEEE80211_FCTL_PROTECTED;
        if (priv->operating_mode == IW_MODE_ADHOC) {
-               memcpy(&header.addr1, skb->data, 6);
+               skb_copy_from_linear_data(skb, &header.addr1, 6);
                memcpy(&header.addr2, dev->dev_addr, 6);
                memcpy(&header.addr3, priv->BSSID, 6);
        } else {
                frame_ctl |= IEEE80211_FCTL_TODS;
                memcpy(&header.addr1, priv->CurrentBSSID, 6);
                memcpy(&header.addr2, dev->dev_addr, 6);
-               memcpy(&header.addr3, skb->data, 6);
+               skb_copy_from_linear_data(skb, &header.addr3, 6);
        }
 
        if (priv->use_wpa)
@@ -920,7 +920,6 @@ static void fast_rx_path(struct atmel_private *priv,
                memcpy(&skbp[6], header->addr2, 6); /* source address */
 
        priv->dev->last_rx = jiffies;
-       skb->dev = priv->dev;
        skb->protocol = eth_type_trans(skb, priv->dev);
        skb->ip_summed = CHECKSUM_NONE;
        netif_rx(skb);
@@ -1028,7 +1027,6 @@ static void frag_rx_path(struct atmel_private *priv,
                                       priv->rx_buf,
                                       priv->frag_len + 12);
                                priv->dev->last_rx = jiffies;
-                               skb->dev = priv->dev;
                                skb->protocol = eth_type_trans(skb, priv->dev);
                                skb->ip_summed = CHECKSUM_NONE;
                                netif_rx(skb);
index 533993f..ce397e4 100644 (file)
@@ -1,6 +1,7 @@
 config BCM43XX
        tristate "Broadcom BCM43xx wireless support"
-       depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL
+       depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL
+       select WIRELESS_EXT
        select FW_LOADER
        select HW_RANDOM
        ---help---
index 6e0dc76..e3d2e61 100644 (file)
@@ -998,7 +998,8 @@ static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
                        assert(0);
                        return;
                }
-               memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
+               skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len),
+                                         skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }
index 308f773..1fef331 100644 (file)
@@ -1,6 +1,7 @@
 config HOSTAP
        tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)"
-       depends on NET_RADIO
+       depends on WLAN_80211
+       select WIRELESS_EXT
        select IEEE80211
        select IEEE80211_CRYPT_WEP
        ---help---
index 7e04dc9..cbedc9e 100644 (file)
@@ -167,7 +167,7 @@ hdr->f.status = s; hdr->f.len = l; hdr->f.data = d
 
        ret = skb->len - phdrlen;
        skb->dev = dev;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, hdrlen);
        if (prism_header)
                skb_pull(skb, phdrlen);
@@ -933,12 +933,14 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
                if (frag == 0) {
                        /* copy first fragment (including full headers) into
                         * beginning of the fragment cache skb */
-                       memcpy(skb_put(frag_skb, flen), skb->data, flen);
+                       skb_copy_from_linear_data(skb, skb_put(frag_skb, flen),
+                                                 flen);
                } else {
                        /* append frame payload to the end of the fragment
                         * cache skb */
-                       memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
-                              flen);
+                       skb_copy_from_linear_data_offset(skb, hdrlen,
+                                                        skb_put(frag_skb,
+                                                                flen), flen);
                }
                dev_kfree_skb(skb);
                skb = NULL;
@@ -1044,8 +1046,9 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
            skb->len >= ETH_HLEN + ETH_ALEN) {
                /* Non-standard frame: get addr4 from its bogus location after
                 * the payload */
-               memcpy(skb->data + ETH_ALEN,
-                      skb->data + skb->len - ETH_ALEN, ETH_ALEN);
+               skb_copy_from_linear_data_offset(skb, skb->len - ETH_ALEN,
+                                                skb->data + ETH_ALEN,
+                                                ETH_ALEN);
                skb_trim(skb, skb->len - ETH_ALEN);
        }
 
@@ -1073,17 +1076,17 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
 
        if (skb2 != NULL) {
                /* send to wireless media */
-               skb2->protocol = __constant_htons(ETH_P_802_3);
-               skb2->mac.raw = skb2->nh.raw = skb2->data;
-               /* skb2->nh.raw = skb2->data + ETH_HLEN; */
                skb2->dev = dev;
+               skb2->protocol = __constant_htons(ETH_P_802_3);
+               skb_reset_mac_header(skb2);
+               skb_reset_network_header(skb2);
+               /* skb2->network_header += ETH_HLEN; */
                dev_queue_xmit(skb2);
        }
 
        if (skb) {
                skb->protocol = eth_type_trans(skb, dev);
                memset(skb->cb, 0, sizeof(skb->cb));
-               skb->dev = dev;
                netif_rx(skb);
        }
 
index 4a5be70..246fac0 100644 (file)
@@ -146,7 +146,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
                        /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
                         * Addr4 = SA */
-                       memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
+                       skb_copy_from_linear_data_offset(skb, ETH_ALEN,
+                                                        &hdr.addr4, ETH_ALEN);
                        hdr_len += ETH_ALEN;
                } else {
                        /* bogus 4-addr format to workaround Prism2 station
@@ -159,7 +160,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        /* SA from skb->data + ETH_ALEN will be added after
                         * frame payload; use hdr.addr4 as a temporary buffer
                         */
-                       memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
+                       skb_copy_from_linear_data_offset(skb, ETH_ALEN,
+                                                        &hdr.addr4, ETH_ALEN);
                        need_tailroom += ETH_ALEN;
                }
 
@@ -174,24 +176,27 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
                else
                        memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
                memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
-               memcpy(&hdr.addr3, skb->data, ETH_ALEN);
+               skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
        } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
                fc |= IEEE80211_FCTL_FROMDS;
                /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
-               memcpy(&hdr.addr1, skb->data, ETH_ALEN);
+               skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
                memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
-               memcpy(&hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
+               skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
+                                                ETH_ALEN);
        } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
                fc |= IEEE80211_FCTL_TODS;
                /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
                memcpy(&hdr.addr1, to_assoc_ap ?
                       local->assoc_ap_addr : local->bssid, ETH_ALEN);
-               memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
-               memcpy(&hdr.addr3, skb->data, ETH_ALEN);
+               skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
+                                                ETH_ALEN);
+               skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
        } else if (local->iw_mode == IW_MODE_ADHOC) {
                /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
-               memcpy(&hdr.addr1, skb->data, ETH_ALEN);
-               memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+               skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
+               skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
+                                                ETH_ALEN);
                memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
        }
 
@@ -237,7 +242,7 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
        iface->stats.tx_packets++;
        iface->stats.tx_bytes += skb->len;
 
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        meta = (struct hostap_skb_tx_data *) skb->cb;
        memset(meta, 0, sizeof(*meta));
        meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
index efb8cf3..4ca8a27 100644 (file)
@@ -982,7 +982,8 @@ static void prism2_send_mgmt(struct net_device *dev,
        meta->tx_cb_idx = tx_cb_idx;
 
        skb->dev = dev;
-       skb->mac.raw = skb->nh.raw = skb->data;
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
        dev_queue_xmit(skb);
 }
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
@@ -1276,8 +1277,8 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
                return NULL;
        }
 
-       memcpy(tmpbuf, skb->data + ap->crypt->extra_mpdu_prefix_len,
-              WLAN_AUTH_CHALLENGE_LEN);
+       skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len,
+                                        tmpbuf, WLAN_AUTH_CHALLENGE_LEN);
        dev_kfree_skb(skb);
 
        return tmpbuf;
index 3079378..fb01fb9 100644 (file)
@@ -1838,13 +1838,14 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
 
        /* skb->data starts with txdesc->frame_control */
        hdr_len = 24;
-       memcpy(&txdesc.frame_control, skb->data, hdr_len);
+       skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len);
        fc = le16_to_cpu(txdesc.frame_control);
        if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
            (fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) &&
            skb->len >= 30) {
                /* Addr4 */
-               memcpy(txdesc.addr4, skb->data + hdr_len, ETH_ALEN);
+               skb_copy_from_linear_data_offset(skb, hdr_len, txdesc.addr4,
+                                                ETH_ALEN);
                hdr_len += ETH_ALEN;
        }
 
@@ -2217,7 +2218,7 @@ static void hostap_tx_callback(local_info_t *local,
                memcpy(skb_put(skb, len), payload, len);
 
        skb->dev = local->dev;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
 
        cb->func(skb, ok, cb->data);
 }
index 9077e6e..1f9edd9 100644 (file)
@@ -590,20 +590,20 @@ void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx)
 
 int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr)
 {
-       memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); /* addr2 */
+       memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
        return ETH_ALEN;
 }
 
 
 int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr)
 {
-       if (*(u32 *)skb->mac.raw == LWNG_CAP_DID_BASE) {
-               memcpy(haddr, skb->mac.raw +
-                      sizeof(struct linux_wlan_ng_prism_hdr) + 10,
+       const unsigned char *mac = skb_mac_header(skb);
+
+       if (*(u32 *)mac == LWNG_CAP_DID_BASE) {
+               memcpy(haddr, mac + sizeof(struct linux_wlan_ng_prism_hdr) + 10,
                       ETH_ALEN); /* addr2 */
-       } else { /* (*(u32 *)skb->mac.raw == htonl(LWNG_CAPHDR_VERSION)) */
-               memcpy(haddr, skb->mac.raw +
-                      sizeof(struct linux_wlan_ng_cap_hdr) + 10,
+       } else { /* (*(u32 *)mac == htonl(LWNG_CAPHDR_VERSION)) */
+               memcpy(haddr, mac + sizeof(struct linux_wlan_ng_cap_hdr) + 10,
                       ETH_ALEN); /* addr2 */
        }
        return ETH_ALEN;
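The "+ 10" in these header parsers skips to addr2, the transmitter address, in the generic IEEE 802.11 MAC header: 2 bytes of frame control and 2 bytes of duration/ID precede the 6-byte addr1, so addr2 starts at offset 10. A small sketch spelling out that arithmetic (macro names are illustrative, not from the driver):

	/* Generic 802.11 MAC header prefix assumed by the "+ 10" above. */
	#define SKETCH_80211_FC_LEN	2	/* frame control   */
	#define SKETCH_80211_DUR_LEN	2	/* duration / ID   */
	#define SKETCH_80211_ADDR_LEN	6	/* one MAC address */
	#define SKETCH_80211_ADDR2_OFF \
		(SKETCH_80211_FC_LEN + SKETCH_80211_DUR_LEN + SKETCH_80211_ADDR_LEN)	/* = 10 */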
@@ -1063,7 +1063,8 @@ int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
        meta->iface = netdev_priv(dev);
 
        skb->dev = dev;
-       skb->mac.raw = skb->nh.raw = skb->data;
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
        dev_queue_xmit(skb);
 
        return 0;
index ad6e4a4..9137a4d 100644 (file)
@@ -2416,8 +2416,9 @@ static void isr_rx(struct ipw2100_priv *priv, int i,
 #ifdef IPW2100_RX_DEBUG
        /* Make a copy of the frame so we can dump it to the logs if
         * ieee80211_rx fails */
-       memcpy(packet_data, packet->skb->data,
-              min_t(u32, status->frame_size, IPW_RX_NIC_BUFFER_LENGTH));
+       skb_copy_from_linear_data(packet->skb, packet_data,
+                                 min_t(u32, status->frame_size,
+                                            IPW_RX_NIC_BUFFER_LENGTH));
 #endif
 
        if (!ieee80211_rx(priv->ieee, packet->skb, stats)) {
index c878a2f..4839a45 100644 (file)
@@ -8133,7 +8133,7 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
                skb->dev = priv->ieee->dev;
 
                /* Point raw at the ieee80211_stats */
-               skb->mac.raw = skb->data;
+               skb_reset_mac_header(skb);
 
                skb->pkt_type = PACKET_OTHERHOST;
                skb->protocol = __constant_htons(ETH_P_80211_STATS);
@@ -10355,7 +10355,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
 
                rt_hdr->it_len = dst->len;
 
-               memcpy(skb_put(dst, len), src->data, len);
+               skb_copy_from_linear_data(src, skb_put(dst, len), len);
 
                if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
                        dev_kfree_skb_any(dst);
index a009ab5..45b00e1 100644 (file)
@@ -1283,7 +1283,6 @@ static int netwave_rx(struct net_device *dev)
 
        skb_reserve( skb, 2);  /* Align IP on 16 byte */
        skb_put( skb, rcvLen);
-       skb->dev = dev;
 
        /* Copy packet fragments to the skb data area */
        ptr = (u_char*) skb->data;
index 4e7f6cf..062286d 100644 (file)
@@ -689,7 +689,7 @@ static void orinoco_stat_gather(struct net_device *dev,
        /* Note : gcc will optimise the whole section away if
         * WIRELESS_SPY is not defined... - Jean II */
        if (SPY_NUMBER(priv)) {
-               orinoco_spy_gather(dev, skb->mac.raw + ETH_ALEN,
+               orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN,
                                   desc->signal, desc->silence);
        }
 }
@@ -770,7 +770,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
 
        /* Copy the 802.11 header to the skb */
        memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen);
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
 
        /* If any, copy the data from the card to the skb */
        if (datalen > 0) {
@@ -915,7 +915,6 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
                memcpy(hdr->h_source, desc.addr2, ETH_ALEN);
 
        dev->last_rx = jiffies;
-       skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);
        skb->ip_summed = CHECKSUM_NONE;
        if (fc & IEEE80211_FCTL_TODS)
index b112291..dd070cc 100644 (file)
@@ -136,7 +136,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
                                printk("islpci_eth_transmit:wds_mac\n");
 #endif
                                memmove(skb->data + 6, src, skb->len);
-                               memcpy(skb->data, wds_mac, 6);
+                               skb_copy_to_linear_data(skb, wds_mac, 6);
                        } else {
                                memmove(skb->data, src, skb->len);
                        }
@@ -162,13 +162,16 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
 
                        skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
                        if (init_wds) {
-                               memcpy(newskb->data + 6, skb->data, skb->len);
-                               memcpy(newskb->data, wds_mac, 6);
+                               skb_copy_from_linear_data(skb,
+                                                         newskb->data + 6,
+                                                         skb->len);
+                               skb_copy_to_linear_data(newskb, wds_mac, 6);
 #ifdef ISLPCI_ETH_DEBUG
                                printk("islpci_eth_transmit:wds_mac\n");
 #endif
                        } else
-                               memcpy(newskb->data, skb->data, skb->len);
+                               skb_copy_from_linear_data(skb, newskb->data,
+                                                         skb->len);
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
                        DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
@@ -303,7 +306,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
                skb_pull(*skb, sizeof (struct rfmon_header));
 
        (*skb)->protocol = htons(ETH_P_802_2);
-       (*skb)->mac.raw = (*skb)->data;
+       skb_reset_mac_header(*skb);
        (*skb)->pkt_type = PACKET_OTHERHOST;
 
        return 0;
@@ -374,10 +377,6 @@ islpci_eth_receive(islpci_private *priv)
        DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
        display_buffer((char *) skb->data, skb->len);
 #endif
-
-       /* do some additional sk_buff and network layer parameters */
-       skb->dev = ndev;
-
        /* take care of monitor mode and spy monitoring. */
        if (unlikely(priv->iw_mode == IW_MODE_MONITOR))
                discard = islpci_monitor_rx(priv, &skb);
@@ -398,8 +397,10 @@ islpci_eth_receive(islpci_private *priv)
                        /* Update spy records */
                        wireless_spy_update(ndev, annex->addr2, &wstats);
 
-                       memcpy(skb->data + sizeof (struct rfmon_header),
-                              skb->data, 2 * ETH_ALEN);
+                       skb_copy_from_linear_data(skb,
+                                                 (skb->data +
+                                                  sizeof(struct rfmon_header)),
+                                                 2 * ETH_ALEN);
                        skb_pull(skb, sizeof (struct rfmon_header));
                }
                skb->protocol = eth_type_trans(skb, ndev);
index 47b2ccb..3be6242 100644 (file)
@@ -2232,7 +2232,6 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i
         return;
     }
     skb_reserve( skb, 2);   /* Align IP on 16 byte (TBD check this)*/
-    skb->dev = dev;
 
     DEBUG(4,"ray_cs rx_data total_len = %x, rx_len = %x\n",total_len,rx_len);
 
@@ -2243,7 +2242,8 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i
     rx_ptr += copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len);
     /* Get source address */
 #ifdef WIRELESS_SPY
-    memcpy(linksrcaddr, ((struct mac_header *)skb->data)->addr_2, ETH_ALEN);
+    skb_copy_from_linear_data_offset(skb, offsetof(struct mac_header, addr_2),
+                                    linksrcaddr, ETH_ALEN);
 #endif
     /* Now, deal with encapsulation/translation/sniffer */
     if (!sniffer) {
index f5ce1c6..2a299a0 100644 (file)
@@ -2009,7 +2009,7 @@ static void deliver_packet(struct strip *strip_info, STRIP_Header * header,
                       packetlen);
                skb->dev = get_strip_dev(strip_info);
                skb->protocol = header->protocol;
-               skb->mac.raw = skb->data;
+               skb_reset_mac_header(skb);
 
                /* Having put a fake header on the front of the sk_buff for the */
                /* benefit of tools like tcpdump, skb_pull now 'consumes' that  */
index 2aa3c76..1cf090d 100644 (file)
@@ -2512,14 +2512,13 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
                return;
        }
 
-       skb->dev = dev;
-
        /* Copy the packet to the buffer. */
        obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize);
        skb->protocol = eth_type_trans(skb, dev);
 
 #ifdef DEBUG_RX_INFO
-       wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read");
+       wv_packet_info(skb_mac_header(skb), sksize, dev->name,
+                      "wv_packet_read");
 #endif                         /* DEBUG_RX_INFO */
 
        /* Statistics-gathering and associated stuff.
@@ -2555,7 +2554,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
 
                /* Spying stuff */
 #ifdef IW_WIRELESS_SPY
-               wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE,
+               wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE,
                              stats);
 #endif /* IW_WIRELESS_SPY */
 #ifdef HISTOGRAM
@@ -2939,7 +2938,7 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
         * need to pad. Jean II */
        if (skb->len < ETH_ZLEN) {
                memset(data, 0, ETH_ZLEN);
-               memcpy(data, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, data, skb->len);
                /* Write packet on the card */
                if(wv_packet_write(dev, data, ETH_ZLEN))
                        return 1;       /* We failed */
index b042397..67b867f 100644 (file)
@@ -2884,14 +2884,12 @@ wv_packet_read(struct net_device *              dev,
       return;
     }
 
-  skb->dev = dev;
-
   skb_reserve(skb, 2);
   fd_p = read_ringbuf(dev, fd_p, (char *) skb_put(skb, sksize), sksize);
   skb->protocol = eth_type_trans(skb, dev);
 
 #ifdef DEBUG_RX_INFO
-  wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read");
+  wv_packet_info(skb_mac_header(skb), sksize, dev->name, "wv_packet_read");
 #endif /* DEBUG_RX_INFO */
      
   /* Statistics gathering & stuff associated.
@@ -2925,7 +2923,7 @@ wv_packet_read(struct net_device *                dev,
 #endif /* WAVELAN_ROAMING */
          
 #ifdef WIRELESS_SPY
-      wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, stats);
+      wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE, stats);
 #endif /* WIRELESS_SPY */
 #ifdef HISTOGRAM
       wl_his_gather(dev, stats);
index 6cb66a3..935b144 100644 (file)
@@ -327,7 +327,6 @@ static void zd1201_usbrx(struct urb *urb)
                        memcpy(skb_put(skb, 6), &data[datalen-8], 6);
                        memcpy(skb_put(skb, 2), &data[datalen-24], 2);
                        memcpy(skb_put(skb, len), data, len);
-                       skb->dev = zd->dev;
                        skb->dev->last_rx = jiffies;
                        skb->protocol = eth_type_trans(skb, zd->dev);
                        zd->stats.rx_packets++;
@@ -385,7 +384,6 @@ static void zd1201_usbrx(struct urb *urb)
                        memcpy(skb_put(skb, 2), &data[6], 2);
                        memcpy(skb_put(skb, len), data+8, len);
                }
-               skb->dev = zd->dev;
                skb->dev->last_rx = jiffies;
                skb->protocol = eth_type_trans(skb, zd->dev);
                zd->stats.rx_packets++;
@@ -809,10 +807,10 @@ static int zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        txbuf[4] = 0x00;
        txbuf[5] = 0x00;
 
-       memcpy(txbuf+6, skb->data+12, skb->len-12);
+       skb_copy_from_linear_data_offset(skb, 12, txbuf + 6, skb->len - 12);
        if (pad)
                txbuf[skb->len-12+6]=0;
-       memcpy(txbuf+skb->len-12+6+pad, skb->data, 12);
+       skb_copy_from_linear_data(skb, txbuf + skb->len - 12 + 6 + pad, 12);
        *(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6);
        txbuf[txbuflen-1] = 0;
 
index 66ed55b..d1ab24a 100644 (file)
@@ -1,6 +1,7 @@
 config ZD1211RW
        tristate "ZyDAS ZD1211/ZD1211B USB-wireless support"
-       depends on USB && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL
+       depends on USB && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL
+       select WIRELESS_EXT
        select FW_LOADER
        ---help---
          This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless
index aac8a1c..edaaad2 100644 (file)
@@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = {
        { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
        { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B },
        { USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B },
+       { USB_DEVICE(0x0baf, 0x0121), .driver_info = DEVICE_ZD1211B },
        /* "Driverless" devices that need ejecting */
        { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
        {}
index 2412ce4..3f4a7cf 100644 (file)
@@ -1137,7 +1137,6 @@ static int yellowfin_rx(struct net_device *dev)
                                skb = dev_alloc_skb(pkt_len + 2);
                                if (skb == NULL)
                                        break;
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
                                skb_put(skb, pkt_len);
index b24b072..4032e9f 100644 (file)
@@ -774,7 +774,6 @@ static void znet_rx(struct net_device *dev)
                                znet->stats.rx_dropped++;
                                break;
                        }
-                       skb->dev = dev;
 
                        if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) {
                                int semi_cnt = (znet->rx_end - znet->rx_cur)<<1;
index d190c05..453e682 100644 (file)
@@ -372,9 +372,9 @@ static __inline__ int led_get_net_activity(void)
                continue;
            if (LOOPBACK(in_dev->ifa_list->ifa_local))
                continue;
-           if (!dev->get_stats) 
-               continue;
            stats = dev->get_stats(dev);
+           if (!stats)
+               continue;
            rx_total += stats->rx_packets;
            tx_total += stats->tx_packets;
        }
index 9793533..400bb90 100644 (file)
@@ -126,7 +126,7 @@ static unsigned char status_sunbpp_to_pc(struct parport *p)
        if (!(value_tcr & P_TCR_BUSY))
                bits |= PARPORT_STATUS_BUSY;
 
-       dprintk((KERN_DEBUG "tcr 0x%x ir 0x%x\n", regs->p_tcr, regs->p_ir));
+       dprintk((KERN_DEBUG "tcr 0x%x ir 0x%x\n", value_tcr, value_ir));
        dprintk((KERN_DEBUG "read status 0x%x\n", bits));
        return bits;
 }
@@ -147,7 +147,7 @@ static unsigned char control_sunbpp_to_pc(struct parport *p)
        if (value_or & P_OR_SLCT_IN)
                bits |= PARPORT_CONTROL_SELECT;
 
-       dprintk((KERN_DEBUG "tcr 0x%x or 0x%x\n", regs->p_tcr, regs->p_or));
+       dprintk((KERN_DEBUG "tcr 0x%x or 0x%x\n", value_tcr, value_or));
        dprintk((KERN_DEBUG "read control 0x%x\n", bits));
        return bits;
 }
@@ -165,7 +165,8 @@ static unsigned char parport_sunbpp_frob_control(struct parport *p,
        unsigned char value_tcr = sbus_readb(&regs->p_tcr);
        unsigned char value_or = sbus_readb(&regs->p_or);
 
-       dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n", regs->p_tcr, regs->p_or));
+       dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n",
+                value_tcr, value_or));
        if (mask & PARPORT_CONTROL_STROBE) {
                if (val & PARPORT_CONTROL_STROBE) {
                        value_tcr &= ~P_TCR_DS;
@@ -197,7 +198,8 @@ static unsigned char parport_sunbpp_frob_control(struct parport *p,
 
        sbus_writeb(value_or, &regs->p_or);
        sbus_writeb(value_tcr, &regs->p_tcr);
-       dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n", regs->p_tcr, regs->p_or));
+       dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n",
+                value_tcr, value_or));
        return parport_sunbpp_read_control(p);
 }
 
index a4a9682..2fe1d69 100644 (file)
@@ -682,34 +682,7 @@ static void pci_read_irq(struct pci_dev *dev)
        dev->irq = irq;
 }
 
-static void change_legacy_io_resource(struct pci_dev * dev, unsigned index,
-                                      unsigned start, unsigned end)
-{
-       unsigned base = start & PCI_BASE_ADDRESS_IO_MASK;
-       unsigned len = (end | ~PCI_BASE_ADDRESS_IO_MASK) - base + 1;
-
-       /*
-        * Some X versions get confused when the BARs reported through
-        * /sys or /proc differ from those seen in config space, thus
-        * try to update the config space values, too.
-        */
-       if (!(pci_resource_flags(dev, index) & IORESOURCE_IO))
-               printk(KERN_WARNING "%s: cannot adjust BAR%u (not I/O)\n",
-                      pci_name(dev), index);
-       else if (pci_resource_len(dev, index) != len)
-               printk(KERN_WARNING "%s: cannot adjust BAR%u (size %04X)\n",
-                      pci_name(dev), index, (unsigned)pci_resource_len(dev, index));
-       else {
-               printk(KERN_INFO "%s: trying to change BAR%u from %04X to %04X\n",
-                      pci_name(dev), index,
-                      (unsigned)pci_resource_start(dev, index), base);
-               pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + index * 4, base);
-       }
-       pci_resource_start(dev, index) = start;
-       pci_resource_end(dev, index)   = end;
-       pci_resource_flags(dev, index) =
-               IORESOURCE_IO | IORESOURCE_PCI_FIXED | PCI_BASE_ADDRESS_SPACE_IO;
-}
+#define LEGACY_IO_RESOURCE     (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
 
 /**
  * pci_setup_device - fill in class and map information of a device
@@ -762,12 +735,20 @@ static int pci_setup_device(struct pci_dev * dev)
                        u8 progif;
                        pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
                        if ((progif & 1) == 0) {
-                               change_legacy_io_resource(dev, 0, 0x1F0, 0x1F7);
-                               change_legacy_io_resource(dev, 1, 0x3F6, 0x3F6);
+                               dev->resource[0].start = 0x1F0;
+                               dev->resource[0].end = 0x1F7;
+                               dev->resource[0].flags = LEGACY_IO_RESOURCE;
+                               dev->resource[1].start = 0x3F6;
+                               dev->resource[1].end = 0x3F6;
+                               dev->resource[1].flags = LEGACY_IO_RESOURCE;
                        }
                        if ((progif & 4) == 0) {
-                               change_legacy_io_resource(dev, 2, 0x170, 0x177);
-                               change_legacy_io_resource(dev, 3, 0x376, 0x376);
+                               dev->resource[2].start = 0x170;
+                               dev->resource[2].end = 0x177;
+                               dev->resource[2].flags = LEGACY_IO_RESOURCE;
+                               dev->resource[3].start = 0x376;
+                               dev->resource[3].end = 0x376;
+                               dev->resource[3].flags = LEGACY_IO_RESOURCE;
                        }
                }
                break;
index eb5dc62..e71929d 100644 (file)
@@ -398,6 +398,9 @@ dasd_change_state(struct dasd_device *device)
 
        if (device->state == device->target)
                wake_up(&dasd_init_waitq);
+
+       /* let user-space know that the device status changed */
+       kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
 }
 
 /*
index ed70852..6a89cef 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <asm/debug.h>
 #include <asm/uaccess.h>
+#include <asm/ipl.h>
 
 /* This is ugly... */
 #define PRINTK_HEADER "dasd_devmap:"
@@ -133,6 +134,8 @@ dasd_call_setup(char *str)
 __setup ("dasd=", dasd_call_setup);
 #endif /* #ifndef MODULE */
 
+#define        DASD_IPLDEV     "ipldev"
+
 /*
  * Read a device busid/devno from a string.
  */
@@ -141,6 +144,20 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
 {
        int val, old_style;
 
+       /* Interpret ipldev busid */
+       if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
+               if (ipl_info.type != IPL_TYPE_CCW) {
+                       MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw "
+                               "device");
+                       return -EINVAL;
+               }
+               *id0 = 0;
+               *id1 = ipl_info.data.ccw.dev_id.ssid;
+               *devno = ipl_info.data.ccw.dev_id.devno;
+               *str += strlen(DASD_IPLDEV);
+
+               return 0;
+       }
        /* check for leading '0x' */
        old_style = 0;
        if ((*str)[0] == '0' && (*str)[1] == 'x') {
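
With the hunk above applied, the IPL device can be selected symbolically on the kernel command line instead of by busid; an illustrative example (a sketch, assuming the usual dasd= parameter syntax otherwise stays unchanged):

    dasd=ipldev
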
@@ -828,6 +845,46 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
 
+static ssize_t
+dasd_device_status_show(struct device *dev, struct device_attribute *attr,
+                    char *buf)
+{
+       struct dasd_device *device;
+       ssize_t len;
+
+       device = dasd_device_from_cdev(to_ccwdev(dev));
+       if (!IS_ERR(device)) {
+               switch (device->state) {
+               case DASD_STATE_NEW:
+                       len = snprintf(buf, PAGE_SIZE, "new\n");
+                       break;
+               case DASD_STATE_KNOWN:
+                       len = snprintf(buf, PAGE_SIZE, "detected\n");
+                       break;
+               case DASD_STATE_BASIC:
+                       len = snprintf(buf, PAGE_SIZE, "basic\n");
+                       break;
+               case DASD_STATE_UNFMT:
+                       len = snprintf(buf, PAGE_SIZE, "unformatted\n");
+                       break;
+               case DASD_STATE_READY:
+                       len = snprintf(buf, PAGE_SIZE, "ready\n");
+                       break;
+               case DASD_STATE_ONLINE:
+                       len = snprintf(buf, PAGE_SIZE, "online\n");
+                       break;
+               default:
+                       len = snprintf(buf, PAGE_SIZE, "no stat\n");
+                       break;
+               }
+               dasd_put_device(device);
+       } else
+               len = snprintf(buf, PAGE_SIZE, "unknown\n");
+       return len;
+}
+
+static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
+
 static ssize_t
 dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -939,6 +996,7 @@ static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
 static struct attribute * dasd_attrs[] = {
        &dev_attr_readonly.attr,
        &dev_attr_discipline.attr,
+       &dev_attr_status.attr,
        &dev_attr_alias.attr,
        &dev_attr_vendor.attr,
        &dev_attr_uid.attr,
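
The new attribute can be read from user space like any other sysfs file; a minimal user-space sketch, assuming a hypothetical DASD with bus id 0.0.4711:

#include <stdio.h>

/* Illustrative only: print the state exported by the new "status"
 * attribute of a DASD ccw device (the bus id below is made up).
 */
int main(void)
{
	char state[32];
	FILE *f = fopen("/sys/bus/ccw/devices/0.0.4711/status", "r");

	if (!f) {
		perror("open status");
		return 1;
	}
	if (fgets(state, sizeof(state), f))
		printf("device state: %s", state);	/* e.g. "online\n" */
	fclose(f);
	return 0;
}
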
index 293e667..c210784 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
-        sclp_info.o
+        sclp_info.o sclp_config.o sclp_chp.o
 
 obj-$(CONFIG_TN3270) += raw3270.o
 obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
 obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
 obj-$(CONFIG_MONREADER) += monreader.o
 obj-$(CONFIG_MONWRITER) += monwriter.o
+
+zcore_mod-objs := sclp_sdias.o zcore.o
+obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o
index 9a328f1..6000bde 100644 (file)
@@ -813,12 +813,6 @@ con3215_unblank(void)
        spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 }
 
-static int __init 
-con3215_consetup(struct console *co, char *options)
-{
-       return 0;
-}
-
 /*
  *  The console structure for the 3215 console
  */
@@ -827,7 +821,6 @@ static struct console con3215 = {
        .write   = con3215_write,
        .device  = con3215_device,
        .unblank = con3215_unblank,
-       .setup   = con3215_consetup,
        .flags   = CON_PRINTBUFFER,
 };
 
index 8e7f2d7..fd34791 100644 (file)
@@ -555,12 +555,6 @@ con3270_unblank(void)
        spin_unlock_irqrestore(&cp->view.lock, flags);
 }
 
-static int __init 
-con3270_consetup(struct console *co, char *options)
-{
-       return 0;
-}
-
 /*
  *  The console structure for the 3270 console
  */
@@ -569,7 +563,6 @@ static struct console con3270 = {
        .write   = con3270_write,
        .device  = con3270_device,
        .unblank = con3270_unblank,
-       .setup   = con3270_consetup,
        .flags   = CON_PRINTBUFFER,
 };
 
index f171de3..fa62e69 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/timer.h>
 #include <linux/reboot.h>
 #include <linux/jiffies.h>
+#include <linux/init.h>
 #include <asm/types.h>
 #include <asm/s390_ext.h>
 
@@ -510,7 +511,7 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
 }
 
 static struct sclp_register sclp_state_change_event = {
-       .receive_mask = EvTyp_StateChange_Mask,
+       .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = sclp_state_change_cb
 };
 
@@ -930,3 +931,10 @@ sclp_init(void)
        sclp_init_mask(1);
        return 0;
 }
+
+static __init int sclp_initcall(void)
+{
+       return sclp_init();
+}
+
+arch_initcall(sclp_initcall);
index 7d29ab4..87ac4a3 100644 (file)
 #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
 #define MAX_CONSOLE_PAGES      4
 
-#define EvTyp_OpCmd            0x01
-#define EvTyp_Msg              0x02
-#define EvTyp_StateChange      0x08
-#define EvTyp_PMsgCmd          0x09
-#define EvTyp_CntlProgOpCmd    0x20
-#define EvTyp_CntlProgIdent    0x0B
-#define EvTyp_SigQuiesce       0x1D
-#define EvTyp_VT220Msg         0x1A
-
-#define EvTyp_OpCmd_Mask       0x80000000
-#define EvTyp_Msg_Mask         0x40000000
-#define EvTyp_StateChange_Mask 0x01000000
-#define EvTyp_PMsgCmd_Mask     0x00800000
-#define EvTyp_CtlProgOpCmd_Mask        0x00000001
-#define EvTyp_CtlProgIdent_Mask        0x00200000
-#define EvTyp_SigQuiesce_Mask  0x00000008
-#define EvTyp_VT220Msg_Mask    0x00000040
-
-#define GnrlMsgFlgs_DOM                0x8000
-#define GnrlMsgFlgs_SndAlrm    0x4000
-#define GnrlMsgFlgs_HoldMsg    0x2000
-
-#define LnTpFlgs_CntlText      0x8000
-#define LnTpFlgs_LabelText     0x4000
-#define LnTpFlgs_DataText      0x2000
-#define LnTpFlgs_EndText       0x1000
-#define LnTpFlgs_PromptText    0x0800
+#define EVTYP_OPCMD            0x01
+#define EVTYP_MSG              0x02
+#define EVTYP_STATECHANGE      0x08
+#define EVTYP_PMSGCMD          0x09
+#define EVTYP_CNTLPROGOPCMD    0x20
+#define EVTYP_CNTLPROGIDENT    0x0B
+#define EVTYP_SIGQUIESCE       0x1D
+#define EVTYP_VT220MSG         0x1A
+#define EVTYP_CONFMGMDATA      0x04
+#define EVTYP_SDIAS            0x1C
+
+#define EVTYP_OPCMD_MASK       0x80000000
+#define EVTYP_MSG_MASK         0x40000000
+#define EVTYP_STATECHANGE_MASK 0x01000000
+#define EVTYP_PMSGCMD_MASK     0x00800000
+#define EVTYP_CTLPROGOPCMD_MASK        0x00000001
+#define EVTYP_CTLPROGIDENT_MASK        0x00200000
+#define EVTYP_SIGQUIESCE_MASK  0x00000008
+#define EVTYP_VT220MSG_MASK    0x00000040
+#define EVTYP_CONFMGMDATA_MASK 0x10000000
+#define EVTYP_SDIAS_MASK       0x00000010
+
+#define GNRLMSGFLGS_DOM                0x8000
+#define GNRLMSGFLGS_SNDALRM    0x4000
+#define GNRLMSGFLGS_HOLDMSG    0x2000
+
+#define LNTPFLGS_CNTLTEXT      0x8000
+#define LNTPFLGS_LABELTEXT     0x4000
+#define LNTPFLGS_DATATEXT      0x2000
+#define LNTPFLGS_ENDTEXT       0x1000
+#define LNTPFLGS_PROMPTTEXT    0x0800
 
 typedef unsigned int sclp_cmdw_t;
 
@@ -56,15 +60,15 @@ typedef unsigned int sclp_cmdw_t;
 #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
 
 #define GDS_ID_MDSMU           0x1310
-#define GDS_ID_MDSRouteInfo    0x1311
-#define GDS_ID_AgUnWrkCorr     0x1549
-#define GDS_ID_SNACondReport   0x1532
+#define GDS_ID_MDSROUTEINFO    0x1311
+#define GDS_ID_AGUNWRKCORR     0x1549
+#define GDS_ID_SNACONDREPORT   0x1532
 #define GDS_ID_CPMSU           0x1212
-#define GDS_ID_RoutTargInstr   0x154D
-#define GDS_ID_OpReq           0x8070
-#define GDS_ID_TextCmd         0x1320
+#define GDS_ID_ROUTTARGINSTR   0x154D
+#define GDS_ID_OPREQ           0x8070
+#define GDS_ID_TEXTCMD         0x1320
 
-#define GDS_KEY_SelfDefTextMsg 0x31
+#define GDS_KEY_SELFDEFTEXTMSG 0x31
 
 typedef u32 sccb_mask_t;       /* ATTENTION: assumes 32bit mask !!! */
 
diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c
new file mode 100644 (file)
index 0000000..a66b914
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ *  drivers/s390/char/sclp_chp.c
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/completion.h>
+#include <asm/sclp.h>
+#include <asm/chpid.h>
+
+#include "sclp.h"
+
+#define TAG    "sclp_chp: "
+
+#define SCLP_CMDW_CONFIGURE_CHANNEL_PATH       0x000f0001
+#define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH     0x000e0001
+#define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION        0x00030001
+
+static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid)
+{
+       return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8;
+}
+
+static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid)
+{
+       return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8;
+}
+
+static void chp_callback(struct sclp_req *req, void *data)
+{
+       struct completion *completion = data;
+
+       complete(completion);
+}
+
+struct chp_cfg_sccb {
+       struct sccb_header header;
+       u8 ccm;
+       u8 reserved[6];
+       u8 cssid;
+} __attribute__((packed));
+
+struct chp_cfg_data {
+       struct chp_cfg_sccb sccb;
+       struct sclp_req req;
+       struct completion completion;
+} __attribute__((packed));
+
+static int do_configure(sclp_cmdw_t cmd)
+{
+       struct chp_cfg_data *data;
+       int rc;
+
+       /* Prepare sccb. */
+       data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+       if (!data)
+               return -ENOMEM;
+       data->sccb.header.length = sizeof(struct chp_cfg_sccb);
+       data->req.command = cmd;
+       data->req.sccb = &(data->sccb);
+       data->req.status = SCLP_REQ_FILLED;
+       data->req.callback = chp_callback;
+       data->req.callback_data = &(data->completion);
+       init_completion(&data->completion);
+
+       /* Perform sclp request. */
+       rc = sclp_add_request(&(data->req));
+       if (rc)
+               goto out;
+       wait_for_completion(&data->completion);
+
+       /* Check response. */
+       if (data->req.status != SCLP_REQ_DONE) {
+               printk(KERN_WARNING TAG "configure channel-path request failed "
+                      "(status=0x%02x)\n", data->req.status);
+               rc = -EIO;
+               goto out;
+       }
+       switch (data->sccb.header.response_code) {
+       case 0x0020:
+       case 0x0120:
+       case 0x0440:
+       case 0x0450:
+               break;
+       default:
+               printk(KERN_WARNING TAG "configure channel-path failed "
+                      "(cmd=0x%08x, response=0x%04x)\n", cmd,
+                      data->sccb.header.response_code);
+               rc = -EIO;
+               break;
+       }
+out:
+       free_page((unsigned long) data);
+
+       return rc;
+}
+
+/**
+ * sclp_chp_configure - perform configure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform the configure channel-path sclp command for the specified chpid
+ * and wait for completion. Return 0 on success, non-zero otherwise.
+ */
+int sclp_chp_configure(struct chp_id chpid)
+{
+       return do_configure(get_configure_cmdw(chpid));
+}
+
+/**
+ * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform the deconfigure channel-path sclp command for the specified chpid
+ * and wait for completion. Return 0 on success, non-zero otherwise.
+ */
+int sclp_chp_deconfigure(struct chp_id chpid)
+{
+       return do_configure(get_deconfigure_cmdw(chpid));
+}
+
+struct chp_info_sccb {
+       struct sccb_header header;
+       u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+       u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+       u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+       u8 ccm;
+       u8 reserved[6];
+       u8 cssid;
+} __attribute__((packed));
+
+struct chp_info_data {
+       struct chp_info_sccb sccb;
+       struct sclp_req req;
+       struct completion completion;
+} __attribute__((packed));
+
+/**
+ * sclp_chp_read_info - perform read channel-path information sclp command
+ * @info: resulting channel-path information data
+ *
+ * Perform read channel-path information sclp command and wait for completion.
+ * On success, store channel-path information in @info and return 0. Return
+ * non-zero otherwise.
+ */
+int sclp_chp_read_info(struct sclp_chp_info *info)
+{
+       struct chp_info_data *data;
+       int rc;
+
+       /* Prepare sccb. */
+       data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+       if (!data)
+               return -ENOMEM;
+       data->sccb.header.length = sizeof(struct chp_info_sccb);
+       data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION;
+       data->req.sccb = &(data->sccb);
+       data->req.status = SCLP_REQ_FILLED;
+       data->req.callback = chp_callback;
+       data->req.callback_data = &(data->completion);
+       init_completion(&data->completion);
+
+       /* Perform sclp request. */
+       rc = sclp_add_request(&(data->req));
+       if (rc)
+               goto out;
+       wait_for_completion(&data->completion);
+
+       /* Check response. */
+       if (data->req.status != SCLP_REQ_DONE) {
+               printk(KERN_WARNING TAG "read channel-path info request failed "
+                      "(status=0x%02x)\n", data->req.status);
+               rc = -EIO;
+               goto out;
+       }
+       if (data->sccb.header.response_code != 0x0010) {
+               printk(KERN_WARNING TAG "read channel-path info failed "
+                      "(response=0x%04x)\n", data->sccb.header.response_code);
+               rc = -EIO;
+               goto out;
+       }
+       memcpy(info->recognized, data->sccb.recognized,
+              SCLP_CHP_INFO_MASK_SIZE);
+       memcpy(info->standby, data->sccb.standby,
+              SCLP_CHP_INFO_MASK_SIZE);
+       memcpy(info->configured, data->sccb.configured,
+              SCLP_CHP_INFO_MASK_SIZE);
+out:
+       free_page((unsigned long) data);
+
+       return rc;
+}
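
A hedged sketch of how a kernel-side caller (for instance the channel-path code in drivers/s390/cio) might drive this new interface; chp_id_init() is assumed to be the initializer provided by <asm/chpid.h>, and the printk is purely illustrative:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/chpid.h>
#include <asm/sclp.h>

/* Illustrative only: vary on one channel path and dump part of the
 * standby mask returned by read_info.
 */
static int example_configure_chp(u8 id)
{
	struct chp_id chpid;
	struct sclp_chp_info info;
	int rc;

	chp_id_init(&chpid);		/* assumed helper: cssid 0, id 0 */
	chpid.id = id;

	rc = sclp_chp_configure(chpid);
	if (rc)
		return rc;
	rc = sclp_chp_read_info(&info);
	if (rc)
		return rc;
	printk(KERN_INFO "chp %02x configured, standby mask byte 0: %02x\n",
	       id, info.standby[0]);
	return 0;
}
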
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
new file mode 100644 (file)
index 0000000..5322e5e
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ *  drivers/s390/char/sclp_config.c
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cpu.h>
+#include <linux/sysdev.h>
+#include <linux/workqueue.h>
+#include "sclp.h"
+
+#define TAG    "sclp_config: "
+
+struct conf_mgm_data {
+       u8 reserved;
+       u8 ev_qualifier;
+} __attribute__((packed));
+
+#define EV_QUAL_CAP_CHANGE     3
+
+static struct work_struct sclp_cpu_capability_work;
+
+static void sclp_cpu_capability_notify(struct work_struct *work)
+{
+       int cpu;
+       struct sys_device *sysdev;
+
+       printk(KERN_WARNING TAG "cpu capability changed.\n");
+       lock_cpu_hotplug();
+       for_each_online_cpu(cpu) {
+               sysdev = get_cpu_sysdev(cpu);
+               kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+       }
+       unlock_cpu_hotplug();
+}
+
+static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
+{
+       struct conf_mgm_data *cdata;
+
+       cdata = (struct conf_mgm_data *)(evbuf + 1);
+       if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE)
+               schedule_work(&sclp_cpu_capability_work);
+}
+
+static struct sclp_register sclp_conf_register =
+{
+       .receive_mask = EVTYP_CONFMGMDATA_MASK,
+       .receiver_fn  = sclp_conf_receiver_fn,
+};
+
+static int __init sclp_conf_init(void)
+{
+       int rc;
+
+       INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
+
+       rc = sclp_register(&sclp_conf_register);
+       if (rc) {
+               printk(KERN_ERR TAG "failed to register (%d).\n", rc);
+               return rc;
+       }
+
+       if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) {
+               printk(KERN_WARNING TAG "no configuration management.\n");
+               sclp_unregister(&sclp_conf_register);
+               rc = -ENOSYS;
+       }
+       return rc;
+}
+
+__initcall(sclp_conf_init);
index 65aa2c8..29fe2a5 100644 (file)
@@ -46,7 +46,7 @@ struct cpi_sccb {
 /* Event type structure for write message and write priority message */
 static struct sclp_register sclp_cpi_event =
 {
-       .send_mask = EvTyp_CtlProgIdent_Mask
+       .send_mask = EVTYP_CTLPROGIDENT_MASK
 };
 
 MODULE_LICENSE("GPL");
@@ -201,7 +201,7 @@ cpi_module_init(void)
                       "console.\n");
                return -EINVAL;
        }
-       if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) {
+       if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
                printk(KERN_WARNING "cpi: no control program identification "
                       "support\n");
                sclp_unregister(&sclp_cpi_event);
index baa8fe6..45ff25e 100644 (file)
@@ -43,7 +43,7 @@ sclp_quiesce_handler(struct evbuf_header *evbuf)
 }
 
 static struct sclp_register sclp_quiesce_event = {
-       .receive_mask = EvTyp_SigQuiesce_Mask,
+       .receive_mask = EVTYP_SIGQUIESCE_MASK,
        .receiver_fn = sclp_quiesce_handler
 };
 
index 2486783..bbd5b8b 100644 (file)
@@ -30,7 +30,7 @@
 
 /* Event type structure for write message and write priority message */
 static struct sclp_register sclp_rw_event = {
-       .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask
+       .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK
 };
 
 /*
@@ -64,7 +64,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
        memset(sccb, 0, sizeof(struct write_sccb));
        sccb->header.length = sizeof(struct write_sccb);
        sccb->msg_buf.header.length = sizeof(struct msg_buf);
-       sccb->msg_buf.header.type = EvTyp_Msg;
+       sccb->msg_buf.header.type = EVTYP_MSG;
        sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
        sccb->msg_buf.mdb.header.type = 1;
        sccb->msg_buf.mdb.header.tag = 0xD4C4C240;      /* ebcdic "MDB " */
@@ -114,7 +114,7 @@ sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
        memset(mto, 0, sizeof(struct mto));
        mto->length = sizeof(struct mto);
        mto->type = 4;  /* message text object */
-       mto->line_type_flags = LnTpFlgs_EndText; /* end text */
+       mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
 
        /* set pointer to first byte after struct mto. */
        buffer->current_line = (char *) (mto + 1);
@@ -215,7 +215,7 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
                case '\a':      /* bell, one for several times  */
                        /* set SCLP sound alarm bit in General Object */
                        buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
-                               GnrlMsgFlgs_SndAlrm;
+                               GNRLMSGFLGS_SNDALRM;
                        break;
                case '\t':      /* horizontal tabulator  */
                        /* check if new mto needs to be created */
@@ -452,12 +452,12 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
                return -EIO;
 
        sccb = buffer->sccb;
-       if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask)
+       if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK)
                /* Use normal write message */
-               sccb->msg_buf.header.type = EvTyp_Msg;
-       else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask)
+               sccb->msg_buf.header.type = EVTYP_MSG;
+       else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK)
                /* Use write priority message */
-               sccb->msg_buf.header.type = EvTyp_PMsgCmd;
+               sccb->msg_buf.header.type = EVTYP_PMSGCMD;
        else
                return -ENOSYS;
        buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
new file mode 100644 (file)
index 0000000..52283da
--- /dev/null
@@ -0,0 +1,255 @@
+/*
+ * Sclp "store data in absolute storage"
+ *
+ * Copyright IBM Corp. 2003,2007
+ * Author(s): Michael Holzheu
+ */
+
+#include <linux/sched.h>
+#include <asm/sclp.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+#include "sclp.h"
+#include "sclp_rw.h"
+
+#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
+#define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x )
+
+#define SDIAS_RETRIES 300
+#define SDIAS_SLEEP_TICKS 50
+
+#define EQ_STORE_DATA  0x0
+#define EQ_SIZE                0x1
+#define DI_FCP_DUMP    0x0
+#define ASA_SIZE_32    0x0
+#define ASA_SIZE_64    0x1
+#define EVSTATE_ALL_STORED     0x0
+#define EVSTATE_NO_DATA                0x3
+#define EVSTATE_PART_STORED    0x10
+
+static struct debug_info *sdias_dbf;
+
+static struct sclp_register sclp_sdias_register = {
+       .send_mask = EVTYP_SDIAS_MASK,
+};
+
+struct sdias_evbuf {
+       struct  evbuf_header hdr;
+       u8      event_qual;
+       u8      data_id;
+       u64     reserved2;
+       u32     event_id;
+       u16     reserved3;
+       u8      asa_size;
+       u8      event_status;
+       u32     reserved4;
+       u32     blk_cnt;
+       u64     asa;
+       u32     reserved5;
+       u32     fbn;
+       u32     reserved6;
+       u32     lbn;
+       u16     reserved7;
+       u16     dbs;
+} __attribute__((packed));
+
+struct sdias_sccb {
+       struct sccb_header  hdr;
+       struct sdias_evbuf  evbuf;
+} __attribute__((packed));
+
+static struct sdias_sccb sccb __attribute__((aligned(4096)));
+
+static int sclp_req_done;
+static wait_queue_head_t sdias_wq;
+static DEFINE_MUTEX(sdias_mutex);
+
+static void sdias_callback(struct sclp_req *request, void *data)
+{
+       struct sdias_sccb *sccb;
+
+       sccb = (struct sdias_sccb *) request->sccb;
+       sclp_req_done = 1;
+       wake_up(&sdias_wq); /* Inform caller that the request is complete */
+       TRACE("callback done\n");
+}
+
+static int sdias_sclp_send(struct sclp_req *req)
+{
+       int retries;
+       int rc;
+
+       for (retries = SDIAS_RETRIES; retries; retries--) {
+               sclp_req_done = 0;
+               TRACE("add request\n");
+               rc = sclp_add_request(req);
+               if (rc) {
+                       /* not initiated, wait some time and retry */
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       TRACE("add request failed: rc = %i\n",rc);
+                       schedule_timeout(SDIAS_SLEEP_TICKS);
+                       continue;
+               }
+               /* initiated, wait for completion of service call */
+               wait_event(sdias_wq, (sclp_req_done == 1));
+               if (req->status == SCLP_REQ_FAILED) {
+                       TRACE("sclp request failed\n");
+                       rc = -EIO;
+                       continue;
+               }
+               TRACE("request done\n");
+               break;
+       }
+       return rc;
+}
+
+/*
+ * Get number of blocks (4K) available in the HSA
+ */
+int sclp_sdias_blk_count(void)
+{
+       struct sclp_req request;
+       int rc;
+
+       mutex_lock(&sdias_mutex);
+
+       memset(&sccb, 0, sizeof(sccb));
+       memset(&request, 0, sizeof(request));
+
+       sccb.hdr.length = sizeof(sccb);
+       sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+       sccb.evbuf.hdr.type = EVTYP_SDIAS;
+       sccb.evbuf.event_qual = EQ_SIZE;
+       sccb.evbuf.data_id = DI_FCP_DUMP;
+       sccb.evbuf.event_id = 4712;
+       sccb.evbuf.dbs = 1;
+
+       request.sccb = &sccb;
+       request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+       request.status = SCLP_REQ_FILLED;
+       request.callback = sdias_callback;
+
+       rc = sdias_sclp_send(&request);
+       if (rc) {
+               ERROR_MSG("sclp_send failed for get_nr_blocks\n");
+               goto out;
+       }
+       if (sccb.hdr.response_code != 0x0020) {
+               TRACE("send failed: %x\n", sccb.hdr.response_code);
+               rc = -EIO;
+               goto out;
+       }
+
+       switch (sccb.evbuf.event_status) {
+               case 0:
+                       rc = sccb.evbuf.blk_cnt;
+                       break;
+               default:
+                       ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status);
+                       rc = -EIO;
+                       goto out;
+       }
+       TRACE("%i blocks\n", rc);
+out:
+       mutex_unlock(&sdias_mutex);
+       return rc;
+}
+
+/*
+ * Copy from HSA to absolute storage (not reentrant):
+ *
+ * @dest     : Address of buffer where data should be copied
+ * @start_blk: Start Block (beginning with 1)
+ * @nr_blks  : Number of 4K blocks to copy
+ *
+ * Return Value: 0 : Requested 'number' of blocks of data copied
+ *              <0: ERROR - negative event status
+ */
+int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
+{
+       struct sclp_req request;
+       int rc;
+
+       mutex_lock(&sdias_mutex);
+
+       memset(&sccb, 0, sizeof(sccb));
+       memset(&request, 0, sizeof(request));
+
+       sccb.hdr.length = sizeof(sccb);
+       sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+       sccb.evbuf.hdr.type = EVTYP_SDIAS;
+       sccb.evbuf.hdr.flags = 0;
+       sccb.evbuf.event_qual = EQ_STORE_DATA;
+       sccb.evbuf.data_id = DI_FCP_DUMP;
+       sccb.evbuf.event_id = 4712;
+#ifdef __s390x__
+       sccb.evbuf.asa_size = ASA_SIZE_64;
+#else
+       sccb.evbuf.asa_size = ASA_SIZE_32;
+#endif
+       sccb.evbuf.event_status = 0;
+       sccb.evbuf.blk_cnt = nr_blks;
+       sccb.evbuf.asa = (unsigned long)dest;
+       sccb.evbuf.fbn = start_blk;
+       sccb.evbuf.lbn = 0;
+       sccb.evbuf.dbs = 1;
+
+       request.sccb     = &sccb;
+       request.command  = SCLP_CMDW_WRITE_EVENT_DATA;
+       request.status   = SCLP_REQ_FILLED;
+       request.callback = sdias_callback;
+
+       rc = sdias_sclp_send(&request);
+       if (rc) {
+               ERROR_MSG("sclp_send failed: %x\n", rc);
+               goto out;
+       }
+       if (sccb.hdr.response_code != 0x0020) {
+               TRACE("copy failed: %x\n", sccb.hdr.response_code);
+               rc = -EIO;
+               goto out;
+       }
+
+       switch (sccb.evbuf.event_status) {
+               case EVSTATE_ALL_STORED:
+                       TRACE("all stored\n");
+               case EVSTATE_PART_STORED:
+                       TRACE("part stored: %i\n", sccb.evbuf.blk_cnt);
+                       break;
+               case EVSTATE_NO_DATA:
+                       TRACE("no data\n");
+               default:
+                       ERROR_MSG("Error from SCLP while copying hsa. "
+                                 "Event status = %x\n",
+                               sccb.evbuf.event_status);
+                       rc = -EIO;
+       }
+out:
+       mutex_unlock(&sdias_mutex);
+       return rc;
+}
+
+int __init sdias_init(void)
+{
+       int rc;
+
+       if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+               return 0;
+       sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
+       debug_register_view(sdias_dbf, &debug_sprintf_view);
+       debug_set_level(sdias_dbf, 6);
+       rc = sclp_register(&sclp_sdias_register);
+       if (rc) {
+               ERROR_MSG("sclp register failed\n");
+               return rc;
+       }
+       init_waitqueue_head(&sdias_wq);
+       TRACE("init done\n");
+       return 0;
+}
+
+void __exit sdias_exit(void)
+{
+       debug_unregister(sdias_dbf);
+       sclp_unregister(&sclp_sdias_register);
+}
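
A hedged sketch of the calling sequence that zcore.c (below) relies on: query the HSA block count first, then copy 4K blocks. The "block 2 is the first data block" numbering mirrors zcore's memcpy_hsa() and is an assumption here; the prototypes are used exactly as zcore uses them:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/sclp.h>

/* Illustrative only: fetch the first 4K block of dump data from the HSA. */
static int read_first_hsa_block(void)
{
	unsigned long page;
	int blocks, rc;

	blocks = sclp_sdias_blk_count();
	if (blocks <= 0)
		return blocks ? blocks : -ENODATA;

	page = get_zeroed_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	/* Block numbering as used by zcore: block 2 is the first data block. */
	rc = sclp_sdias_copy((void *) page, 2, 1);
	free_page(page);
	return rc;
}
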
index 076816b..e3b3d39 100644 (file)
@@ -648,7 +648,7 @@ sclp_eval_textcmd(struct gds_subvector *start,
        subvec = start;
        while (subvec < end) {
                subvec = find_gds_subvector(subvec, end,
-                                           GDS_KEY_SelfDefTextMsg);
+                                           GDS_KEY_SELFDEFTEXTMSG);
                if (!subvec)
                        break;
                sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
@@ -664,7 +664,7 @@ sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
 
        vec = start;
        while (vec < end) {
-               vec = find_gds_vector(vec, end, GDS_ID_TextCmd);
+               vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD);
                if (!vec)
                        break;
                sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
@@ -703,7 +703,7 @@ sclp_tty_state_change(struct sclp_register *reg)
 
 static struct sclp_register sclp_input_event =
 {
-       .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask,
+       .receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
        .state_change_fn = sclp_tty_state_change,
        .receiver_fn = sclp_tty_receiver
 };
index f77dc33..7263347 100644 (file)
@@ -99,8 +99,8 @@ static void sclp_vt220_emit_current(void);
 
 /* Registration structure for our interest in SCLP event buffers */
 static struct sclp_register sclp_vt220_register = {
-       .send_mask              = EvTyp_VT220Msg_Mask,
-       .receive_mask           = EvTyp_VT220Msg_Mask,
+       .send_mask              = EVTYP_VT220MSG_MASK,
+       .receive_mask           = EVTYP_VT220MSG_MASK,
        .state_change_fn        = NULL,
        .receiver_fn            = sclp_vt220_receiver_fn
 };
@@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data)
 static int
 __sclp_vt220_emit(struct sclp_vt220_request *request)
 {
-       if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) {
+       if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) {
                request->sclp_req.status = SCLP_REQ_FAILED;
                return -EIO;
        }
@@ -284,7 +284,7 @@ sclp_vt220_initialize_page(void *page)
        sccb->header.length = sizeof(struct sclp_vt220_sccb);
        sccb->header.function_code = SCLP_NORMAL_WRITE;
        sccb->header.response_code = 0x0000;
-       sccb->evbuf.type = EvTyp_VT220Msg;
+       sccb->evbuf.type = EVTYP_VT220MSG;
        sccb->evbuf.length = sizeof(struct evbuf_header);
 
        return request;
index b87d3b0..a5a00e9 100644 (file)
@@ -125,7 +125,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
          .recording_name = "EREP",
          .minor_num      = 0,
          .buffer_free    = 1,
-         .priv_lock      = SPIN_LOCK_UNLOCKED,
+         .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
          .autorecording  = 1,
          .autopurge      = 1,
        },
@@ -134,7 +134,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
          .recording_name = "ACCOUNT",
          .minor_num      = 1,
          .buffer_free    = 1,
-         .priv_lock      = SPIN_LOCK_UNLOCKED,
+         .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
          .autorecording  = 1,
          .autopurge      = 1,
        },
@@ -143,7 +143,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
          .recording_name = "SYMPTOM",
          .minor_num      = 2,
          .buffer_free    = 1,
-         .priv_lock      = SPIN_LOCK_UNLOCKED,
+         .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
          .autorecording  = 1,
          .autopurge      = 1,
        }
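
For context, __SPIN_LOCK_UNLOCKED(name) differs from the old SPIN_LOCK_UNLOCKED in that each statically initialized lock gets its own name (and lockdep class), which is why the array elements above now name themselves. A minimal sketch of the pattern, with made-up names:

#include <linux/spinlock.h>

struct foo {
	int count;
	spinlock_t lock;
};

/* Each element names its own lock so lockdep can tell them apart; the old
 * SPIN_LOCK_UNLOCKED initializer made all such locks share one class.
 */
static struct foo foo_table[] = {
	{ .count = 0, .lock = __SPIN_LOCK_UNLOCKED(foo_table[0].lock) },
	{ .count = 0, .lock = __SPIN_LOCK_UNLOCKED(foo_table[1].lock) },
};
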
@@ -385,6 +385,9 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp)
 
        struct vmlogrdr_priv_t * logptr = filp->private_data;
 
+       iucv_path_sever(logptr->path, NULL);
+       kfree(logptr->path);
+       logptr->path = NULL;
        if (logptr->autorecording) {
                ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
                if (ret)
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
new file mode 100644 (file)
index 0000000..89d4393
--- /dev/null
@@ -0,0 +1,651 @@
+/*
+ * zcore module to export memory content and register sets for creating system
+ * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
+ * dump format as s390 standalone dumps.
+ *
+ * For more information please refer to Documentation/s390/zfcpdump.txt
+ *
+ * Copyright IBM Corp. 2003,2007
+ * Author(s): Michael Holzheu
+ */
+
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/utsname.h>
+#include <linux/debugfs.h>
+#include <asm/ipl.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+#include <asm/sigp.h>
+#include <asm/uaccess.h>
+#include <asm/debug.h>
+#include <asm/processor.h>
+#include <asm/irqflags.h>
+
+#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
+#define MSG(x...) printk( KERN_ALERT x )
+#define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x )
+
+#define TO_USER                0
+#define TO_KERNEL      1
+
+enum arch_id {
+       ARCH_S390       = 0,
+       ARCH_S390X      = 1,
+};
+
+/* dump system info */
+
+struct sys_info {
+       enum arch_id    arch;
+       unsigned long   sa_base;
+       u32             sa_size;
+       int             cpu_map[NR_CPUS];
+       unsigned long   mem_size;
+       union save_area lc_mask;
+};
+
+static struct sys_info sys_info;
+static struct debug_info *zcore_dbf;
+static int hsa_available;
+static struct dentry *zcore_dir;
+static struct dentry *zcore_file;
+
+/*
+ * Copy memory from HSA to kernel or user memory (not reentrant):
+ *
+ * @dest:  Kernel or user buffer where memory should be copied to
+ * @src:   Start address within HSA where data should be copied
+ * @count: Size of buffer, which should be copied
+ * @mode:  Either TO_KERNEL or TO_USER
+ */
+static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
+{
+       int offs, blk_num;
+       static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+
+       if (count == 0)
+               return 0;
+
+       /* copy first block */
+       offs = 0;
+       if ((src % PAGE_SIZE) != 0) {
+               blk_num = src / PAGE_SIZE + 2;
+               if (sclp_sdias_copy(buf, blk_num, 1)) {
+                       TRACE("sclp_sdias_copy() failed\n");
+                       return -EIO;
+               }
+               offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
+               if (mode == TO_USER) {
+                       if (copy_to_user((__force __user void*) dest,
+                                        buf + (src % PAGE_SIZE), offs))
+                               return -EFAULT;
+               } else
+                       memcpy(dest, buf + (src % PAGE_SIZE), offs);
+       }
+       if (offs == count)
+               goto out;
+
+       /* copy middle */
+       for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
+               blk_num = (src + offs) / PAGE_SIZE + 2;
+               if (sclp_sdias_copy(buf, blk_num, 1)) {
+                       TRACE("sclp_sdias_copy() failed\n");
+                       return -EIO;
+               }
+               if (mode == TO_USER) {
+                       if (copy_to_user((__force __user void*) dest + offs,
+                                        buf, PAGE_SIZE))
+                               return -EFAULT;
+               } else
+                       memcpy(dest + offs, buf, PAGE_SIZE);
+       }
+       if (offs == count)
+               goto out;
+
+       /* copy last block */
+       blk_num = (src + offs) / PAGE_SIZE + 2;
+       if (sclp_sdias_copy(buf, blk_num, 1)) {
+               TRACE("sclp_sdias_copy() failed\n");
+               return -EIO;
+       }
+       if (mode == TO_USER) {
+               if (copy_to_user((__force __user void*) dest + offs, buf,
+                                PAGE_SIZE))
+                       return -EFAULT;
+       } else
+               memcpy(dest + offs, buf, count - offs);
+out:
+       return 0;
+}
+
+static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
+{
+       return memcpy_hsa((void __force *) dest, src, count, TO_USER);
+}
+
+static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
+{
+       return memcpy_hsa(dest, src, count, TO_KERNEL);
+}
+
+static int memcpy_real(void *dest, unsigned long src, size_t count)
+{
+       unsigned long flags;
+       int rc = -EFAULT;
+       register unsigned long _dest asm("2") = (unsigned long) dest;
+       register unsigned long _len1 asm("3") = (unsigned long) count;
+       register unsigned long _src  asm("4") = src;
+       register unsigned long _len2 asm("5") = (unsigned long) count;
+
+       if (count == 0)
+               return 0;
+       flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */
+       asm volatile (
+               "0:     mvcle   %1,%2,0x0\n"
+               "1:     jo      0b\n"
+               "       lhi     %0,0x0\n"
+               "2:\n"
+               EX_TABLE(1b,2b)
+               : "+d" (rc)
+               : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2)
+               : "cc", "memory");
+       __raw_local_irq_ssm(flags);
+
+       return rc;
+}
+
+static int memcpy_real_user(__user void *dest, unsigned long src, size_t count)
+{
+       static char buf[4096];
+       int offs = 0, size;
+
+       while (offs < count) {
+               size = min(sizeof(buf), count - offs);
+               if (memcpy_real(buf, src + offs, size))
+                       return -EFAULT;
+               if (copy_to_user(dest + offs, buf, size))
+                       return -EFAULT;
+               offs += size;
+       }
+       return 0;
+}
+
+#ifdef __s390x__
+/*
+ * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
+ */
+static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
+                                     int cpu)
+{
+       int i;
+
+       for (i = 0; i < 16; i++) {
+               out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
+               out->s390.acc_regs[i] = in->s390x.acc_regs[i];
+               out->s390.ctrl_regs[i] =
+                       in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
+       }
+       /* lowcore for 31 bit has only space for fp regs 0,2,4,6 */
+       out->s390.fp_regs[0] = in->s390x.fp_regs[0];
+       out->s390.fp_regs[1] = in->s390x.fp_regs[2];
+       out->s390.fp_regs[2] = in->s390x.fp_regs[4];
+       out->s390.fp_regs[3] = in->s390x.fp_regs[6];
+       memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
+       out->s390.psw[1] |= 0x8; /* set bit 12 */
+       memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4);
+       out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
+       out->s390.pref_reg = in->s390x.pref_reg;
+       out->s390.timer = in->s390x.timer;
+       out->s390.clk_cmp = in->s390x.clk_cmp;
+}
+
+static void __init s390x_to_s390_save_areas(void)
+{
+       int i = 1;
+       static union save_area tmp;
+
+       while (zfcpdump_save_areas[i]) {
+               s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
+               memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
+               i++;
+       }
+}
+
+#endif /* __s390x__ */
+
+static int __init init_cpu_info(enum arch_id arch)
+{
+       union save_area *sa;
+
+       /* get info for boot cpu from lowcore, stored in the HSA */
+
+       sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+       if (!sa) {
+               ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__);
+               return -ENOMEM;
+       }
+       if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
+               ERROR_MSG("could not copy from HSA\n");
+               kfree(sa);
+               return -EIO;
+       }
+       zfcpdump_save_areas[0] = sa;
+
+#ifdef __s390x__
+       /* convert s390x regs to s390, if we are dumping an s390 Linux */
+
+       if (arch == ARCH_S390)
+               s390x_to_s390_save_areas();
+#endif
+
+       return 0;
+}
+
+static DEFINE_MUTEX(zcore_mutex);
+
+#define DUMP_VERSION   0x3
+#define DUMP_MAGIC     0xa8190173618f23fdULL
+#define DUMP_ARCH_S390X        2
+#define DUMP_ARCH_S390 1
+#define HEADER_SIZE    4096
+
+/* dump header dumped according to s390 crash dump format */
+
+struct zcore_header {
+       u64 magic;
+       u32 version;
+       u32 header_size;
+       u32 dump_level;
+       u32 page_size;
+       u64 mem_size;
+       u64 mem_start;
+       u64 mem_end;
+       u32 num_pages;
+       u32 pad1;
+       u64 tod;
+       cpuid_t cpu_id;
+       u32 arch_id;
+       u32 build_arch;
+       char pad2[4016];
+} __attribute__((packed,__aligned__(16)));
+
+static struct zcore_header zcore_header = {
+       .magic          = DUMP_MAGIC,
+       .version        = DUMP_VERSION,
+       .header_size    = 4096,
+       .dump_level     = 0,
+       .page_size      = PAGE_SIZE,
+       .mem_start      = 0,
+#ifdef __s390x__
+       .build_arch     = DUMP_ARCH_S390X,
+#else
+       .build_arch     = DUMP_ARCH_S390,
+#endif
+};
+
+/*
+ * Copy lowcore info to buffer. Use map in order to copy only register parts.
+ *
+ * @buf:    User buffer
+ * @sa:     Pointer to save area
+ * @sa_off: Offset in save area to copy
+ * @len:    Number of bytes to copy
+ */
+static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
+{
+       int i;
+       char *lc_mask = (char*)&sys_info.lc_mask;
+
+       for (i = 0; i < len; i++) {
+               if (!lc_mask[i + sa_off])
+                       continue;
+               if (copy_to_user(buf + i, sa + sa_off + i, 1))
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+/*
+ * Copy lowcore info into the output buffer, if necessary
+ *
+ * @buf:   User buffer
+ * @start: Start address of buffer in dump memory
+ * @count: Size of buffer
+ */
+static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
+{
+       unsigned long end;
+       int i = 0;
+
+       if (count == 0)
+               return 0;
+
+       end = start + count;
+       while (zfcpdump_save_areas[i]) {
+               unsigned long cp_start, cp_end; /* copy range */
+               unsigned long sa_start, sa_end; /* save area range */
+               unsigned long prefix;
+               unsigned long sa_off, len, buf_off;
+
+               if (sys_info.arch == ARCH_S390)
+                       prefix = zfcpdump_save_areas[i]->s390.pref_reg;
+               else
+                       prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
+
+               sa_start = prefix + sys_info.sa_base;
+               sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
+
+               if ((end < sa_start) || (start > sa_end))
+                       goto next;
+               cp_start = max(start, sa_start);
+               cp_end = min(end, sa_end);
+
+               buf_off = cp_start - start;
+               sa_off = cp_start - sa_start;
+               len = cp_end - cp_start;
+
+               TRACE("copy_lc for: %lx\n", start);
+               if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
+                       return -EFAULT;
+next:
+               i++;
+       }
+       return 0;
+}
+
+/*
+ * Read routine for zcore character device
+ * First 4K are dump header
+ * Next 32MB are HSA Memory
+ * Rest is read from absolute Memory
+ */
+static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
+                         loff_t *ppos)
+{
+       unsigned long mem_start; /* Start address in memory */
+       size_t mem_offs;         /* Offset in dump memory */
+       size_t hdr_count;        /* Size of header part of output buffer */
+       size_t size;
+       int rc;
+
+       mutex_lock(&zcore_mutex);
+
+       if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
+               rc = -EINVAL;
+               goto fail;
+       }
+
+       count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
+
+       /* Copy dump header */
+       if (*ppos < HEADER_SIZE) {
+               size = min(count, (size_t) (HEADER_SIZE - *ppos));
+               if (copy_to_user(buf, &zcore_header + *ppos, size)) {
+                       rc = -EFAULT;
+                       goto fail;
+               }
+               hdr_count = size;
+               mem_start = 0;
+       } else {
+               hdr_count = 0;
+               mem_start = *ppos - HEADER_SIZE;
+       }
+
+       mem_offs = 0;
+
+       /* Copy from HSA data */
+       if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
+               size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
+                          - mem_start));
+               rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
+               if (rc)
+                       goto fail;
+
+               mem_offs += size;
+       }
+
+       /* Copy from real mem */
+       size = count - mem_offs - hdr_count;
+       rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
+                             size);
+       if (rc)
+               goto fail;
+
+       /*
+        * Since s390 dump analysis tools like lcrash or crash
+        * expect register sets in the prefix pages of the cpus,
+        * we copy them into the read buffer, if necessary.
+        * buf + hdr_count: Start of memory part of output buffer
+        * mem_start: Start memory address to copy from
+        * count - hdr_count: Size of memory area to copy
+        */
+       if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
+               rc = -EFAULT;
+               goto fail;
+       }
+       *ppos += count;
+fail:
+       mutex_unlock(&zcore_mutex);
+       return (rc < 0) ? rc : count;
+}
+
+static int zcore_open(struct inode *inode, struct file *filp)
+{
+       if (!hsa_available)
+               return -ENODATA;
+       else
+               return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+static int zcore_release(struct inode *inode, struct file *filep)
+{
+       diag308(DIAG308_REL_HSA, NULL);
+       hsa_available = 0;
+       return 0;
+}
+
+static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
+{
+       loff_t rc;
+
+       mutex_lock(&zcore_mutex);
+       switch (orig) {
+       case 0:
+               file->f_pos = offset;
+               rc = file->f_pos;
+               break;
+       case 1:
+               file->f_pos += offset;
+               rc = file->f_pos;
+               break;
+       default:
+               rc = -EINVAL;
+       }
+       mutex_unlock(&zcore_mutex);
+       return rc;
+}
+
+static struct file_operations zcore_fops = {
+       .owner          = THIS_MODULE,
+       .llseek         = zcore_lseek,
+       .read           = zcore_read,
+       .open           = zcore_open,
+       .release        = zcore_release,
+};
+
+
+static void __init set_s390_lc_mask(union save_area *map)
+{
+       memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save));
+       memset(&map->s390.timer, 0xff, sizeof(map->s390.timer));
+       memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp));
+       memset(&map->s390.psw, 0xff, sizeof(map->s390.psw));
+       memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg));
+       memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs));
+       memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs));
+       memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs));
+       memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs));
+}
+
+static void __init set_s390x_lc_mask(union save_area *map)
+{
+       memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs));
+       memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs));
+       memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw));
+       memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg));
+       memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg));
+       memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg));
+       memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer));
+       memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp));
+       memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs));
+       memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs));
+}
+
+/*
+ * Initialize dump globals for a given architecture
+ */
+static int __init sys_info_init(enum arch_id arch)
+{
+       switch (arch) {
+       case ARCH_S390X:
+               MSG("DETECTED 'S390X (64 bit) OS'\n");
+               sys_info.sa_base = SAVE_AREA_BASE_S390X;
+               sys_info.sa_size = sizeof(struct save_area_s390x);
+               set_s390x_lc_mask(&sys_info.lc_mask);
+               break;
+       case ARCH_S390:
+               MSG("DETECTED 'S390 (32 bit) OS'\n");
+               sys_info.sa_base = SAVE_AREA_BASE_S390;
+               sys_info.sa_size = sizeof(struct save_area_s390);
+               set_s390_lc_mask(&sys_info.lc_mask);
+               break;
+       default:
+               ERROR_MSG("unknown architecture 0x%x.\n",arch);
+               return -EINVAL;
+       }
+       sys_info.arch = arch;
+       if (init_cpu_info(arch)) {
+               ERROR_MSG("get cpu info failed\n");
+               return -ENOMEM;
+       }
+       sys_info.mem_size = real_memory_size;
+
+       return 0;
+}
+
+static int __init check_sdias(void)
+{
+       int rc, act_hsa_size;
+
+       rc = sclp_sdias_blk_count();
+       if (rc < 0) {
+               ERROR_MSG("Could not determine HSA size\n");
+               return rc;
+       }
+       act_hsa_size = (rc - 1) * PAGE_SIZE;
+       if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
+               ERROR_MSG("HSA size too small: %i\n", act_hsa_size);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void __init zcore_header_init(int arch, struct zcore_header *hdr)
+{
+       if (arch == ARCH_S390X)
+               hdr->arch_id = DUMP_ARCH_S390X;
+       else
+               hdr->arch_id = DUMP_ARCH_S390;
+       hdr->mem_size = sys_info.mem_size;
+       hdr->mem_end = sys_info.mem_size;
+       hdr->num_pages = sys_info.mem_size / PAGE_SIZE;
+       hdr->tod = get_clock();
+       get_cpu_id(&hdr->cpu_id);
+}
+
+extern int sdias_init(void);
+
+static int __init zcore_init(void)
+{
+       unsigned char arch;
+       int rc;
+
+       if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+               return -ENODATA;
+
+       zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
+       debug_register_view(zcore_dbf, &debug_sprintf_view);
+       debug_set_level(zcore_dbf, 6);
+
+       TRACE("devno:  %x\n", ipl_info.data.fcp.dev_id.devno);
+       TRACE("wwpn:   %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
+       TRACE("lun:    %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
+
+       rc = sdias_init();
+       if (rc)
+               goto fail;
+
+       rc = check_sdias();
+       if (rc) {
+               ERROR_MSG("Dump initialization failed\n");
+               goto fail;
+       }
+
+       rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
+       if (rc) {
+               ERROR_MSG("sdias memcpy for arch id failed\n");
+               goto fail;
+       }
+
+#ifndef __s390x__
+       if (arch == ARCH_S390X) {
+               ERROR_MSG("32 bit dumper can't dump 64 bit system!\n");
+               rc = -EINVAL;
+               goto fail;
+       }
+#endif
+
+       rc = sys_info_init(arch);
+       if (rc) {
+               ERROR_MSG("arch init failed\n");
+               goto fail;
+       }
+
+       zcore_header_init(arch, &zcore_header);
+
+       zcore_dir = debugfs_create_dir("zcore" , NULL);
+       if (!zcore_dir) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+       zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
+                                        &zcore_fops);
+       if (!zcore_file) {
+               debugfs_remove(zcore_dir);
+               rc = -ENOMEM;
+               goto fail;
+       }
+       hsa_available = 1;
+       return 0;
+
+fail:
+       diag308(DIAG308_REL_HSA, NULL);
+       return rc;
+}
+
+extern void sdias_exit(void);
+
+static void __exit zcore_exit(void)
+{
+       debug_unregister(zcore_dbf);
+       sdias_exit();
+       diag308(DIAG308_REL_HSA, NULL);
+}
+
+MODULE_AUTHOR("Copyright IBM Corp. 2003,2007");
+MODULE_DESCRIPTION("zcore module for zfcpdump support");
+MODULE_LICENSE("GPL");
+
+subsys_initcall(zcore_init);
+module_exit(zcore_exit);
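The zcore code above only implements SEEK_SET and SEEK_CUR in zcore_lseek() and exposes a single read-only debugfs file, so a dump consumer is expected to seek and read "zcore/mem" directly. A minimal userspace sketch, assuming debugfs is mounted at the usual /sys/kernel/debug location and reading an arbitrary 4 KiB from the start of the dump (the zcore header layout itself is not shown in this hunk):

/* Hypothetical consumer of the zcore debugfs file created above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	/* Path assumes the usual debugfs mount point. */
	int fd = open("/sys/kernel/debug/zcore/mem", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* SEEK_SET maps onto "case 0" in zcore_lseek(). */
	if (lseek(fd, 0, SEEK_SET) < 0) {
		perror("lseek");
		close(fd);
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes of dump data\n", n);
	close(fd);
	return 0;
}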
index c490c2a..cfaf77b 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the S/390 common i/o drivers
 #
 
-obj-y += airq.o blacklist.o chsc.o cio.o css.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
index 5aeb68e..e5ccda6 100644 (file)
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev)
 {
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
 
+       mutex_lock(&gdev->reg_mutex);
        __ccwgroup_remove_symlinks(gdev);
        device_unregister(dev);
+       mutex_unlock(&gdev->reg_mutex);
 }
 
 static ssize_t
@@ -173,7 +175,8 @@ ccwgroup_create(struct device *root,
                return -ENOMEM;
 
        atomic_set(&gdev->onoff, 0);
-
+       mutex_init(&gdev->reg_mutex);
+       mutex_lock(&gdev->reg_mutex);
        for (i = 0; i < argc; i++) {
                gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
 
@@ -183,12 +186,12 @@ ccwgroup_create(struct device *root,
                    || gdev->cdev[i]->id.driver_info !=
                    gdev->cdev[0]->id.driver_info) {
                        rc = -EINVAL;
-                       goto free_dev;
+                       goto error;
                }
                /* Don't allow a device to belong to more than one group. */
                if (gdev->cdev[i]->dev.driver_data) {
                        rc = -EINVAL;
-                       goto free_dev;
+                       goto error;
                }
                gdev->cdev[i]->dev.driver_data = gdev;
        }
@@ -203,9 +206,8 @@ ccwgroup_create(struct device *root,
                        gdev->cdev[0]->dev.bus_id);
 
        rc = device_register(&gdev->dev);
-       
        if (rc)
-               goto free_dev;
+               goto error;
        get_device(&gdev->dev);
        rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
 
@@ -216,27 +218,21 @@ ccwgroup_create(struct device *root,
 
        rc = __ccwgroup_create_symlinks(gdev);
        if (!rc) {
+               mutex_unlock(&gdev->reg_mutex);
                put_device(&gdev->dev);
                return 0;
        }
        device_remove_file(&gdev->dev, &dev_attr_ungroup);
        device_unregister(&gdev->dev);
 error:
-       for (i = 0; i < argc; i++)
-               if (gdev->cdev[i]) {
-                       put_device(&gdev->cdev[i]->dev);
-                       gdev->cdev[i]->dev.driver_data = NULL;
-               }
-       put_device(&gdev->dev);
-       return rc;
-free_dev:
        for (i = 0; i < argc; i++)
                if (gdev->cdev[i]) {
                        if (gdev->cdev[i]->dev.driver_data == gdev)
                                gdev->cdev[i]->dev.driver_data = NULL;
                        put_device(&gdev->cdev[i]->dev);
                }
-       kfree(gdev);
+       mutex_unlock(&gdev->reg_mutex);
+       put_device(&gdev->dev);
        return rc;
 }
 
@@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
        get_driver(&cdriver->driver);
        while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
                                         __ccwgroup_match_all))) {
-               __ccwgroup_remove_symlinks(to_ccwgroupdev(dev));
+               struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+
+               mutex_lock(&gdev->reg_mutex);
+               __ccwgroup_remove_symlinks(gdev);
                device_unregister(dev);
+               mutex_unlock(&gdev->reg_mutex);
                put_device(dev);
        }
        put_driver(&cdriver->driver);
@@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
        if (cdev->dev.driver_data) {
                gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
                if (get_device(&gdev->dev)) {
+                       mutex_lock(&gdev->reg_mutex);
                        if (device_is_registered(&gdev->dev))
                                return gdev;
+                       mutex_unlock(&gdev->reg_mutex);
                        put_device(&gdev->dev);
                }
                return NULL;
@@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev)
        if (gdev) {
                __ccwgroup_remove_symlinks(gdev);
                device_unregister(&gdev->dev);
+               mutex_unlock(&gdev->reg_mutex);
                put_device(&gdev->dev);
        }
 }
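The ccwgroup hunks above introduce a per-group reg_mutex so that ungrouping, driver unregistration and the cdev-to-gdev lookup cannot race; note that __ccwgroup_get_gdev_by_cdev() now returns with the mutex still held, and the caller drops it only after device_unregister(). A reduced sketch of that hand-off pattern, using a hypothetical grp_dev structure rather than the real ccwgroup types:

/* Sketch only: illustrates the "lookup returns locked" convention. */
#include <linux/device.h>
#include <linux/mutex.h>

struct grp_dev {
	struct device dev;
	struct mutex reg_mutex;
};

/* Return the group with reg_mutex held, or NULL if it is gone. */
static struct grp_dev *grp_get_locked(struct grp_dev *gdev)
{
	if (!get_device(&gdev->dev))
		return NULL;
	mutex_lock(&gdev->reg_mutex);
	if (device_is_registered(&gdev->dev))
		return gdev;	/* caller must unlock and put */
	mutex_unlock(&gdev->reg_mutex);
	put_device(&gdev->dev);
	return NULL;
}

static void grp_remove(struct grp_dev *gdev)
{
	struct grp_dev *locked = grp_get_locked(gdev);

	if (!locked)
		return;
	device_unregister(&locked->dev);
	mutex_unlock(&locked->reg_mutex);
	put_device(&locked->dev);
}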
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
new file mode 100644 (file)
index 0000000..ac289e6
--- /dev/null
@@ -0,0 +1,683 @@
+/*
+ *  drivers/s390/cio/chp.c
+ *
+ *    Copyright IBM Corp. 1999,2007
+ *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ *              Arnd Bergmann (arndb@de.ibm.com)
+ *              Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <asm/errno.h>
+#include <asm/chpid.h>
+#include <asm/sclp.h>
+
+#include "cio.h"
+#include "css.h"
+#include "ioasm.h"
+#include "cio_debug.h"
+#include "chp.h"
+
+#define to_channelpath(device) container_of(device, struct channel_path, dev)
+#define CHP_INFO_UPDATE_INTERVAL       1*HZ
+
+enum cfg_task_t {
+       cfg_none,
+       cfg_configure,
+       cfg_deconfigure
+};
+
+/* Map for pending configure tasks. */
+static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
+static DEFINE_MUTEX(cfg_lock);
+static int cfg_busy;
+
+/* Map for channel-path status. */
+static struct sclp_chp_info chp_info;
+static DEFINE_MUTEX(info_lock);
+
+/* Time after which channel-path status may be outdated. */
+static unsigned long chp_info_expires;
+
+/* Workqueue to perform pending configure tasks. */
+static struct workqueue_struct *chp_wq;
+static struct work_struct cfg_work;
+
+/* Wait queue for configure completion events. */
+static wait_queue_head_t cfg_wait_queue;
+
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+       return css[chpid.cssid]->chps[chpid.id];
+}
+
+/* Set vary state for given chpid. */
+static void set_chp_logically_online(struct chp_id chpid, int onoff)
+{
+       chpid_to_chp(chpid)->state = onoff;
+}
+
+/* On success return 0 if channel-path is varied offline, 1 if it is varied
+ * online. Return -ENODEV if channel-path is not registered. */
+int chp_get_status(struct chp_id chpid)
+{
+       return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
+}
+
+/**
+ * chp_get_sch_opm - return opm for subchannel
+ * @sch: subchannel
+ *
+ * Calculate and return the operational path mask (opm) based on the chpids
+ * used by the subchannel and the status of the associated channel-paths.
+ */
+u8 chp_get_sch_opm(struct subchannel *sch)
+{
+       struct chp_id chpid;
+       int opm;
+       int i;
+
+       opm = 0;
+       chp_id_init(&chpid);
+       for (i = 0; i < 8; i++) {
+               opm <<= 1;
+               chpid.id = sch->schib.pmcw.chpid[i];
+               if (chp_get_status(chpid) != 0)
+                       opm |= 1;
+       }
+       return opm;
+}
+
+/**
+ * chp_is_registered - check if a channel-path is registered
+ * @chpid: channel-path ID
+ *
+ * Return non-zero if a channel-path with the given chpid is registered,
+ * zero otherwise.
+ */
+int chp_is_registered(struct chp_id chpid)
+{
+       return chpid_to_chp(chpid) != NULL;
+}
+
+/*
+ * Function: s390_vary_chpid
+ * Varies the specified chpid online or offline
+ */
+static int s390_vary_chpid(struct chp_id chpid, int on)
+{
+       char dbf_text[15];
+       int status;
+
+       sprintf(dbf_text, on ? "varyon%x.%02x" : "varyoff%x.%02x", chpid.cssid,
+               chpid.id);
+       CIO_TRACE_EVENT(2, dbf_text);
+
+       status = chp_get_status(chpid);
+       if (status < 0) {
+               printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
+                      chpid.cssid, chpid.id);
+               return -EINVAL;
+       }
+
+       if (!on && !status) {
+               printk(KERN_ERR "chpid %x.%02x is already offline\n",
+                      chpid.cssid, chpid.id);
+               return -EINVAL;
+       }
+
+       set_chp_logically_online(chpid, on);
+       chsc_chp_vary(chpid, on);
+       return 0;
+}
+
+/*
+ * Channel measurement related functions
+ */
+static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf,
+                                         loff_t off, size_t count)
+{
+       struct channel_path *chp;
+       unsigned int size;
+
+       chp = to_channelpath(container_of(kobj, struct device, kobj));
+       if (!chp->cmg_chars)
+               return 0;
+
+       size = sizeof(struct cmg_chars);
+
+       if (off > size)
+               return 0;
+       if (off + count > size)
+               count = size - off;
+       memcpy(buf, chp->cmg_chars + off, count);
+       return count;
+}
+
+static struct bin_attribute chp_measurement_chars_attr = {
+       .attr = {
+               .name = "measurement_chars",
+               .mode = S_IRUSR,
+               .owner = THIS_MODULE,
+       },
+       .size = sizeof(struct cmg_chars),
+       .read = chp_measurement_chars_read,
+};
+
+static void chp_measurement_copy_block(struct cmg_entry *buf,
+                                      struct channel_subsystem *css,
+                                      struct chp_id chpid)
+{
+       void *area;
+       struct cmg_entry *entry, reference_buf;
+       int idx;
+
+       if (chpid.id < 128) {
+               area = css->cub_addr1;
+               idx = chpid.id;
+       } else {
+               area = css->cub_addr2;
+               idx = chpid.id - 128;
+       }
+       entry = area + (idx * sizeof(struct cmg_entry));
+       do {
+               memcpy(buf, entry, sizeof(*entry));
+               memcpy(&reference_buf, entry, sizeof(*entry));
+       } while (reference_buf.values[0] != buf->values[0]);
+}
+
+static ssize_t chp_measurement_read(struct kobject *kobj, char *buf,
+                                   loff_t off, size_t count)
+{
+       struct channel_path *chp;
+       struct channel_subsystem *css;
+       unsigned int size;
+
+       chp = to_channelpath(container_of(kobj, struct device, kobj));
+       css = to_css(chp->dev.parent);
+
+       size = sizeof(struct cmg_entry);
+
+       /* Only allow single reads. */
+       if (off || count < size)
+               return 0;
+       chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
+       count = size;
+       return count;
+}
+
+static struct bin_attribute chp_measurement_attr = {
+       .attr = {
+               .name = "measurement",
+               .mode = S_IRUSR,
+               .owner = THIS_MODULE,
+       },
+       .size = sizeof(struct cmg_entry),
+       .read = chp_measurement_read,
+};
+
+void chp_remove_cmg_attr(struct channel_path *chp)
+{
+       device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+       device_remove_bin_file(&chp->dev, &chp_measurement_attr);
+}
+
+int chp_add_cmg_attr(struct channel_path *chp)
+{
+       int ret;
+
+       ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
+       if (ret)
+               return ret;
+       ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
+       if (ret)
+               device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+       return ret;
+}
+
+/*
+ * Files for the channel path entries.
+ */
+static ssize_t chp_status_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct channel_path *chp = container_of(dev, struct channel_path, dev);
+
+       if (!chp)
+               return 0;
+       return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
+               sprintf(buf, "offline\n"));
+}
+
+static ssize_t chp_status_write(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       struct channel_path *cp = container_of(dev, struct channel_path, dev);
+       char cmd[10];
+       int num_args;
+       int error;
+
+       num_args = sscanf(buf, "%5s", cmd);
+       if (!num_args)
+               return count;
+
+       if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
+               error = s390_vary_chpid(cp->chpid, 1);
+       else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
+               error = s390_vary_chpid(cp->chpid, 0);
+       else
+               error = -EINVAL;
+
+       return error < 0 ? error : count;
+
+}
+
+static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
+
+static ssize_t chp_configure_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct channel_path *cp;
+       int status;
+
+       cp = container_of(dev, struct channel_path, dev);
+       status = chp_info_get_status(cp->chpid);
+       if (status < 0)
+               return status;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", status);
+}
+
+static int cfg_wait_idle(void);
+
+static ssize_t chp_configure_write(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
+{
+       struct channel_path *cp;
+       int val;
+       char delim;
+
+       if (sscanf(buf, "%d %c", &val, &delim) != 1)
+               return -EINVAL;
+       if (val != 0 && val != 1)
+               return -EINVAL;
+       cp = container_of(dev, struct channel_path, dev);
+       chp_cfg_schedule(cp->chpid, val);
+       cfg_wait_idle();
+
+       return count;
+}
+
+static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
+
+static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct channel_path *chp = container_of(dev, struct channel_path, dev);
+
+       if (!chp)
+               return 0;
+       return sprintf(buf, "%x\n", chp->desc.desc);
+}
+
+static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
+
+static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct channel_path *chp = to_channelpath(dev);
+
+       if (!chp)
+               return 0;
+       if (chp->cmg == -1) /* channel measurements not available */
+               return sprintf(buf, "unknown\n");
+       return sprintf(buf, "%x\n", chp->cmg);
+}
+
+static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
+
+static ssize_t chp_shared_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct channel_path *chp = to_channelpath(dev);
+
+       if (!chp)
+               return 0;
+       if (chp->shared == -1) /* channel measurements not available */
+               return sprintf(buf, "unknown\n");
+       return sprintf(buf, "%x\n", chp->shared);
+}
+
+static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
+
+static struct attribute *chp_attrs[] = {
+       &dev_attr_status.attr,
+       &dev_attr_configure.attr,
+       &dev_attr_type.attr,
+       &dev_attr_cmg.attr,
+       &dev_attr_shared.attr,
+       NULL,
+};
+
+static struct attribute_group chp_attr_group = {
+       .attrs = chp_attrs,
+};
+
+static void chp_release(struct device *dev)
+{
+       struct channel_path *cp;
+
+       cp = container_of(dev, struct channel_path, dev);
+       kfree(cp);
+}
+
+/**
+ * chp_new - register a new channel-path
+ * @chpid: channel-path ID
+ *
+ * Create and register a data structure representing the new channel-path. Return
+ * zero on success, non-zero otherwise.
+ */
+int chp_new(struct chp_id chpid)
+{
+       struct channel_path *chp;
+       int ret;
+
+       if (chp_is_registered(chpid))
+               return 0;
+       chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
+       if (!chp)
+               return -ENOMEM;
+
+       /* fill in status, etc. */
+       chp->chpid = chpid;
+       chp->state = 1;
+       chp->dev.parent = &css[chpid.cssid]->device;
+       chp->dev.release = chp_release;
+       snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
+                chpid.id);
+
+       /* Obtain channel path description and fill it in. */
+       ret = chsc_determine_channel_path_description(chpid, &chp->desc);
+       if (ret)
+               goto out_free;
+       if ((chp->desc.flags & 0x80) == 0) {
+               ret = -ENODEV;
+               goto out_free;
+       }
+       /* Get channel-measurement characteristics. */
+       if (css_characteristics_avail && css_chsc_characteristics.scmc
+           && css_chsc_characteristics.secm) {
+               ret = chsc_get_channel_measurement_chars(chp);
+               if (ret)
+                       goto out_free;
+       } else {
+               static int msg_done;
+
+               if (!msg_done) {
+                       printk(KERN_WARNING "cio: Channel measurements not "
+                              "available, continuing.\n");
+                       msg_done = 1;
+               }
+               chp->cmg = -1;
+       }
+
+       /* make it known to the system */
+       ret = device_register(&chp->dev);
+       if (ret) {
+               printk(KERN_WARNING "%s: could not register %x.%02x\n",
+                      __func__, chpid.cssid, chpid.id);
+               goto out_free;
+       }
+       ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
+       if (ret) {
+               device_unregister(&chp->dev);
+               goto out_free;
+       }
+       mutex_lock(&css[chpid.cssid]->mutex);
+       if (css[chpid.cssid]->cm_enabled) {
+               ret = chp_add_cmg_attr(chp);
+               if (ret) {
+                       sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
+                       device_unregister(&chp->dev);
+                       mutex_unlock(&css[chpid.cssid]->mutex);
+                       goto out_free;
+               }
+       }
+       css[chpid.cssid]->chps[chpid.id] = chp;
+       mutex_unlock(&css[chpid.cssid]->mutex);
+       return ret;
+out_free:
+       kfree(chp);
+       return ret;
+}
+
+/**
+ * chp_get_chp_desc - return newly allocated channel-path description
+ * @chpid: channel-path ID
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel-path ID. Return %NULL on error.
+ */
+void *chp_get_chp_desc(struct chp_id chpid)
+{
+       struct channel_path *chp;
+       struct channel_path_desc *desc;
+
+       chp = chpid_to_chp(chpid);
+       if (!chp)
+               return NULL;
+       desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
+       if (!desc)
+               return NULL;
+       memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+       return desc;
+}
+
+/**
+ * chp_process_crw - process channel-path status change
+ * @id: channel-path ID number
+ * @status: non-zero if channel-path has become available, zero otherwise
+ *
+ * Handle channel-report-words indicating that the status of a channel-path
+ * has changed.
+ */
+void chp_process_crw(int id, int status)
+{
+       struct chp_id chpid;
+
+       chp_id_init(&chpid);
+       chpid.id = id;
+       if (status) {
+               if (!chp_is_registered(chpid))
+                       chp_new(chpid);
+               chsc_chp_online(chpid);
+       } else
+               chsc_chp_offline(chpid);
+}
+
+static inline int info_bit_num(struct chp_id id)
+{
+       return id.id + id.cssid * (__MAX_CHPID + 1);
+}
+
+/* Force chp_info refresh on next call to info_validate(). */
+static void info_expire(void)
+{
+       mutex_lock(&info_lock);
+       chp_info_expires = jiffies - 1;
+       mutex_unlock(&info_lock);
+}
+
+/* Ensure that chp_info is up-to-date. */
+static int info_update(void)
+{
+       int rc;
+
+       mutex_lock(&info_lock);
+       rc = 0;
+       if (time_after(jiffies, chp_info_expires)) {
+               /* Data is too old, update. */
+               rc = sclp_chp_read_info(&chp_info);
+               chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
+       }
+       mutex_unlock(&info_lock);
+
+       return rc;
+}
+
+/**
+ * chp_info_get_status - retrieve configure status of a channel-path
+ * @chpid: channel-path ID
+ *
+ * On success, return 0 for standby, 1 for configured, 2 for reserved,
+ * 3 for not recognized. Return negative error code on error.
+ */
+int chp_info_get_status(struct chp_id chpid)
+{
+       int rc;
+       int bit;
+
+       rc = info_update();
+       if (rc)
+               return rc;
+
+       bit = info_bit_num(chpid);
+       mutex_lock(&info_lock);
+       if (!chp_test_bit(chp_info.recognized, bit))
+               rc = CHP_STATUS_NOT_RECOGNIZED;
+       else if (chp_test_bit(chp_info.configured, bit))
+               rc = CHP_STATUS_CONFIGURED;
+       else if (chp_test_bit(chp_info.standby, bit))
+               rc = CHP_STATUS_STANDBY;
+       else
+               rc = CHP_STATUS_RESERVED;
+       mutex_unlock(&info_lock);
+
+       return rc;
+}
+
+/* Return configure task for chpid. */
+static enum cfg_task_t cfg_get_task(struct chp_id chpid)
+{
+       return chp_cfg_task[chpid.cssid][chpid.id];
+}
+
+/* Set configure task for chpid. */
+static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
+{
+       chp_cfg_task[chpid.cssid][chpid.id] = cfg;
+}
+
+/* Perform one configure/deconfigure request. Reschedule the work function
+ * until the last pending request has been processed. */
+static void cfg_func(struct work_struct *work)
+{
+       struct chp_id chpid;
+       enum cfg_task_t t;
+
+       mutex_lock(&cfg_lock);
+       t = cfg_none;
+       chp_id_for_each(&chpid) {
+               t = cfg_get_task(chpid);
+               if (t != cfg_none) {
+                       cfg_set_task(chpid, cfg_none);
+                       break;
+               }
+       }
+       mutex_unlock(&cfg_lock);
+
+       switch (t) {
+       case cfg_configure:
+               sclp_chp_configure(chpid);
+               info_expire();
+               chsc_chp_online(chpid);
+               break;
+       case cfg_deconfigure:
+               sclp_chp_deconfigure(chpid);
+               info_expire();
+               chsc_chp_offline(chpid);
+               break;
+       case cfg_none:
+               /* Get updated information after last change. */
+               info_update();
+               mutex_lock(&cfg_lock);
+               cfg_busy = 0;
+               mutex_unlock(&cfg_lock);
+               wake_up_interruptible(&cfg_wait_queue);
+               return;
+       }
+       queue_work(chp_wq, &cfg_work);
+}
+
+/**
+ * chp_cfg_schedule - schedule chpid configuration request
+ * @chpid: channel-path ID
+ * @configure: non-zero for configure, zero for deconfigure
+ *
+ * Schedule a channel-path configuration/deconfiguration request.
+ */
+void chp_cfg_schedule(struct chp_id chpid, int configure)
+{
+       CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
+                     configure);
+       mutex_lock(&cfg_lock);
+       cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
+       cfg_busy = 1;
+       mutex_unlock(&cfg_lock);
+       queue_work(chp_wq, &cfg_work);
+}
+
+/**
+ * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
+ * @chpid: channel-path ID
+ *
+ * Cancel an active channel-path deconfiguration request if it has not yet
+ * been performed.
+ */
+void chp_cfg_cancel_deconfigure(struct chp_id chpid)
+{
+       CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
+       mutex_lock(&cfg_lock);
+       if (cfg_get_task(chpid) == cfg_deconfigure)
+               cfg_set_task(chpid, cfg_none);
+       mutex_unlock(&cfg_lock);
+}
+
+static int cfg_wait_idle(void)
+{
+       if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
+               return -ERESTARTSYS;
+       return 0;
+}
+
+static int __init chp_init(void)
+{
+       struct chp_id chpid;
+
+       chp_wq = create_singlethread_workqueue("cio_chp");
+       if (!chp_wq)
+               return -ENOMEM;
+       INIT_WORK(&cfg_work, cfg_func);
+       init_waitqueue_head(&cfg_wait_queue);
+       if (info_update())
+               return 0;
+       /* Register available channel-paths. */
+       chp_id_for_each(&chpid) {
+               if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
+                       chp_new(chpid);
+       }
+
+       return 0;
+}
+
+subsys_initcall(chp_init);
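chp_cfg_schedule() and cfg_func() above form a small self-requeueing work loop: each invocation of cfg_func() takes at most one pending chpid out of the task map, requeues itself, and only clears cfg_busy (waking cfg_wait_idle() sleepers) once nothing is left. A stripped-down sketch of the same pattern with a hypothetical flat task array, to make the requeue and wake-up ordering explicit:

/* Sketch only: same drain-one-task-per-run idea as cfg_func().
 * Workqueue creation and INIT_WORK(&drain_work, drain_func) are
 * assumed to happen in an init path, as chp_init() does above. */
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define NR_TASKS 16

static int task_map[NR_TASKS];		/* non-zero means pending */
static int busy;
static DEFINE_MUTEX(map_lock);
static DECLARE_WAIT_QUEUE_HEAD(idle_wq);
static struct workqueue_struct *wq;
static struct work_struct drain_work;

static void drain_func(struct work_struct *work)
{
	int i, t = 0;

	mutex_lock(&map_lock);
	for (i = 0; i < NR_TASKS; i++) {
		if (task_map[i]) {
			t = task_map[i];
			task_map[i] = 0;
			break;
		}
	}
	mutex_unlock(&map_lock);

	if (!t) {
		/* Map drained: clear busy and wake waiters, as cfg_func() does. */
		mutex_lock(&map_lock);
		busy = 0;
		mutex_unlock(&map_lock);
		wake_up_interruptible(&idle_wq);
		return;
	}
	/* ... perform task t outside the lock ... */
	queue_work(wq, &drain_work);	/* come back for the next entry */
}

static void schedule_task(int idx, int what)
{
	mutex_lock(&map_lock);
	task_map[idx] = what;
	busy = 1;
	mutex_unlock(&map_lock);
	queue_work(wq, &drain_work);
}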
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
new file mode 100644 (file)
index 0000000..6528656
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ *  drivers/s390/cio/chp.h
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_CHP_H
+#define S390_CHP_H S390_CHP_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/chpid.h>
+#include "chsc.h"
+
+#define CHP_STATUS_STANDBY             0
+#define CHP_STATUS_CONFIGURED          1
+#define CHP_STATUS_RESERVED            2
+#define CHP_STATUS_NOT_RECOGNIZED      3
+
+static inline int chp_test_bit(u8 *bitmap, int num)
+{
+       int byte = num >> 3;
+       int mask = 128 >> (num & 7);
+
+       return (bitmap[byte] & mask) ? 1 : 0;
+}
+
+struct channel_path {
+       struct chp_id chpid;
+       int state;
+       struct channel_path_desc desc;
+       /* Channel-measurement related stuff: */
+       int cmg;
+       int shared;
+       void *cmg_chars;
+       struct device dev;
+};
+
+int chp_get_status(struct chp_id chpid);
+u8 chp_get_sch_opm(struct subchannel *sch);
+int chp_is_registered(struct chp_id chpid);
+void *chp_get_chp_desc(struct chp_id chpid);
+void chp_process_crw(int id, int available);
+void chp_remove_cmg_attr(struct channel_path *chp);
+int chp_add_cmg_attr(struct channel_path *chp);
+int chp_new(struct chp_id chpid);
+void chp_cfg_schedule(struct chp_id chpid, int configure);
+void chp_cfg_cancel_deconfigure(struct chp_id chpid);
+int chp_info_get_status(struct chp_id chpid);
+
+#endif /* S390_CHP_H */
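chp_test_bit() above numbers bits MSB-first within each byte (bit 0 is the 0x80 bit of bitmap[0]), which is how chp_info_get_status() interprets the SCLP channel-path bitmaps. A tiny self-contained check of that convention, using made-up bitmap values and compiled as ordinary userspace C:

/* Same arithmetic as chp_test_bit() in chp.h, exercised with test data. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int test_bit_msb(const uint8_t *bitmap, int num)
{
	int byte = num >> 3;
	int mask = 128 >> (num & 7);

	return (bitmap[byte] & mask) ? 1 : 0;
}

int main(void)
{
	uint8_t map[2] = { 0x80, 0x40 };	/* bits 0 and 9 set */

	assert(test_bit_msb(map, 0) == 1);	/* 0x80 of byte 0 */
	assert(test_bit_msb(map, 9) == 1);	/* 0x40 of byte 1 */
	assert(test_bit_msb(map, 1) == 0);
	printf("MSB-first bit numbering as in chp_test_bit()\n");
	return 0;
}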
index 6f05a44..ea92ac4 100644 (file)
 #include <linux/device.h>
 
 #include <asm/cio.h>
+#include <asm/chpid.h>
 
 #include "css.h"
 #include "cio.h"
 #include "cio_debug.h"
 #include "ioasm.h"
+#include "chp.h"
 #include "chsc.h"
 
 static void *sei_page;
 
-static int new_channel_path(int chpid);
-
-static inline void
-set_chp_logically_online(int chp, int onoff)
-{
-       css[0]->chps[chp]->state = onoff;
-}
-
-static int
-get_chp_status(int chp)
-{
-       return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
-}
-
-void
-chsc_validate_chpids(struct subchannel *sch)
-{
-       int mask, chp;
-
-       for (chp = 0; chp <= 7; chp++) {
-               mask = 0x80 >> chp;
-               if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
-                       /* disable using this path */
-                       sch->opm &= ~mask;
-       }
-}
-
-void
-chpid_is_actually_online(int chp)
-{
-       int state;
-
-       state = get_chp_status(chp);
-       if (state < 0) {
-               need_rescan = 1;
-               queue_work(slow_path_wq, &slow_path_work);
-       } else
-               WARN_ON(!state);
-}
+struct chsc_ssd_area {
+       struct chsc_header request;
+       u16 :10;
+       u16 ssid:2;
+       u16 :4;
+       u16 f_sch;        /* first subchannel */
+       u16 :16;
+       u16 l_sch;        /* last subchannel */
+       u32 :32;
+       struct chsc_header response;
+       u32 :32;
+       u8 sch_valid : 1;
+       u8 dev_valid : 1;
+       u8 st        : 3; /* subchannel type */
+       u8 zeroes    : 3;
+       u8  unit_addr;    /* unit address */
+       u16 devno;        /* device number */
+       u8 path_mask;
+       u8 fla_valid_mask;
+       u16 sch;          /* subchannel */
+       u8 chpid[8];      /* chpids 0-7 */
+       u16 fla[8];       /* full link addresses 0-7 */
+} __attribute__ ((packed));
 
-/* FIXME: this is _always_ called for every subchannel. shouldn't we
- *       process more than one at a time? */
-static int
-chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
+int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
 {
-       int ccode, j;
-
-       struct {
-               struct chsc_header request;
-               u16 reserved1a:10;
-               u16 ssid:2;
-               u16 reserved1b:4;
-               u16 f_sch;        /* first subchannel */
-               u16 reserved2;
-               u16 l_sch;        /* last subchannel */
-               u32 reserved3;
-               struct chsc_header response;
-               u32 reserved4;
-               u8 sch_valid : 1;
-               u8 dev_valid : 1;
-               u8 st        : 3; /* subchannel type */
-               u8 zeroes    : 3;
-               u8  unit_addr;    /* unit address */
-               u16 devno;        /* device number */
-               u8 path_mask;
-               u8 fla_valid_mask;
-               u16 sch;          /* subchannel */
-               u8 chpid[8];      /* chpids 0-7 */
-               u16 fla[8];       /* full link addresses 0-7 */
-       } __attribute__ ((packed)) *ssd_area;
-
-       ssd_area = page;
+       unsigned long page;
+       struct chsc_ssd_area *ssd_area;
+       int ccode;
+       int ret;
+       int i;
+       int mask;
 
+       page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+       if (!page)
+               return -ENOMEM;
+       ssd_area = (struct chsc_ssd_area *) page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;
-
-       ssd_area->ssid = sch->schid.ssid;
-       ssd_area->f_sch = sch->schid.sch_no;
-       ssd_area->l_sch = sch->schid.sch_no;
+       ssd_area->ssid = schid.ssid;
+       ssd_area->f_sch = schid.sch_no;
+       ssd_area->l_sch = schid.sch_no;
 
        ccode = chsc(ssd_area);
+       /* Check response. */
        if (ccode > 0) {
-               pr_debug("chsc returned with ccode = %d\n", ccode);
-               return (ccode == 3) ? -ENODEV : -EBUSY;
+               ret = (ccode == 3) ? -ENODEV : -EBUSY;
+               goto out_free;
        }
-
-       switch (ssd_area->response.code) {
-       case 0x0001: /* everything ok */
-               break;
-       case 0x0002:
-               CIO_CRW_EVENT(2, "Invalid command!\n");
-               return -EINVAL;
-       case 0x0003:
-               CIO_CRW_EVENT(2, "Error in chsc request block!\n");
-               return -EINVAL;
-       case 0x0004:
-               CIO_CRW_EVENT(2, "Model does not provide ssd\n");
-               return -EOPNOTSUPP;
-       default:
-               CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+       if (ssd_area->response.code != 0x0001) {
+               CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
+                             schid.ssid, schid.sch_no,
                              ssd_area->response.code);
-               return -EIO;
+               ret = -EIO;
+               goto out_free;
        }
-
-       /*
-        * ssd_area->st stores the type of the detected
-        * subchannel, with the following definitions:
-        *
-        * 0: I/O subchannel:     All fields have meaning
-        * 1: CHSC subchannel:    Only sch_val, st and sch
-        *                        have meaning
-        * 2: Message subchannel: All fields except unit_addr
-        *                        have meaning
-        * 3: ADM subchannel:     Only sch_val, st and sch
-        *                        have meaning
-        *
-        * Other types are currently undefined.
-        */
-       if (ssd_area->st > 3) { /* uhm, that looks strange... */
-               CIO_CRW_EVENT(0, "Strange subchannel type %d"
-                             " for sch 0.%x.%04x\n", ssd_area->st,
-                             sch->schid.ssid, sch->schid.sch_no);
-               /*
-                * There may have been a new subchannel type defined in the
-                * time since this code was written; since we don't know which
-                * fields have meaning and what to do with it we just jump out
-                */
-               return 0;
-       } else {
-               const char *type[4] = {"I/O", "chsc", "message", "ADM"};
-               CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
-                             sch->schid.ssid, sch->schid.sch_no,
-                             type[ssd_area->st]);
-
-               sch->ssd_info.valid = 1;
-               sch->ssd_info.type = ssd_area->st;
+       if (!ssd_area->sch_valid) {
+               ret = -ENODEV;
+               goto out_free;
        }
-
-       if (ssd_area->st == 0 || ssd_area->st == 2) {
-               for (j = 0; j < 8; j++) {
-                       if (!((0x80 >> j) & ssd_area->path_mask &
-                             ssd_area->fla_valid_mask))
-                               continue;
-                       sch->ssd_info.chpid[j] = ssd_area->chpid[j];
-                       sch->ssd_info.fla[j]   = ssd_area->fla[j];
+       /* Copy data */
+       ret = 0;
+       memset(ssd, 0, sizeof(struct chsc_ssd_info));
+       if ((ssd_area->st != 0) && (ssd_area->st != 2))
+               goto out_free;
+       ssd->path_mask = ssd_area->path_mask;
+       ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+       for (i = 0; i < 8; i++) {
+               mask = 0x80 >> i;
+               if (ssd_area->path_mask & mask) {
+                       chp_id_init(&ssd->chpid[i]);
+                       ssd->chpid[i].id = ssd_area->chpid[i];
                }
+               if (ssd_area->fla_valid_mask & mask)
+                       ssd->fla[i] = ssd_area->fla[i];
        }
-       return 0;
+out_free:
+       free_page(page);
+       return ret;
 }
 
-int
-css_get_ssd_info(struct subchannel *sch)
+static int check_for_io_on_path(struct subchannel *sch, int mask)
 {
-       int ret;
-       void *page;
+       int cc;
 
-       page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-       if (!page)
-               return -ENOMEM;
-       spin_lock_irq(sch->lock);
-       ret = chsc_get_sch_desc_irq(sch, page);
-       if (ret) {
-               static int cio_chsc_err_msg;
-               
-               if (!cio_chsc_err_msg) {
-                       printk(KERN_ERR
-                              "chsc_get_sch_descriptions:"
-                              " Error %d while doing chsc; "
-                              "processing some machine checks may "
-                              "not work\n", ret);
-                       cio_chsc_err_msg = 1;
-               }
-       }
-       spin_unlock_irq(sch->lock);
-       free_page((unsigned long)page);
-       if (!ret) {
-               int j, chpid, mask;
-               /* Allocate channel path structures, if needed. */
-               for (j = 0; j < 8; j++) {
-                       mask = 0x80 >> j;
-                       chpid = sch->ssd_info.chpid[j];
-                       if ((sch->schib.pmcw.pim & mask) &&
-                           (get_chp_status(chpid) < 0))
-                           new_channel_path(chpid);
-               }
+       cc = stsch(sch->schid, &sch->schib);
+       if (cc)
+               return 0;
+       if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
+               return 1;
+       return 0;
+}
+
+static void terminate_internal_io(struct subchannel *sch)
+{
+       if (cio_clear(sch)) {
+               /* Recheck device in case clear failed. */
+               sch->lpm = 0;
+               if (device_trigger_verify(sch) != 0)
+                       css_schedule_eval(sch->schid);
+               return;
        }
-       return ret;
+       /* Request retry of internal operation. */
+       device_set_intretry(sch);
+       /* Call handler. */
+       if (sch->driver && sch->driver->termination)
+               sch->driver->termination(&sch->dev);
 }
 
 static int
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
        int j;
        int mask;
        struct subchannel *sch;
-       struct channel_path *chpid;
+       struct chp_id *chpid;
        struct schib schib;
 
        sch = to_subchannel(dev);
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;
 
-       if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
-           (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
-           (sch->schib.pmcw.lpum == mask)) {
-               int cc;
-
-               cc = cio_clear(sch);
-               if (cc == -ENODEV)
+       if (check_for_io_on_path(sch, mask)) {
+               if (device_is_online(sch))
+                       device_kill_io(sch);
+               else {
+                       terminate_internal_io(sch);
+                       /* Re-start path verification. */
+                       if (sch->driver && sch->driver->verify)
+                               sch->driver->verify(&sch->dev);
+               }
+       } else {
+               /* trigger path verification. */
+               if (sch->driver && sch->driver->verify)
+                       sch->driver->verify(&sch->dev);
+               else if (sch->lpm == mask)
                        goto out_unreg;
-               /* Request retry of internal operation. */
-               device_set_intretry(sch);
-               /* Call handler. */
-               if (sch->driver && sch->driver->termination)
-                       sch->driver->termination(&sch->dev);
-               goto out_unlock;
        }
 
-       /* trigger path verification. */
-       if (sch->driver && sch->driver->verify)
-               sch->driver->verify(&sch->dev);
-       else if (sch->lpm == mask)
-               goto out_unreg;
-out_unlock:
        spin_unlock_irq(sch->lock);
        return 0;
+
 out_unreg:
-       spin_unlock_irq(sch->lock);
        sch->lpm = 0;
-       if (css_enqueue_subchannel_slow(sch->schid)) {
-               css_clear_subchannel_slow_list();
-               need_rescan = 1;
-       }
+       spin_unlock_irq(sch->lock);
+       css_schedule_eval(sch->schid);
        return 0;
 }
 
-static void
-s390_set_chpid_offline( __u8 chpid)
+void chsc_chp_offline(struct chp_id chpid)
 {
        char dbf_txt[15];
-       struct device *dev;
 
-       sprintf(dbf_txt, "chpr%x", chpid);
+       sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
 
-       if (get_chp_status(chpid) <= 0)
+       if (chp_get_status(chpid) <= 0)
                return;
-       dev = get_device(&css[0]->chps[chpid]->dev);
-       bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
+       bus_for_each_dev(&css_bus_type, NULL, &chpid,
                         s390_subchannel_remove_chpid);
-
-       if (need_rescan || css_slow_subchannels_exist())
-               queue_work(slow_path_wq, &slow_path_work);
-       put_device(dev);
-}
-
-struct res_acc_data {
-       struct channel_path *chp;
-       u32 fla_mask;
-       u16 fla;
-};
-
-static int
-s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
-{
-       int found;
-       int chp;
-       int ccode;
-       
-       found = 0;
-       for (chp = 0; chp <= 7; chp++)
-               /*
-                * check if chpid is in information updated by ssd
-                */
-               if (sch->ssd_info.valid &&
-                   sch->ssd_info.chpid[chp] == res_data->chp->id &&
-                   (sch->ssd_info.fla[chp] & res_data->fla_mask)
-                   == res_data->fla) {
-                       found = 1;
-                       break;
-               }
-       
-       if (found == 0)
-               return 0;
-
-       /*
-        * Do a stsch to update our subchannel structure with the
-        * new path information and eventually check for logically
-        * offline chpids.
-        */
-       ccode = stsch(sch->schid, &sch->schib);
-       if (ccode > 0)
-               return 0;
-
-       return 0x80 >> chp;
 }
 
 static int
 s390_process_res_acc_new_sch(struct subchannel_id schid)
 {
        struct schib schib;
-       int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
         */
        if (stsch_err(schid, &schib))
                /* We're through */
-               return need_rescan ? -EAGAIN : -ENXIO;
+               return -ENXIO;
 
        /* Put it on the slow path. */
-       ret = css_enqueue_subchannel_slow(schid);
-       if (ret) {
-               css_clear_subchannel_slow_list();
-               need_rescan = 1;
-               return -EAGAIN;
+       css_schedule_eval(schid);
+       return 0;
+}
+
+struct res_acc_data {
+       struct chp_id chpid;
+       u32 fla_mask;
+       u16 fla;
+};
+
+static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
+                             struct res_acc_data *data)
+{
+       int i;
+       int mask;
+
+       for (i = 0; i < 8; i++) {
+               mask = 0x80 >> i;
+               if (!(ssd->path_mask & mask))
+                       continue;
+               if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
+                       continue;
+               if ((ssd->fla_valid_mask & mask) &&
+                   ((ssd->fla[i] & data->fla_mask) != data->fla))
+                       continue;
+               return mask;
        }
        return 0;
 }
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
                return s390_process_res_acc_new_sch(schid);
 
        spin_lock_irq(sch->lock);
-
-       chp_mask = s390_process_res_acc_sch(res_data, sch);
-
-       if (chp_mask == 0) {
-               spin_unlock_irq(sch->lock);
-               put_device(&sch->dev);
-               return 0;
-       }
+       chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
+       if (chp_mask == 0)
+               goto out;
+       if (stsch(sch->schid, &sch->schib))
+               goto out;
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);
-
+out:
        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
 }
 
-
-static int
-s390_process_res_acc (struct res_acc_data *res_data)
+static void s390_process_res_acc (struct res_acc_data *res_data)
 {
-       int rc;
        char dbf_txt[15];
 
-       sprintf(dbf_txt, "accpr%x", res_data->chp->id);
+       sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
+               res_data->chpid.id);
        CIO_TRACE_EVENT( 2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
         * The more information we have (info), the less scanning
         * will we have to do.
         */
-       rc = for_each_subchannel(__s390_process_res_acc, res_data);
-       if (css_slow_subchannels_exist())
-               rc = -EAGAIN;
-       else if (rc != -EAGAIN)
-               rc = 0;
-       return rc;
+       for_each_subchannel(__s390_process_res_acc, res_data);
 }
 
 static int
@@ -480,43 +357,45 @@ struct chsc_sei_area {
        /* ccdf has to be big enough for a link-incident record */
 } __attribute__ ((packed));
 
-static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
 {
-       int chpid;
+       struct chp_id chpid;
+       int id;
 
        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
-               return 0;
-       chpid = __get_chpid_from_lir(sei_area->ccdf);
-       if (chpid < 0)
+               return;
+       id = __get_chpid_from_lir(sei_area->ccdf);
+       if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
-       else
-               s390_set_chpid_offline(chpid);
-
-       return 0;
+       else {
+               chp_id_init(&chpid);
+               chpid.id = id;
+               chsc_chp_offline(chpid);
+       }
 }
 
-static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 {
        struct res_acc_data res_data;
-       struct device *dev;
+       struct chp_id chpid;
        int status;
-       int rc;
 
        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
-               return 0;
+               return;
+       chp_id_init(&chpid);
+       chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
-       status = get_chp_status(sei_area->rsid);
+       status = chp_get_status(chpid);
        if (status < 0)
-               new_channel_path(sei_area->rsid);
+               chp_new(chpid);
        else if (!status)
-               return 0;
-       dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
+               return;
        memset(&res_data, 0, sizeof(struct res_acc_data));
-       res_data.chp = to_channelpath(dev);
+       res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
-       rc = s390_process_res_acc(&res_data);
-       put_device(dev);
-
-       return rc;
+       s390_process_res_acc(&res_data);
 }
 
-static int chsc_process_sei(struct chsc_sei_area *sei_area)
+struct chp_config_data {
+       u8 map[32];
+       u8 op;
+       u8 pc;
+};
+
+static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
 {
-       int rc;
+       struct chp_config_data *data;
+       struct chp_id chpid;
+       int num;
+
+       CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
+       if (sei_area->rs != 0)
+               return;
+       data = (struct chp_config_data *) &(sei_area->ccdf);
+       chp_id_init(&chpid);
+       for (num = 0; num <= __MAX_CHPID; num++) {
+               if (!chp_test_bit(data->map, num))
+                       continue;
+               chpid.id = num;
+               printk(KERN_WARNING "cio: processing configure event %d for "
+                      "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
+               switch (data->op) {
+               case 0:
+                       chp_cfg_schedule(chpid, 1);
+                       break;
+               case 1:
+                       chp_cfg_schedule(chpid, 0);
+                       break;
+               case 2:
+                       chp_cfg_cancel_deconfigure(chpid);
+                       break;
+               }
+       }
+}
 
+static void chsc_process_sei(struct chsc_sei_area *sei_area)
+{
        /* Check if we might have lost some information. */
-       if (sei_area->flags & 0x40)
+       if (sei_area->flags & 0x40) {
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
+               css_schedule_eval_all();
+       }
        /* which kind of information was stored? */
-       rc = 0;
        switch (sei_area->cc) {
        case 1: /* link incident*/
-               rc = chsc_process_sei_link_incident(sei_area);
+               chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibiliy */
-               rc = chsc_process_sei_res_acc(sei_area);
+               chsc_process_sei_res_acc(sei_area);
+               break;
+       case 8: /* channel-path-configuration notification */
+               chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }
-
-       return rc;
 }
 
-int chsc_process_crw(void)
+void chsc_process_crw(void)
 {
        struct chsc_sei_area *sei_area;
-       int ret;
-       int rc;
 
        if (!sei_page)
-               return 0;
+               return;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;
 
        CIO_TRACE_EVENT( 2, "prcss");
-       ret = 0;
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
@@ -580,37 +490,26 @@ int chsc_process_crw(void)
 
                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
-                       rc = chsc_process_sei(sei_area);
-                       if (rc)
-                               ret = rc;
+                       chsc_process_sei(sei_area);
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
-                       ret = 0;
                        break;
                }
        } while (sei_area->flags & 0x80);
-
-       return ret;
 }
 
 static int
 __chp_add_new_sch(struct subchannel_id schid)
 {
        struct schib schib;
-       int ret;
 
        if (stsch_err(schid, &schib))
                /* We're through */
-               return need_rescan ? -EAGAIN : -ENXIO;
+               return -ENXIO;
 
        /* Put it on the slow path. */
-       ret = css_enqueue_subchannel_slow(schid);
-       if (ret) {
-               css_clear_subchannel_slow_list();
-               need_rescan = 1;
-               return -EAGAIN;
-       }
+       css_schedule_eval(schid);
        return 0;
 }
 
@@ -619,10 +518,10 @@ static int
 __chp_add(struct subchannel_id schid, void *data)
 {
        int i, mask;
-       struct channel_path *chp;
+       struct chp_id *chpid;
        struct subchannel *sch;
 
-       chp = data;
+       chpid = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data)
        for (i=0; i<8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
-                   (sch->schib.pmcw.chpid[i] == chp->id)) {
+                   (sch->schib.pmcw.chpid[i] == chpid->id)) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                /* Endgame. */
                                spin_unlock_irq(sch->lock);
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data)
        return 0;
 }
 
-static int
-chp_add(int chpid)
+void chsc_chp_online(struct chp_id chpid)
 {
-       int rc;
        char dbf_txt[15];
-       struct device *dev;
 
-       if (!get_chp_status(chpid))
-               return 0; /* no need to do the rest */
-       
-       sprintf(dbf_txt, "cadd%x", chpid);
+       sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
 
-       dev = get_device(&css[0]->chps[chpid]->dev);
-       rc = for_each_subchannel(__chp_add, to_channelpath(dev));
-       if (css_slow_subchannels_exist())
-               rc = -EAGAIN;
-       if (rc != -EAGAIN)
-               rc = 0;
-       put_device(dev);
-       return rc;
+       if (chp_get_status(chpid) != 0)
+               for_each_subchannel(__chp_add, &chpid);
 }
 
-/* 
- * Handling of crw machine checks with channel path source.
- */
-int
-chp_process_crw(int chpid, int on)
-{
-       if (on == 0) {
-               /* Path has gone. We use the link incident routine.*/
-               s390_set_chpid_offline(chpid);
-               return 0; /* De-register is async anyway. */
-       }
-       /*
-        * Path has come. Allocate a new channel path structure,
-        * if needed.
-        */
-       if (get_chp_status(chpid) < 0)
-               new_channel_path(chpid);
-       /* Avoid the extra overhead in process_rec_acc. */
-       return chp_add(chpid);
-}
-
-static int check_for_io_on_path(struct subchannel *sch, int index)
-{
-       int cc;
-
-       cc = stsch(sch->schid, &sch->schib);
-       if (cc)
-               return 0;
-       if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
-               return 1;
-       return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch)
-{
-       if (cio_clear(sch)) {
-               /* Recheck device in case clear failed. */
-               sch->lpm = 0;
-               if (device_trigger_verify(sch) != 0) {
-                       if(css_enqueue_subchannel_slow(sch->schid)) {
-                               css_clear_subchannel_slow_list();
-                               need_rescan = 1;
-                       }
-               }
-               return;
-       }
-       /* Request retry of internal operation. */
-       device_set_intretry(sch);
-       /* Call handler. */
-       if (sch->driver && sch->driver->termination)
-               sch->driver->termination(&sch->dev);
-}
-
-static void
-__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
+static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+                                        struct chp_id chpid, int on)
 {
        int chp, old_lpm;
+       int mask;
        unsigned long flags;
 
-       if (!sch->ssd_info.valid)
-               return;
-       
        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
-               if (sch->ssd_info.chpid[chp] != chpid)
+               mask = 0x80 >> chp;
+               if (!(sch->ssd_info.path_mask & mask))
+                       continue;
+               if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
                        continue;
 
                if (on) {
-                       sch->opm |= (0x80 >> chp);
-                       sch->lpm |= (0x80 >> chp);
+                       sch->opm |= mask;
+                       sch->lpm |= mask;
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                        break;
                }
-               sch->opm &= ~(0x80 >> chp);
-               sch->lpm &= ~(0x80 >> chp);
-               if (check_for_io_on_path(sch, chp)) {
+               sch->opm &= ~mask;
+               sch->lpm &= ~mask;
+               if (check_for_io_on_path(sch, mask)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
-                       else
+                       else {
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
-               } else if (!sch->lpm) {
-                       if (device_trigger_verify(sch) != 0) {
-                               if (css_enqueue_subchannel_slow(sch->schid)) {
-                                       css_clear_subchannel_slow_list();
-                                       need_rescan = 1;
-                               }
+                               /* Re-start path verification. */
+                               if (sch->driver && sch->driver->verify)
+                                       sch->driver->verify(&sch->dev);
                        }
+               } else if (!sch->lpm) {
+                       if (device_trigger_verify(sch) != 0)
+                               css_schedule_eval(sch->schid);
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(&sch->dev);
                break;
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
        spin_unlock_irqrestore(sch->lock, flags);
 }
 
-static int
-s390_subchannel_vary_chpid_off(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
 {
        struct subchannel *sch;
-       __u8 *chpid;
+       struct chp_id *chpid;
 
        sch = to_subchannel(dev);
        chpid = data;
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data)
        return 0;
 }
 
-static int
-s390_subchannel_vary_chpid_on(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
 {
        struct subchannel *sch;
-       __u8 *chpid;
+       struct chp_id *chpid;
 
        sch = to_subchannel(dev);
        chpid = data;
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
-       if (css_enqueue_subchannel_slow(schid)) {
-               css_clear_subchannel_slow_list();
-               need_rescan = 1;
-               return -EAGAIN;
-       }
+       css_schedule_eval(schid);
        return 0;
 }
 
-/*
- * Function: s390_vary_chpid
- * Varies the specified chpid online or offline
+/**
+ * chsc_chp_vary - propagate channel-path vary operation to subchannels
+ * @chpid: channel-path ID
+ * @on: non-zero for vary online, zero for vary offline
  */
-static int
-s390_vary_chpid( __u8 chpid, int on)
+int chsc_chp_vary(struct chp_id chpid, int on)
 {
-       char dbf_text[15];
-       int status;
-
-       sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
-       CIO_TRACE_EVENT( 2, dbf_text);
-
-       status = get_chp_status(chpid);
-       if (status < 0) {
-               printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
-               return -EINVAL;
-       }
-
-       if (!on && !status) {
-               printk(KERN_ERR "chpid %x is already offline\n", chpid);
-               return -EINVAL;
-       }
-
-       set_chp_logically_online(chpid, on);
-
        /*
         * Redo PathVerification on the devices the chpid connects to
         */
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on)
        if (on)
                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
-       if (need_rescan || css_slow_subchannels_exist())
-               queue_work(slow_path_wq, &slow_path_work);
        return 0;
 }
 
-/*
- * Channel measurement related functions
- */
-static ssize_t
-chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
-                          size_t count)
-{
-       struct channel_path *chp;
-       unsigned int size;
-
-       chp = to_channelpath(container_of(kobj, struct device, kobj));
-       if (!chp->cmg_chars)
-               return 0;
-
-       size = sizeof(struct cmg_chars);
-
-       if (off > size)
-               return 0;
-       if (off + count > size)
-               count = size - off;
-       memcpy(buf, chp->cmg_chars + off, count);
-       return count;
-}
-
-static struct bin_attribute chp_measurement_chars_attr = {
-       .attr = {
-               .name = "measurement_chars",
-               .mode = S_IRUSR,
-               .owner = THIS_MODULE,
-       },
-       .size = sizeof(struct cmg_chars),
-       .read = chp_measurement_chars_read,
-};
-
-static void
-chp_measurement_copy_block(struct cmg_entry *buf,
-                          struct channel_subsystem *css, int chpid)
-{
-       void *area;
-       struct cmg_entry *entry, reference_buf;
-       int idx;
-
-       if (chpid < 128) {
-               area = css->cub_addr1;
-               idx = chpid;
-       } else {
-               area = css->cub_addr2;
-               idx = chpid - 128;
-       }
-       entry = area + (idx * sizeof(struct cmg_entry));
-       do {
-               memcpy(buf, entry, sizeof(*entry));
-               memcpy(&reference_buf, entry, sizeof(*entry));
-       } while (reference_buf.values[0] != buf->values[0]);
-}
-
-static ssize_t
-chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
-{
-       struct channel_path *chp;
-       struct channel_subsystem *css;
-       unsigned int size;
-
-       chp = to_channelpath(container_of(kobj, struct device, kobj));
-       css = to_css(chp->dev.parent);
-
-       size = sizeof(struct cmg_entry);
-
-       /* Only allow single reads. */
-       if (off || count < size)
-               return 0;
-       chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
-       count = size;
-       return count;
-}
-
-static struct bin_attribute chp_measurement_attr = {
-       .attr = {
-               .name = "measurement",
-               .mode = S_IRUSR,
-               .owner = THIS_MODULE,
-       },
-       .size = sizeof(struct cmg_entry),
-       .read = chp_measurement_read,
-};
-
-static void
-chsc_remove_chp_cmg_attr(struct channel_path *chp)
-{
-       device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
-       device_remove_bin_file(&chp->dev, &chp_measurement_attr);
-}
-
-static int
-chsc_add_chp_cmg_attr(struct channel_path *chp)
-{
-       int ret;
-
-       ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
-       if (ret)
-               return ret;
-       ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
-       if (ret)
-               device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
-       return ret;
-}
-
 static void
 chsc_remove_cmg_attr(struct channel_subsystem *css)
 {
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css)
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
-               chsc_remove_chp_cmg_attr(css->chps[i]);
+               chp_remove_cmg_attr(css->chps[i]);
        }
 }
 
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
-               ret = chsc_add_chp_cmg_attr(css->chps[i]);
+               ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
@@ -1007,12 +708,11 @@ cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
-               chsc_remove_chp_cmg_attr(css->chps[i]);
+               chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
 }
 
-
 static int
 __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 {
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable)
                } else
                        chsc_remove_cmg_attr(css);
        }
-       if (enable && !css->cm_enabled) {
+       if (!css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable)
        return ret;
 }
 
-/*
- * Files for the channel path entries.
- */
-static ssize_t
-chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct channel_path *chp = container_of(dev, struct channel_path, dev);
-
-       if (!chp)
-               return 0;
-       return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
-               sprintf(buf, "offline\n"));
-}
-
-static ssize_t
-chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
-       struct channel_path *cp = container_of(dev, struct channel_path, dev);
-       char cmd[10];
-       int num_args;
-       int error;
-
-       num_args = sscanf(buf, "%5s", cmd);
-       if (!num_args)
-               return count;
-
-       if (!strnicmp(cmd, "on", 2))
-               error = s390_vary_chpid(cp->id, 1);
-       else if (!strnicmp(cmd, "off", 3))
-               error = s390_vary_chpid(cp->id, 0);
-       else
-               error = -EINVAL;
-
-       return error < 0 ? error : count;
-
-}
-
-static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
-
-static ssize_t
-chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct channel_path *chp = container_of(dev, struct channel_path, dev);
-
-       if (!chp)
-               return 0;
-       return sprintf(buf, "%x\n", chp->desc.desc);
-}
-
-static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
-
-static ssize_t
-chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct channel_path *chp = to_channelpath(dev);
-
-       if (!chp)
-               return 0;
-       if (chp->cmg == -1) /* channel measurements not available */
-               return sprintf(buf, "unknown\n");
-       return sprintf(buf, "%x\n", chp->cmg);
-}
-
-static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
-
-static ssize_t
-chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct channel_path *chp = to_channelpath(dev);
-
-       if (!chp)
-               return 0;
-       if (chp->shared == -1) /* channel measurements not available */
-               return sprintf(buf, "unknown\n");
-       return sprintf(buf, "%x\n", chp->shared);
-}
-
-static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
-
-static struct attribute * chp_attrs[] = {
-       &dev_attr_status.attr,
-       &dev_attr_type.attr,
-       &dev_attr_cmg.attr,
-       &dev_attr_shared.attr,
-       NULL,
-};
-
-static struct attribute_group chp_attr_group = {
-       .attrs = chp_attrs,
-};
-
-static void
-chp_release(struct device *dev)
-{
-       struct channel_path *cp;
-       
-       cp = container_of(dev, struct channel_path, dev);
-       kfree(cp);
-}
-
-static int
-chsc_determine_channel_path_description(int chpid,
-                                       struct channel_path_desc *desc)
+int chsc_determine_channel_path_description(struct chp_id chpid,
+                                           struct channel_path_desc *desc)
 {
        int ccode, ret;
 
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid,
        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;
 
-       scpd_area->first_chpid = chpid;
-       scpd_area->last_chpid = chpid;
+       scpd_area->first_chpid = chpid.id;
+       scpd_area->last_chpid = chpid.id;
 
        ccode = chsc(scpd_area);
        if (ccode > 0) {
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
        }
 }
 
-static int
-chsc_get_channel_measurement_chars(struct channel_path *chp)
+int chsc_get_channel_measurement_chars(struct channel_path *chp)
 {
        int ccode, ret;
 
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;
 
-       scmc_area->first_chpid = chp->id;
-       scmc_area->last_chpid = chp->id;
+       scmc_area->first_chpid = chp->chpid.id;
+       scmc_area->last_chpid = chp->chpid.id;
 
        ccode = chsc(scmc_area);
        if (ccode > 0) {
@@ -1392,94 +990,6 @@ out:
        return ret;
 }
 
-/*
- * Entries for chpids on the system bus.
- * This replaces /proc/chpids.
- */
-static int
-new_channel_path(int chpid)
-{
-       struct channel_path *chp;
-       int ret;
-
-       chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
-       if (!chp)
-               return -ENOMEM;
-
-       /* fill in status, etc. */
-       chp->id = chpid;
-       chp->state = 1;
-       chp->dev.parent = &css[0]->device;
-       chp->dev.release = chp_release;
-       snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
-
-       /* Obtain channel path description and fill it in. */
-       ret = chsc_determine_channel_path_description(chpid, &chp->desc);
-       if (ret)
-               goto out_free;
-       /* Get channel-measurement characteristics. */
-       if (css_characteristics_avail && css_chsc_characteristics.scmc
-           && css_chsc_characteristics.secm) {
-               ret = chsc_get_channel_measurement_chars(chp);
-               if (ret)
-                       goto out_free;
-       } else {
-               static int msg_done;
-
-               if (!msg_done) {
-                       printk(KERN_WARNING "cio: Channel measurements not "
-                              "available, continuing.\n");
-                       msg_done = 1;
-               }
-               chp->cmg = -1;
-       }
-
-       /* make it known to the system */
-       ret = device_register(&chp->dev);
-       if (ret) {
-               printk(KERN_WARNING "%s: could not register %02x\n",
-                      __func__, chpid);
-               goto out_free;
-       }
-       ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
-       if (ret) {
-               device_unregister(&chp->dev);
-               goto out_free;
-       }
-       mutex_lock(&css[0]->mutex);
-       if (css[0]->cm_enabled) {
-               ret = chsc_add_chp_cmg_attr(chp);
-               if (ret) {
-                       sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
-                       device_unregister(&chp->dev);
-                       mutex_unlock(&css[0]->mutex);
-                       goto out_free;
-               }
-       }
-       css[0]->chps[chpid] = chp;
-       mutex_unlock(&css[0]->mutex);
-       return ret;
-out_free:
-       kfree(chp);
-       return ret;
-}
-
-void *
-chsc_get_chp_desc(struct subchannel *sch, int chp_no)
-{
-       struct channel_path *chp;
-       struct channel_path_desc *desc;
-
-       chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
-       if (!chp)
-               return NULL;
-       desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
-       if (!desc)
-               return NULL;
-       memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
-       return desc;
-}
-
 static int __init
 chsc_alloc_sei_area(void)
 {
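
The chsc hunks above keep repeating one idiom: path slot i is only meaningful if bit (0x80 >> i) is set in the installed-path mask (pim/path_mask), and only then is the per-slot CHPID compared against the channel path being processed, now as a (cssid, id) pair rather than a bare __u8. A minimal standalone C model of that scan follows; struct chp_id is simplified to just cssid/id here, and path_mask_for_chpid() is a made-up name used only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct chp_id. */
struct chp_id {
        uint8_t cssid;
        uint8_t id;
};

/*
 * Return the (0x80 >> i) mask of the first installed path whose CHPID
 * matches 'want', or 0 if no installed path uses it.  This mirrors the
 * test performed by __chp_add() and __s390_subchannel_vary_chpid() above.
 */
static int path_mask_for_chpid(uint8_t pim, const struct chp_id chpid[8],
                               struct chp_id want)
{
        int i;

        for (i = 0; i < 8; i++) {
                int mask = 0x80 >> i;

                if (!(pim & mask))
                        continue;       /* path not installed */
                if (chpid[i].cssid == want.cssid && chpid[i].id == want.id)
                        return mask;
        }
        return 0;
}

int main(void)
{
        /* Three installed paths: PIM = 1110 0000 = 0xe0. */
        struct chp_id chpid[8] = { {0, 0x40}, {0, 0x41}, {0, 0x52} };
        struct chp_id want = { 0, 0x41 };

        printf("mask=0x%02x\n", path_mask_for_chpid(0xe0, chpid, want));
        return 0;
}
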
index 0fb2b02..2ad81d1 100644 (file)
@@ -1,9 +1,10 @@
 #ifndef S390_CHSC_H
 #define S390_CHSC_H
 
-#define CHSC_SEI_ACC_CHPID        1
-#define CHSC_SEI_ACC_LINKADDR     2
-#define CHSC_SEI_ACC_FULLLINKADDR 3
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/chpid.h>
+#include "schid.h"
 
 #define CHSC_SDA_OC_MSS   0x2
 
@@ -33,23 +34,9 @@ struct channel_path_desc {
        u8 chpp;
 } __attribute__ ((packed));
 
-struct channel_path {
-       int id;
-       int state;
-       struct channel_path_desc desc;
-       /* Channel-measurement related stuff: */
-       int cmg;
-       int shared;
-       void *cmg_chars;
-       struct device dev;
-};
+struct channel_path;
 
-extern void s390_process_css( void );
-extern void chsc_validate_chpids(struct subchannel *);
-extern void chpid_is_actually_online(int);
-extern int css_get_ssd_info(struct subchannel *);
-extern int chsc_process_crw(void);
-extern int chp_process_crw(int, int);
+extern void chsc_process_crw(void);
 
 struct css_general_char {
        u64 : 41;
@@ -82,15 +69,26 @@ struct css_chsc_char {
 extern struct css_general_char css_general_characteristics;
 extern struct css_chsc_char css_chsc_characteristics;
 
+struct chsc_ssd_info {
+       u8 path_mask;
+       u8 fla_valid_mask;
+       struct chp_id chpid[8];
+       u16 fla[8];
+};
+extern int chsc_get_ssd_info(struct subchannel_id schid,
+                            struct chsc_ssd_info *ssd);
 extern int chsc_determine_css_characteristics(void);
 extern int css_characteristics_avail;
 
-extern void *chsc_get_chp_desc(struct subchannel*, int);
-
 extern int chsc_enable_facility(int);
 struct channel_subsystem;
 extern int chsc_secm(struct channel_subsystem *, int);
 
-#define to_channelpath(device) container_of(device, struct channel_path, dev)
+int chsc_chp_vary(struct chp_id chpid, int on);
+int chsc_determine_channel_path_description(struct chp_id chpid,
+                                           struct channel_path_desc *desc);
+void chsc_chp_online(struct chp_id chpid);
+void chsc_chp_offline(struct chp_id chpid);
+int chsc_get_channel_measurement_chars(struct channel_path *chp);
 
 #endif
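
The rewritten header above turns struct channel_path into an opaque forward declaration and exports struct chsc_ssd_info instead, where path_mask marks which of the eight chpid[] slots are installed and fla_valid_mask marks which slots also carry a full link address. A short sketch of a consumer honouring both masks; ssd_get_fla() is a hypothetical helper, not part of the patch, and the structures are simplified copies of the declarations above.

#include <stdint.h>

struct chp_id {                 /* simplified stand-in, cssid/id only */
        uint8_t cssid;
        uint8_t id;
};

struct chsc_ssd_info {
        uint8_t path_mask;
        uint8_t fla_valid_mask;
        struct chp_id chpid[8];
        uint16_t fla[8];
};

/*
 * Fetch the full link address for path slot i.  The slot is only usable
 * if the path is installed (path_mask) and an FLA was reported for it
 * (fla_valid_mask).  Returns 0 on success, -1 otherwise.
 */
static int ssd_get_fla(const struct chsc_ssd_info *ssd, int i, uint16_t *fla)
{
        int mask;

        if (i < 0 || i > 7)
                return -1;
        mask = 0x80 >> i;
        if (!(ssd->path_mask & mask) || !(ssd->fla_valid_mask & mask))
                return -1;
        *fla = ssd->fla[i];
        return 0;
}
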
index 9cb129a..ea1defb 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/setup.h>
 #include <asm/reset.h>
 #include <asm/ipl.h>
+#include <asm/chpid.h>
 #include "airq.h"
 #include "cio.h"
 #include "css.h"
@@ -29,6 +30,7 @@
 #include "ioasm.h"
 #include "blacklist.h"
 #include "cio_debug.h"
+#include "chp.h"
 #include "../s390mach.h"
 
 debug_info_t *cio_debug_msg_id;
@@ -592,9 +594,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
                err = -ENODEV;
                goto out;
        }
-       sch->opm = 0xff;
-       if (!cio_is_console(sch->schid))
-               chsc_validate_chpids(sch);
+       if (cio_is_console(sch->schid))
+               sch->opm = 0xff;
+       else
+               sch->opm = chp_get_sch_opm(sch);
        sch->lpm = sch->schib.pmcw.pam & sch->opm;
 
        CIO_DEBUG(KERN_INFO, 0,
@@ -954,6 +957,7 @@ static void css_reset(void)
 {
        int i, ret;
        unsigned long long timeout;
+       struct chp_id chpid;
 
        /* Reset subchannels. */
        for_each_subchannel(__shutdown_subchannel_easy,  NULL);
@@ -963,8 +967,10 @@ static void css_reset(void)
        __ctl_set_bit(14, 28);
        /* Temporarily reenable machine checks. */
        local_mcck_enable();
+       chp_id_init(&chpid);
        for (i = 0; i <= __MAX_CHPID; i++) {
-               ret = rchp(i);
+               chpid.id = i;
+               ret = rchp(chpid);
                if ((ret == 0) || (ret == 2))
                        /*
                         * rchp either succeeded, or another rchp is already
@@ -1048,37 +1054,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
        do_reipl_asm(*((__u32*)&schid));
 }
 
-static struct schib __initdata ipl_schib;
-
-/*
- * ipl_save_parameters gets called very early. It is not allowed to access
- * anything in the bss section at all. The bss section is not cleared yet,
- * but may contain some ipl parameters written by the firmware.
- * These parameters (if present) are copied to 0x2000.
- * To avoid corruption of the ipl parameters, all variables used by this
- * function must reside on the stack or in the data section.
- */
-void ipl_save_parameters(void)
+int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
 {
        struct subchannel_id schid;
-       unsigned int *ipl_ptr;
-       void *src, *dst;
+       struct schib schib;
 
        schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
        if (!schid.one)
-               return;
-       if (stsch(schid, &ipl_schib))
-               return;
-       if (!ipl_schib.pmcw.dnv)
-               return;
-       ipl_devno = ipl_schib.pmcw.dev;
-       ipl_flags |= IPL_DEVNO_VALID;
-       if (!ipl_schib.pmcw.qf)
-               return;
-       ipl_flags |= IPL_PARMBLOCK_VALID;
-       ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
-       src = (void *)(unsigned long)*ipl_ptr;
-       dst = (void *)IPL_PARMBLOCK_ORIGIN;
-       memmove(dst, src, PAGE_SIZE);
-       *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
+               return -ENODEV;
+       if (stsch(schid, &schib))
+               return -ENODEV;
+       if (!schib.pmcw.dnv)
+               return -ENODEV;
+       iplinfo->devno = schib.pmcw.dev;
+       iplinfo->is_qdio = schib.pmcw.qf;
+       return 0;
 }
index 35154a2..7446c39 100644 (file)
@@ -1,18 +1,11 @@
 #ifndef S390_CIO_H
 #define S390_CIO_H
 
-#include "schid.h"
 #include <linux/mutex.h>
-
-/*
- * where we put the ssd info
- */
-struct ssd_info {
-       __u8  valid:1;
-       __u8  type:7;           /* subchannel type */
-       __u8  chpid[8];         /* chpids */
-       __u16 fla[8];           /* full link addresses */
-} __attribute__ ((packed));
+#include <linux/device.h>
+#include <asm/chpid.h>
+#include "chsc.h"
+#include "schid.h"
 
 /*
  * path management control word
@@ -108,7 +101,7 @@ struct subchannel {
        struct schib schib;     /* subchannel information block */
        struct orb orb;         /* operation request block */
        struct ccw1 sense_ccw;  /* static ccw for sense command */
-       struct ssd_info ssd_info;       /* subchannel description */
+       struct chsc_ssd_info ssd_info;  /* subchannel description */
        struct device dev;      /* entry in device tree */
        struct css_driver *driver;
 } __attribute__ ((aligned(8)));
index 90b22fa..28abd69 100644 (file)
@@ -476,7 +476,7 @@ struct cmb_area {
 };
 
 static struct cmb_area cmb_area = {
-       .lock = SPIN_LOCK_UNLOCKED,
+       .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
        .list = LIST_HEAD_INIT(cmb_area.list),
        .num_channels  = 1024,
 };
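
The cmb_area change above swaps the generic SPIN_LOCK_UNLOCKED initializer for __SPIN_LOCK_UNLOCKED(cmb_area.lock), which names the statically allocated lock so lockdep can identify it. The two equivalent ways to initialize an embedded spinlock, as a kernel-context sketch; struct my_area is a made-up example type.

#include <linux/spinlock.h>

struct my_area {
        spinlock_t lock;
        int count;
};

/* Static initialization; the argument names the lock for lockdep. */
static struct my_area my_area = {
        .lock = __SPIN_LOCK_UNLOCKED(my_area.lock),
};

/* Equivalent runtime initialization. */
static void my_area_setup(struct my_area *a)
{
        spin_lock_init(&a->lock);
        a->count = 0;
}
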
index fe0ace7..27c6d9e 100644 (file)
@@ -20,8 +20,9 @@
 #include "ioasm.h"
 #include "chsc.h"
 #include "device.h"
+#include "idset.h"
+#include "chp.h"
 
-int need_rescan = 0;
 int css_init_done = 0;
 static int need_reprobe = 0;
 static int max_ssid = 0;
@@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch)
        mutex_unlock(&sch->reg_mutex);
 }
 
-static int
-css_register_subchannel(struct subchannel *sch)
+static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
+{
+       int i;
+       int mask;
+
+       memset(ssd, 0, sizeof(struct chsc_ssd_info));
+       ssd->path_mask = pmcw->pim;
+       for (i = 0; i < 8; i++) {
+               mask = 0x80 >> i;
+               if (pmcw->pim & mask) {
+                       chp_id_init(&ssd->chpid[i]);
+                       ssd->chpid[i].id = pmcw->chpid[i];
+               }
+       }
+}
+
+static void ssd_register_chpids(struct chsc_ssd_info *ssd)
+{
+       int i;
+       int mask;
+
+       for (i = 0; i < 8; i++) {
+               mask = 0x80 >> i;
+               if (ssd->path_mask & mask)
+                       if (!chp_is_registered(ssd->chpid[i]))
+                               chp_new(ssd->chpid[i]);
+       }
+}
+
+void css_update_ssd_info(struct subchannel *sch)
+{
+       int ret;
+
+       if (cio_is_console(sch->schid)) {
+               /* Console is initialized too early for functions requiring
+                * memory allocation. */
+               ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+       } else {
+               ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+               if (ret)
+                       ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+               ssd_register_chpids(&sch->ssd_info);
+       }
+}
+
+static int css_register_subchannel(struct subchannel *sch)
 {
        int ret;
 
@@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch)
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;
        sch->dev.groups = subch_attr_groups;
-
-       css_get_ssd_info(sch);
-
+       css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
@@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
        return css_probe_device(schid);
 }
 
-static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
+static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 {
        struct subchannel *sch;
        int ret;
@@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
-
-       return ret;
+       if (ret == -EAGAIN)
+               css_schedule_eval(schid);
 }
 
-static int
-css_rescan_devices(struct subchannel_id schid, void *data)
+static struct idset *slow_subchannel_set;
+static spinlock_t slow_subchannel_lock;
+
+static int __init slow_subchannel_init(void)
 {
-       return css_evaluate_subchannel(schid, 1);
+       spin_lock_init(&slow_subchannel_lock);
+       slow_subchannel_set = idset_sch_new();
+       if (!slow_subchannel_set) {
+               printk(KERN_WARNING "cio: could not allocate slow subchannel "
+                      "set\n");
+               return -ENOMEM;
+       }
+       return 0;
 }
 
-struct slow_subchannel {
-       struct list_head slow_list;
-       struct subchannel_id schid;
-};
-
-static LIST_HEAD(slow_subchannels_head);
-static DEFINE_SPINLOCK(slow_subchannel_lock);
+subsys_initcall(slow_subchannel_init);
 
-static void
-css_trigger_slow_path(struct work_struct *unused)
+static void css_slow_path_func(struct work_struct *unused)
 {
-       CIO_TRACE_EVENT(4, "slowpath");
-
-       if (need_rescan) {
-               need_rescan = 0;
-               for_each_subchannel(css_rescan_devices, NULL);
-               return;
-       }
+       struct subchannel_id schid;
 
+       CIO_TRACE_EVENT(4, "slowpath");
        spin_lock_irq(&slow_subchannel_lock);
-       while (!list_empty(&slow_subchannels_head)) {
-               struct slow_subchannel *slow_sch =
-                       list_entry(slow_subchannels_head.next,
-                                  struct slow_subchannel, slow_list);
-
-               list_del_init(slow_subchannels_head.next);
+       init_subchannel_id(&schid);
+       while (idset_sch_get_first(slow_subchannel_set, &schid)) {
+               idset_sch_del(slow_subchannel_set, schid);
                spin_unlock_irq(&slow_subchannel_lock);
-               css_evaluate_subchannel(slow_sch->schid, 1);
+               css_evaluate_subchannel(schid, 1);
                spin_lock_irq(&slow_subchannel_lock);
-               kfree(slow_sch);
        }
        spin_unlock_irq(&slow_subchannel_lock);
 }
 
-DECLARE_WORK(slow_path_work, css_trigger_slow_path);
+static DECLARE_WORK(slow_path_work, css_slow_path_func);
 struct workqueue_struct *slow_path_wq;
 
+void css_schedule_eval(struct subchannel_id schid)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&slow_subchannel_lock, flags);
+       idset_sch_add(slow_subchannel_set, schid);
+       queue_work(slow_path_wq, &slow_path_work);
+       spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+void css_schedule_eval_all(void)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&slow_subchannel_lock, flags);
+       idset_fill(slow_subchannel_set);
+       queue_work(slow_path_wq, &slow_path_work);
+       spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
 /* Reprobe subchannel if unregistered. */
 static int reprobe_subchannel(struct subchannel_id schid, void *data)
 {
@@ -425,34 +481,15 @@ void css_schedule_reprobe(void)
 
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
-/*
- * Rescan for new devices. FIXME: This is slow.
- * This function is called when we have lost CRWs due to overflows and we have
- * to do subchannel housekeeping.
- */
-void
-css_reiterate_subchannels(void)
-{
-       css_clear_subchannel_slow_list();
-       need_rescan = 1;
-}
-
 /*
  * Called from the machine check handler for subchannel report words.
  */
-int
-css_process_crw(int rsid1, int rsid2)
+void css_process_crw(int rsid1, int rsid2)
 {
-       int ret;
        struct subchannel_id mchk_schid;
 
        CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
                      rsid1, rsid2);
-
-       if (need_rescan)
-               /* We need to iterate all subchannels anyway. */
-               return -EAGAIN;
-
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = rsid1;
        if (rsid2 != 0)
@@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2)
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
-       ret = css_evaluate_subchannel(mchk_schid, 0);
-       if (ret == -EAGAIN) {
-               if (css_enqueue_subchannel_slow(mchk_schid)) {
-                       css_clear_subchannel_slow_list();
-                       need_rescan = 1;
-               }
-       }
-       return ret;
+       css_evaluate_subchannel(mchk_schid, 0);
 }
 
 static int __init
@@ -745,47 +775,6 @@ struct bus_type css_bus_type = {
 
 subsys_initcall(init_channel_subsystem);
 
-int
-css_enqueue_subchannel_slow(struct subchannel_id schid)
-{
-       struct slow_subchannel *new_slow_sch;
-       unsigned long flags;
-
-       new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
-       if (!new_slow_sch)
-               return -ENOMEM;
-       new_slow_sch->schid = schid;
-       spin_lock_irqsave(&slow_subchannel_lock, flags);
-       list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
-       spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-       return 0;
-}
-
-void
-css_clear_subchannel_slow_list(void)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&slow_subchannel_lock, flags);
-       while (!list_empty(&slow_subchannels_head)) {
-               struct slow_subchannel *slow_sch =
-                       list_entry(slow_subchannels_head.next,
-                                  struct slow_subchannel, slow_list);
-
-               list_del_init(slow_subchannels_head.next);
-               kfree(slow_sch);
-       }
-       spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-}
-
-
-
-int
-css_slow_subchannels_exist(void)
-{
-       return (!list_empty(&slow_subchannels_head));
-}
-
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(css_bus_type);
 EXPORT_SYMBOL_GPL(css_characteristics_avail);
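
The rework above drops the kmalloc'd slow-subchannel list and the global need_rescan flag: css_schedule_eval() now just sets a bit in an idset under slow_subchannel_lock and queues slow_path_work, and css_slow_path_func() drains the set one subchannel at a time, dropping the lock around each evaluation. The control flow in isolation, as a runnable single-threaded model (locking omitted, a 64-bit word stands in for the idset, and the GCC __builtin_ctzll intrinsic replaces find_first_bit()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t pending;        /* stands in for the idset bitmap */

/* css_schedule_eval() analogue: record the ID, the worker picks it up later. */
static void schedule_eval(int id)
{
        pending |= UINT64_C(1) << id;
}

static bool get_first_pending(int *id)
{
        if (!pending)
                return false;
        *id = __builtin_ctzll(pending); /* lowest set bit */
        return true;
}

static void evaluate(int id)
{
        printf("evaluating subchannel %d\n", id);
}

/* css_slow_path_func() analogue: pop and evaluate one entry at a time. */
static void slow_path_drain(void)
{
        int id;

        while (get_first_pending(&id)) {
                pending &= ~(UINT64_C(1) << id);
                evaluate(id);
        }
}

int main(void)
{
        schedule_eval(3);
        schedule_eval(17);
        slow_path_drain();
        return 0;
}
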
index ca2bab9..71fcfdc 100644 (file)
@@ -4,8 +4,11 @@
 #include <linux/mutex.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/types.h>
 
 #include <asm/cio.h>
+#include <asm/chpid.h>
 
 #include "schid.h"
 
@@ -143,13 +146,12 @@ extern void css_sch_device_unregister(struct subchannel *);
 extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
 extern int css_init_done;
 extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
-extern int css_process_crw(int, int);
+extern void css_process_crw(int, int);
 extern void css_reiterate_subchannels(void);
+void css_update_ssd_info(struct subchannel *sch);
 
 #define __MAX_SUBCHANNEL 65535
 #define __MAX_SSID 3
-#define __MAX_CHPID 255
-#define __MAX_CSSID 0
 
 struct channel_subsystem {
        u8 cssid;
@@ -185,16 +187,12 @@ int device_trigger_verify(struct subchannel *sch);
 void device_kill_pending_timer(struct subchannel *);
 
 /* Helper functions to build lists for the slow path. */
-extern int css_enqueue_subchannel_slow(struct subchannel_id schid);
-void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
-void css_clear_subchannel_slow_list(void);
-int css_slow_subchannels_exist(void);
-extern int need_rescan;
+void css_schedule_eval(struct subchannel_id schid);
+void css_schedule_eval_all(void);
 
 int sch_is_pseudo_sch(struct subchannel *);
 
 extern struct workqueue_struct *slow_path_wq;
-extern struct work_struct slow_path_work;
 
 int subchannel_add_files (struct device *);
 extern struct attribute_group *subch_attr_groups[];
index e322111..0335590 100644 (file)
@@ -56,13 +56,12 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
 /* Store modalias string delimited by prefix/suffix string into buffer with
  * specified size. Return length of resulting string (excluding trailing '\0')
  * even if string doesn't fit buffer (snprintf semantics). */
-static int snprint_alias(char *buf, size_t size, const char *prefix,
+static int snprint_alias(char *buf, size_t size,
                         struct ccw_device_id *id, const char *suffix)
 {
        int len;
 
-       len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type,
-                      id->cu_model);
+       len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
        if (len > size)
                return len;
        buf += len;
@@ -85,53 +84,40 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp,
        struct ccw_device *cdev = to_ccwdev(dev);
        struct ccw_device_id *id = &(cdev->id);
        int i = 0;
-       int len;
+       int len = 0;
+       int ret;
+       char modalias_buf[30];
 
        /* CU_TYPE= */
-       len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1;
-       if (len > buffer_size || i >= num_envp)
-               return -ENOMEM;
-       envp[i++] = buffer;
-       buffer += len;
-       buffer_size -= len;
+       ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
+                            "CU_TYPE=%04X", id->cu_type);
+       if (ret)
+               return ret;
 
        /* CU_MODEL= */
-       len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1;
-       if (len > buffer_size || i >= num_envp)
-               return -ENOMEM;
-       envp[i++] = buffer;
-       buffer += len;
-       buffer_size -= len;
+       ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
+                            "CU_MODEL=%02X", id->cu_model);
+       if (ret)
+               return ret;
 
        /* The next two can be zero, that's ok for us */
        /* DEV_TYPE= */
-       len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1;
-       if (len > buffer_size || i >= num_envp)
-               return -ENOMEM;
-       envp[i++] = buffer;
-       buffer += len;
-       buffer_size -= len;
+       ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
+                            "DEV_TYPE=%04X", id->dev_type);
+       if (ret)
+               return ret;
 
        /* DEV_MODEL= */
-       len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X",
-                       (unsigned char) id->dev_model) + 1;
-       if (len > buffer_size || i >= num_envp)
-               return -ENOMEM;
-       envp[i++] = buffer;
-       buffer += len;
-       buffer_size -= len;
+       ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
+                            "DEV_MODEL=%02X", id->dev_model);
+       if (ret)
+               return ret;
 
        /* MODALIAS=  */
-       len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1;
-       if (len > buffer_size || i >= num_envp)
-               return -ENOMEM;
-       envp[i++] = buffer;
-       buffer += len;
-       buffer_size -= len;
-
-       envp[i] = NULL;
-
-       return 0;
+       snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
+       ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
+                            "MODALIAS=%s", modalias_buf);
+       return ret;
 }
 
 struct bus_type ccw_bus_type;
@@ -230,12 +216,18 @@ static ssize_t
 chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
 {
        struct subchannel *sch = to_subchannel(dev);
-       struct ssd_info *ssd = &sch->ssd_info;
+       struct chsc_ssd_info *ssd = &sch->ssd_info;
        ssize_t ret = 0;
        int chp;
+       int mask;
 
-       for (chp = 0; chp < 8; chp++)
-               ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
+       for (chp = 0; chp < 8; chp++) {
+               mask = 0x80 >> chp;
+               if (ssd->path_mask & mask)
+                       ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
+               else
+                       ret += sprintf(buf + ret, "00 ");
+       }
        ret += sprintf (buf+ret, "\n");
        return min((ssize_t)PAGE_SIZE, ret);
 }
@@ -280,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
        struct ccw_device_id *id = &(cdev->id);
        int len;
 
-       len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1;
+       len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1;
 
        return len > PAGE_SIZE ? PAGE_SIZE : len;
 }
@@ -298,16 +290,10 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
        return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
 }
 
-static void ccw_device_unregister(struct work_struct *work)
+static void ccw_device_unregister(struct ccw_device *cdev)
 {
-       struct ccw_device_private *priv;
-       struct ccw_device *cdev;
-
-       priv = container_of(work, struct ccw_device_private, kick_work);
-       cdev = priv->cdev;
        if (test_and_clear_bit(1, &cdev->private->registered))
-               device_unregister(&cdev->dev);
-       put_device(&cdev->dev);
+               device_del(&cdev->dev);
 }
 
 static void
@@ -324,11 +310,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
                spin_lock_irqsave(cdev->ccwlock, flags);
                cdev->private->state = DEV_STATE_NOT_OPER;
                spin_unlock_irqrestore(cdev->ccwlock, flags);
-               if (get_device(&cdev->dev)) {
-                       PREPARE_WORK(&cdev->private->kick_work,
-                                    ccw_device_unregister);
-                       queue_work(ccw_device_work, &cdev->private->kick_work);
-               }
+               ccw_device_unregister(cdev);
+               put_device(&cdev->dev);
                return ;
        }
        sch = to_subchannel(cdev->dev.parent);
@@ -413,11 +396,60 @@ ccw_device_set_online(struct ccw_device *cdev)
        return (ret == 0) ? -ENODEV : ret;
 }
 
-static ssize_t
-online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static void online_store_handle_offline(struct ccw_device *cdev)
+{
+       if (cdev->private->state == DEV_STATE_DISCONNECTED)
+               ccw_device_remove_disconnected(cdev);
+       else if (cdev->drv && cdev->drv->set_offline)
+               ccw_device_set_offline(cdev);
+}
+
+static int online_store_recog_and_online(struct ccw_device *cdev)
+{
+       int ret;
+
+       /* Do device recognition, if needed. */
+       if (cdev->id.cu_type == 0) {
+               ret = ccw_device_recognition(cdev);
+               if (ret) {
+                       printk(KERN_WARNING"Couldn't start recognition "
+                              "for device %s (ret=%d)\n",
+                              cdev->dev.bus_id, ret);
+                       return ret;
+               }
+               wait_event(cdev->private->wait_q,
+                          cdev->private->flags.recog_done);
+       }
+       if (cdev->drv && cdev->drv->set_online)
+               ccw_device_set_online(cdev);
+       return 0;
+}
+static void online_store_handle_online(struct ccw_device *cdev, int force)
+{
+       int ret;
+
+       ret = online_store_recog_and_online(cdev);
+       if (ret)
+               return;
+       if (force && cdev->private->state == DEV_STATE_BOXED) {
+               ret = ccw_device_stlck(cdev);
+               if (ret) {
+                       printk(KERN_WARNING"ccw_device_stlck for device %s "
+                              "returned %d!\n", cdev->dev.bus_id, ret);
+                       return;
+               }
+               if (cdev->id.cu_type == 0)
+                       cdev->private->state = DEV_STATE_NOT_OPER;
+               online_store_recog_and_online(cdev);
+       }
+
+}
+
+static ssize_t online_store (struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
 {
        struct ccw_device *cdev = to_ccwdev(dev);
-       int i, force, ret;
+       int i, force;
        char *tmp;
 
        if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
@@ -434,51 +466,17 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
                force = 0;
                i = simple_strtoul(buf, &tmp, 16);
        }
-       if (i == 1) {
-               /* Do device recognition, if needed. */
-               if (cdev->id.cu_type == 0) {
-                       ret = ccw_device_recognition(cdev);
-                       if (ret) {
-                               printk(KERN_WARNING"Couldn't start recognition "
-                                      "for device %s (ret=%d)\n",
-                                      cdev->dev.bus_id, ret);
-                               goto out;
-                       }
-                       wait_event(cdev->private->wait_q,
-                                  cdev->private->flags.recog_done);
-               }
-               if (cdev->drv && cdev->drv->set_online)
-                       ccw_device_set_online(cdev);
-       } else if (i == 0) {
-               if (cdev->private->state == DEV_STATE_DISCONNECTED)
-                       ccw_device_remove_disconnected(cdev);
-               else if (cdev->drv && cdev->drv->set_offline)
-                       ccw_device_set_offline(cdev);
-       }
-       if (force && cdev->private->state == DEV_STATE_BOXED) {
-               ret = ccw_device_stlck(cdev);
-               if (ret) {
-                       printk(KERN_WARNING"ccw_device_stlck for device %s "
-                              "returned %d!\n", cdev->dev.bus_id, ret);
-                       goto out;
-               }
-               /* Do device recognition, if needed. */
-               if (cdev->id.cu_type == 0) {
-                       cdev->private->state = DEV_STATE_NOT_OPER;
-                       ret = ccw_device_recognition(cdev);
-                       if (ret) {
-                               printk(KERN_WARNING"Couldn't start recognition "
-                                      "for device %s (ret=%d)\n",
-                                      cdev->dev.bus_id, ret);
-                               goto out;
-                       }
-                       wait_event(cdev->private->wait_q,
-                                  cdev->private->flags.recog_done);
-               }
-               if (cdev->drv && cdev->drv->set_online)
-                       ccw_device_set_online(cdev);
+
+       switch (i) {
+       case 0:
+               online_store_handle_offline(cdev);
+               break;
+       case 1:
+               online_store_handle_online(cdev, force);
+               break;
+       default:
+               count = -EINVAL;
        }
-       out:
        if (cdev->drv)
                module_put(cdev->drv->owner);
        atomic_set(&cdev->private->onoff, 0);
@@ -548,17 +546,10 @@ static struct attribute_group ccwdev_attr_group = {
        .attrs = ccwdev_attrs,
 };
 
-static int
-device_add_files (struct device *dev)
-{
-       return sysfs_create_group(&dev->kobj, &ccwdev_attr_group);
-}
-
-static void
-device_remove_files(struct device *dev)
-{
-       sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
-}
+struct attribute_group *ccwdev_attr_groups[] = {
+       &ccwdev_attr_group,
+       NULL,
+};
 
 /* this is a simple abstraction for device_register that sets the
  * correct bus type and adds the bus specific files */
@@ -573,10 +564,6 @@ static int ccw_device_register(struct ccw_device *cdev)
                return ret;
 
        set_bit(1, &cdev->private->registered);
-       if ((ret = device_add_files(dev))) {
-               if (test_and_clear_bit(1, &cdev->private->registered))
-                       device_del(dev);
-       }
        return ret;
 }
 
@@ -648,10 +635,6 @@ ccw_device_add_changed(struct work_struct *work)
                return;
        }
        set_bit(1, &cdev->private->registered);
-       if (device_add_files(&cdev->dev)) {
-               if (test_and_clear_bit(1, &cdev->private->registered))
-                       device_unregister(&cdev->dev);
-       }
 }
 
 void ccw_device_do_unreg_rereg(struct work_struct *work)
@@ -664,9 +647,7 @@ void ccw_device_do_unreg_rereg(struct work_struct *work)
        cdev = priv->cdev;
        sch = to_subchannel(cdev->dev.parent);
 
-       device_remove_files(&cdev->dev);
-       if (test_and_clear_bit(1, &cdev->private->registered))
-               device_del(&cdev->dev);
+       ccw_device_unregister(cdev);
        PREPARE_WORK(&cdev->private->kick_work,
                     ccw_device_add_changed);
        queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -705,6 +686,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
        cdev->dev.parent = &sch->dev;
        cdev->dev.release = ccw_device_release;
        INIT_LIST_HEAD(&cdev->private->kick_work.entry);
+       cdev->dev.groups = ccwdev_attr_groups;
        /* Do first half of device_register. */
        device_initialize(&cdev->dev);
        if (!get_device(&sch->dev)) {
@@ -736,6 +718,7 @@ static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
 static void sch_attach_device(struct subchannel *sch,
                              struct ccw_device *cdev)
 {
+       css_update_ssd_info(sch);
        spin_lock_irq(sch->lock);
        sch->dev.driver_data = cdev;
        cdev->private->schid = sch->schid;
@@ -871,7 +854,7 @@ io_subchannel_register(struct work_struct *work)
        priv = container_of(work, struct ccw_device_private, kick_work);
        cdev = priv->cdev;
        sch = to_subchannel(cdev->dev.parent);
-
+       css_update_ssd_info(sch);
        /*
         * io_subchannel_register() will also be called after device
         * recognition has been done for a boxed device (which will already
@@ -1133,15 +1116,8 @@ io_subchannel_remove (struct subchannel *sch)
        sch->dev.driver_data = NULL;
        cdev->private->state = DEV_STATE_NOT_OPER;
        spin_unlock_irqrestore(cdev->ccwlock, flags);
-       /*
-        * Put unregistration on workqueue to avoid livelocks on the css bus
-        * semaphore.
-        */
-       if (get_device(&cdev->dev)) {
-               PREPARE_WORK(&cdev->private->kick_work,
-                            ccw_device_unregister);
-               queue_work(ccw_device_work, &cdev->private->kick_work);
-       }
+       ccw_device_unregister(cdev);
+       put_device(&cdev->dev);
        return 0;
 }
 
index 089a3dd..898ec3b 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <asm/ccwdev.h>
 #include <asm/cio.h>
+#include <asm/chpid.h>
 
 #include "cio.h"
 #include "cio_debug.h"
@@ -22,6 +23,7 @@
 #include "device.h"
 #include "chsc.h"
 #include "ioasm.h"
+#include "chp.h"
 
 int
 device_is_online(struct subchannel *sch)
@@ -210,14 +212,18 @@ static void
 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
 {
        int mask, i;
+       struct chp_id chpid;
 
+       chp_id_init(&chpid);
        for (i = 0; i<8; i++) {
                mask = 0x80 >> i;
                if (!(sch->lpm & mask))
                        continue;
                if (old_lpm & mask)
                        continue;
-               chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
+               chpid.id = sch->schib.pmcw.chpid[i];
+               if (!chp_is_registered(chpid))
+                       css_schedule_eval_all();
        }
 }
 
index 7c7775a..16f59fc 100644 (file)
 
 #include <asm/ccwdev.h>
 #include <asm/idals.h>
+#include <asm/chpid.h>
 
 #include "cio.h"
 #include "cio_debug.h"
 #include "css.h"
 #include "chsc.h"
 #include "device.h"
+#include "chp.h"
 
 int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
 {
@@ -606,9 +608,12 @@ void *
 ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
 {
        struct subchannel *sch;
+       struct chp_id chpid;
 
        sch = to_subchannel(cdev->dev.parent);
-       return chsc_get_chp_desc(sch, chp_no);
+       chp_id_init(&chpid);
+       chpid.id = sch->schib.pmcw.chpid[chp_no];
+       return chp_get_chp_desc(chpid);
 }
 
 // FIXME: these have to go:
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
new file mode 100644 (file)
index 0000000..16ea828
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ *  drivers/s390/cio/idset.c
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include "idset.h"
+#include "css.h"
+
+struct idset {
+       int num_ssid;
+       int num_id;
+       unsigned long bitmap[0];
+};
+
+static inline unsigned long bitmap_size(int num_ssid, int num_id)
+{
+       return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
+}
+
+static struct idset *idset_new(int num_ssid, int num_id)
+{
+       struct idset *set;
+
+       set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id),
+                     GFP_KERNEL);
+       if (set) {
+               set->num_ssid = num_ssid;
+               set->num_id = num_id;
+       }
+       return set;
+}
+
+void idset_free(struct idset *set)
+{
+       kfree(set);
+}
+
+void idset_clear(struct idset *set)
+{
+       memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
+}
+
+void idset_fill(struct idset *set)
+{
+       memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
+}
+
+static inline void idset_add(struct idset *set, int ssid, int id)
+{
+       set_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline void idset_del(struct idset *set, int ssid, int id)
+{
+       clear_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_contains(struct idset *set, int ssid, int id)
+{
+       return test_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_get_first(struct idset *set, int *ssid, int *id)
+{
+       int bitnum;
+
+       bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+       if (bitnum >= set->num_ssid * set->num_id)
+               return 0;
+       *ssid = bitnum / set->num_id;
+       *id = bitnum % set->num_id;
+       return 1;
+}
+
+struct idset *idset_sch_new(void)
+{
+       return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
+}
+
+void idset_sch_add(struct idset *set, struct subchannel_id schid)
+{
+       idset_add(set, schid.ssid, schid.sch_no);
+}
+
+void idset_sch_del(struct idset *set, struct subchannel_id schid)
+{
+       idset_del(set, schid.ssid, schid.sch_no);
+}
+
+int idset_sch_contains(struct idset *set, struct subchannel_id schid)
+{
+       return idset_contains(set, schid.ssid, schid.sch_no);
+}
+
+int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
+{
+       int ssid = 0;
+       int id = 0;
+       int rc;
+
+       rc = idset_get_first(set, &ssid, &id);
+       if (rc) {
+               init_subchannel_id(schid);
+               schid->ssid = ssid;
+               schid->sch_no = id;
+       }
+       return rc;
+}
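
idset.c above packs one bit per (ssid, id) pair at position ssid * num_id + id and recovers the pair from a bit number with a division and a remainder; idset_sch_new() simply sizes the set for all possible subchannel IDs. The index arithmetic on its own, runnable, using plain C shifts instead of the kernel bitops (so the bit numbering within a word differs from set_bit()/find_first_bit()):

#include <stdint.h>
#include <stdio.h>

#define NUM_SSID 4      /* __MAX_SSID + 1 */
#define NUM_ID   16     /* tiny for the example; the kernel uses 65536 */

static uint32_t bitmap[(NUM_SSID * NUM_ID + 31) / 32];

static void idset_add(int ssid, int id)
{
        int bit = ssid * NUM_ID + id;   /* same mapping as the kernel helper */

        bitmap[bit / 32] |= UINT32_C(1) << (bit % 32);
}

static int idset_get_first(int *ssid, int *id)
{
        int bit;

        for (bit = 0; bit < NUM_SSID * NUM_ID; bit++) {
                if (bitmap[bit / 32] & (UINT32_C(1) << (bit % 32))) {
                        *ssid = bit / NUM_ID;   /* recover the pair */
                        *id = bit % NUM_ID;
                        return 1;
                }
        }
        return 0;
}

int main(void)
{
        int ssid, id;

        idset_add(2, 5);
        if (idset_get_first(&ssid, &id))
                printf("first pending: ssid=%d id=%d\n", ssid, id);
        return 0;
}
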
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
new file mode 100644 (file)
index 0000000..144466a
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ *  drivers/s390/cio/idset.h
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_IDSET_H
+#define S390_IDSET_H S390_IDSET_H
+
+#include "schid.h"
+
+struct idset;
+
+void idset_free(struct idset *set);
+void idset_clear(struct idset *set);
+void idset_fill(struct idset *set);
+
+struct idset *idset_sch_new(void);
+void idset_sch_add(struct idset *set, struct subchannel_id id);
+void idset_sch_del(struct idset *set, struct subchannel_id id);
+int idset_sch_contains(struct idset *set, struct subchannel_id id);
+int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
+
+#endif /* S390_IDSET_H */
index ad6d829..7153dd9 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef S390_CIO_IOASM_H
 #define S390_CIO_IOASM_H
 
+#include <asm/chpid.h>
 #include "schid.h"
 
 /*
@@ -189,9 +190,9 @@ static inline int chsc(void *chsc_area)
        return cc;
 }
 
-static inline int rchp(int chpid)
+static inline int rchp(struct chp_id chpid)
 {
-       register unsigned int reg1 asm ("1") = chpid;
+       register struct chp_id reg1 asm ("1") = chpid;
        int ccode;
 
        asm volatile(
index 7809a79..6dd64d0 100644 (file)
@@ -3525,8 +3525,8 @@ unpack_next:
                                 memcpy(skb_put(skb,len_of_data),
                                        privptr->p_mtc_envelope,
                                        len_of_data);
-                                skb->mac.raw=skb->data;
                                 skb->dev=dev;
+                               skb_reset_mac_header(skb);
                                 skb->protocol=htons(ETH_P_IP);
                                 skb->ip_summed=CHECKSUM_UNNECESSARY;
                                 privptr->stats.rx_packets++;
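
The network driver hunks above and below are part of the tree-wide sk_buff cleanup: instead of assigning skb->mac.raw and skb->tail directly, the code now goes through accessor helpers such as skb_reset_mac_header(), skb_reset_tail_pointer(), skb_copy_from_linear_data() and skb_tail_pointer(). The receive-side pattern they converge on, as a kernel-context fragment (not compilable on its own; ch->trans_skb_data is the reused buffer seen in the hunks below):

        /* previously: skb->mac.raw = skb->data; */
        skb->dev = dev;
        skb_reset_mac_header(skb);              /* mac header = current data */
        skb->protocol = htons(ETH_P_IP);
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        /* previously: skb->data = skb->tail = ch->trans_skb_data; */
        skb->data = ch->trans_skb_data;
        skb_reset_tail_pointer(skb);            /* tail = data */
        skb->len = 0;
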
index 0d6d5fc..b20fd06 100644 (file)
@@ -455,7 +455,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
                        return;
                }
                skb_put(pskb, header->length);
-               pskb->mac.raw = pskb->data;
+               skb_reset_mac_header(pskb);
                len -= header->length;
                skb = dev_alloc_skb(pskb->len);
                if (!skb) {
@@ -472,8 +472,9 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
                        privptr->stats.rx_dropped++;
                        return;
                }
-               memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
-               skb->mac.raw = skb->data;
+               skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
+                                         pskb->len);
+               skb_reset_mac_header(skb);
                skb->dev = pskb->dev;
                skb->protocol = pskb->protocol;
                pskb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -706,7 +707,8 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg)
                        spin_unlock(&ch->collect_lock);
                        return;
                }
-               ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
+               ch->trans_skb->data = ch->trans_skb_data;
+               skb_reset_tail_pointer(ch->trans_skb);
                ch->trans_skb->len = 0;
                if (ch->prof.maxmulti < (ch->collect_len + 2))
                        ch->prof.maxmulti = ch->collect_len + 2;
@@ -715,8 +717,9 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg)
                *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
                i = 0;
                while ((skb = skb_dequeue(&ch->collect_queue))) {
-                       memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
-                              skb->len);
+                       skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
+                                                              skb->len),
+                                                 skb->len);
                        privptr->stats.tx_packets++;
                        privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
                        atomic_dec(&skb->users);
@@ -831,7 +834,8 @@ ch_action_rx(fsm_instance * fi, int event, void *arg)
                ctc_unpack_skb(ch, skb);
        }
  again:
-       skb->data = skb->tail = ch->trans_skb_data;
+       skb->data = ch->trans_skb_data;
+       skb_reset_tail_pointer(skb);
        skb->len = 0;
        if (ctc_checkalloc_buffer(ch, 1))
                return;
@@ -1638,21 +1642,19 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
        struct channel *ch;
 
        DBF_TEXT(trace, 2, __FUNCTION__);
-       if ((ch =
-            (struct channel *) kmalloc(sizeof (struct channel),
-                                       GFP_KERNEL)) == NULL) {
+       ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
+       if (!ch) {
                ctc_pr_warn("ctc: Out of memory in add_channel\n");
                return -1;
        }
-       memset(ch, 0, sizeof (struct channel));
-       if ((ch->ccw = kmalloc(8*sizeof(struct ccw1),
-                                              GFP_KERNEL | GFP_DMA)) == NULL) {
+       /* assure all flags and counters are reset */
+       ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+       if (!ch->ccw) {
                kfree(ch);
                ctc_pr_warn("ctc: Out of memory in add_channel\n");
                return -1;
        }
 
-       memset(ch->ccw, 0, 8*sizeof(struct ccw1));      // assure all flags and counters are reset
 
        /**
         * "static" ccws are used in the following way:
@@ -1692,15 +1694,14 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
                return -1;
        }
        fsm_newstate(ch->fsm, CH_STATE_IDLE);
-       if ((ch->irb = kmalloc(sizeof (struct irb),
-                                             GFP_KERNEL)) == NULL) {
+       ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
+       if (!ch->irb) {
                ctc_pr_warn("ctc: Out of memory in add_channel\n");
                kfree_fsm(ch->fsm);
                kfree(ch->ccw);
                kfree(ch);
                return -1;
        }
-       memset(ch->irb, 0, sizeof (struct irb));
        while (*c && less_than((*c)->id, ch->id))
                c = &(*c)->next;
        if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
@@ -2226,7 +2227,8 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
                 * IDAL support in CTC is broken, so we have to
                 * care about skb's above 2G ourselves.
                 */
-               hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
+               hi = ((unsigned long)skb_tail_pointer(skb) +
+                     LL_HEADER_LENGTH) >> 31;
                if (hi) {
                        nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                        if (!nskb) {
@@ -2262,11 +2264,12 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
                                return -EBUSY;
                        }
 
-                       ch->trans_skb->tail = ch->trans_skb->data;
+                       skb_reset_tail_pointer(ch->trans_skb);
                        ch->trans_skb->len = 0;
                        ch->ccw[1].count = skb->len;
-                       memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
-                              skb->len);
+                       skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
+                                                              skb->len),
+                                                 skb->len);
                        atomic_dec(&skb->users);
                        dev_kfree_skb_irq(skb);
                        ccw_idx = 0;
@@ -2745,14 +2748,13 @@ ctc_probe_device(struct ccwgroup_device *cgdev)
        if (!get_device(&cgdev->dev))
                return -ENODEV;
 
-       priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
+       priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL);
        if (!priv) {
                ctc_pr_err("%s: Out of memory\n", __func__);
                put_device(&cgdev->dev);
                return -ENOMEM;
        }
 
-       memset(priv, 0, sizeof (struct ctc_priv));
        rc = ctc_add_files(&cgdev->dev);
        if (rc) {
                kfree(priv);
@@ -2793,10 +2795,9 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device,
        DBF_TEXT(setup, 3, __FUNCTION__);
 
        if (alloc_device) {
-               dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
+               dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
                if (!dev)
                        return NULL;
-               memset(dev, 0, sizeof (struct net_device));
        }
 
        dev->priv = privptr;
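
The ctcmain.c hunks above, like the claw, lcs, netiucv and qeth hunks that follow, apply the same sk_buff API migration: open-coded writes to skb->mac.raw and skb->tail, and memcpy() from skb->data, are replaced with the new accessor helpers. A condensed sketch of the pattern, for illustration only; refill_example and its arguments are made up, while the helpers are the ones used in the hunks.

#include <linux/skbuff.h>

static void refill_example(struct sk_buff *skb, struct sk_buff *src,
                           unsigned char *buf, unsigned int len)
{
        /* old: skb->data = skb->tail = buf; */
        skb->data = buf;
        skb_reset_tail_pointer(skb);
        skb->len = 0;

        /* old: memcpy(skb_put(skb, len), src->data, len); */
        skb_copy_from_linear_data(src, skb_put(skb, len), len);

        /* old: skb->mac.raw = skb->data; */
        skb_reset_mac_header(skb);
}
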
index ecca104..08a994f 100644 (file)
@@ -1576,7 +1576,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
        header->offset = card->tx_buffer->count;
        header->type = card->lan_type;
        header->slot = card->portno;
-       memcpy(header + 1, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, header + 1, skb->len);
        spin_unlock(&card->lock);
        card->stats.tx_bytes += skb->len;
        card->stats.tx_packets++;
@@ -1784,7 +1784,6 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
                card->stats.rx_dropped++;
                return;
        }
-       skb->dev = card->dev;
        memcpy(skb_put(skb, skb_len), skb_data, skb_len);
        skb->protocol = card->lan_type_trans(skb, card->dev);
        card->stats.rx_bytes += skb_len;
index 594320c..e10e85e 100644 (file)
@@ -635,7 +635,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
                        return;
                }
                skb_put(pskb, header->next);
-               pskb->mac.raw = pskb->data;
+               skb_reset_mac_header(pskb);
                skb = dev_alloc_skb(pskb->len);
                if (!skb) {
                        PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
@@ -645,8 +645,9 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
                        privptr->stats.rx_dropped++;
                        return;
                }
-               memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
-               skb->mac.raw = skb->data;
+               skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
+                                         pskb->len);
+               skb_reset_mac_header(skb);
                skb->dev = pskb->dev;
                skb->protocol = pskb->protocol;
                pskb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -689,7 +690,8 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
                               msg->length, conn->max_buffsize);
                return;
        }
-       conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
+       conn->rx_buff->data = conn->rx_buff->head;
+       skb_reset_tail_pointer(conn->rx_buff);
        conn->rx_buff->len = 0;
        rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
                                  msg->length, NULL);
@@ -735,14 +737,17 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
                        }
                }
        }
-       conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
+       conn->tx_buff->data = conn->tx_buff->head;
+       skb_reset_tail_pointer(conn->tx_buff);
        conn->tx_buff->len = 0;
        spin_lock_irqsave(&conn->collect_lock, saveflags);
        while ((skb = skb_dequeue(&conn->collect_queue))) {
                header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
                memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
                       NETIUCV_HDRLEN);
-               memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len);
+               skb_copy_from_linear_data(skb,
+                                         skb_put(conn->tx_buff, skb->len),
+                                         skb->len);
                txbytes += skb->len;
                txpackets++;
                stat_maxcq++;
@@ -1164,8 +1169,8 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
                 * Copy the skb to a new allocated skb in lowmem only if the
                 * data is located above 2G in memory or tailroom is < 2.
                 */
-               unsigned long hi =
-                       ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31;
+               unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
+                                   NETIUCV_HDRLEN)) >> 31;
                int copied = 0;
                if (hi || (skb_tailroom(skb) < 2)) {
                        nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
index 7c735e1..dd7034f 100644 (file)
@@ -267,7 +267,8 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
 
        QETH_DBF_TEXT(trace, 5, "eddpcdtc");
        if (skb_shinfo(eddp->skb)->nr_frags == 0) {
-               memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
+               skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
+                                                dst, len);
                *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
                                      *hcsum);
                eddp->skb_offset += len;
@@ -416,7 +417,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                        eddp->skb_offset += VLAN_HLEN;
 #endif /* CONFIG_QETH_VLAN */
        }
-       tcph = eddp->skb->h.th;
+       tcph = tcp_hdr(eddp->skb);
        while (eddp->skb_offset < eddp->skb->len) {
                data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
                               (int)(eddp->skb->len - eddp->skb_offset));
@@ -473,20 +474,24 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
        QETH_DBF_TEXT(trace, 5, "eddpficx");
        /* create our segmentation headers and copy original headers */
        if (skb->protocol == htons(ETH_P_IP))
-               eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
-                               skb->nh.iph->ihl*4,
-                               (u8 *)skb->h.th, skb->h.th->doff*4);
+               eddp = qeth_eddp_create_eddp_data(qhdr,
+                                                 skb_network_header(skb),
+                                                 ip_hdrlen(skb),
+                                                 skb_transport_header(skb),
+                                                 tcp_hdrlen(skb));
        else
-               eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
-                               sizeof(struct ipv6hdr),
-                               (u8 *)skb->h.th, skb->h.th->doff*4);
+               eddp = qeth_eddp_create_eddp_data(qhdr,
+                                                 skb_network_header(skb),
+                                                 sizeof(struct ipv6hdr),
+                                                 skb_transport_header(skb),
+                                                 tcp_hdrlen(skb));
 
        if (eddp == NULL) {
                QETH_DBF_TEXT(trace, 2, "eddpfcnm");
                return -ENOMEM;
        }
        if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
-               skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr);
+               skb_set_mac_header(skb, sizeof(struct qeth_hdr));
                memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
 #ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
@@ -590,12 +595,13 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
        QETH_DBF_TEXT(trace, 5, "creddpct");
        if (skb->protocol == htons(ETH_P_IP))
                ctx = qeth_eddp_create_context_generic(card, skb,
-                       sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
-                       skb->h.th->doff*4);
+                                                      (sizeof(struct qeth_hdr) +
+                                                       ip_hdrlen(skb) +
+                                                       tcp_hdrlen(skb)));
        else if (skb->protocol == htons(ETH_P_IPV6))
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
-                       skb->h.th->doff*4);
+                       tcp_hdrlen(skb));
        else
                QETH_DBF_TEXT(trace, 2, "cetcpinv");
 
index d8a86f5..ad7792d 100644 (file)
@@ -2278,7 +2278,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
            (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
                return tr_type_trans(skb,dev);
 #endif /* CONFIG_TR */
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN );
        eth = eth_hdr(skb);
 
@@ -2306,9 +2306,9 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
        struct iphdr *ip_hdr;
 
        QETH_DBF_TEXT(trace,5,"skbfktr");
-       skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_TR;
+       skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_TR);
        /* this is a fake ethernet header */
-       fake_hdr = (struct trh_hdr *) skb->mac.raw;
+       fake_hdr = tr_hdr(skb);
 
        /* the destination MAC address */
        switch (skb->pkt_type){
@@ -2359,9 +2359,9 @@ qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
        struct iphdr *ip_hdr;
 
        QETH_DBF_TEXT(trace,5,"skbfketh");
-       skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_ETH;
+       skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_ETH);
        /* this is a fake ethernet header */
-       fake_hdr = (struct ethhdr *) skb->mac.raw;
+       fake_hdr = eth_hdr(skb);
 
        /* the destination MAC address */
        switch (skb->pkt_type){
@@ -2461,7 +2461,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
        if (card->options.fake_ll)
                qeth_rebuild_skb_fake_ll(card, skb, hdr);
        else
-               skb->mac.raw = skb->data;
+               skb_reset_mac_header(skb);
        skb->ip_summed = card->options.checksum_type;
        if (card->options.checksum_type == HW_CHECKSUMMING){
                if ( (hdr->hdr.l3.ext_flags &
@@ -2501,7 +2501,8 @@ qeth_process_inbound_buffer(struct qeth_card *card,
                        vlan_tag = qeth_rebuild_skb(card, skb, hdr);
                else { /*in case of OSN*/
                        skb_push(skb, sizeof(struct qeth_hdr));
-                       memcpy(skb->data, hdr, sizeof(struct qeth_hdr));
+                       skb_copy_to_linear_data(skb, hdr,
+                                               sizeof(struct qeth_hdr));
                }
                /* is device UP ? */
                if (!(card->dev->flags & IFF_UP)){
@@ -3778,9 +3779,11 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
        }
        /* try something else */
        if (skb->protocol == ETH_P_IPV6)
-               return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0;
+               return (skb_network_header(skb)[24] == 0xff) ?
+                               RTN_MULTICAST : 0;
        else if (skb->protocol == ETH_P_IP)
-               return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0;
+               return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
+                               RTN_MULTICAST : 0;
        /* ... */
        if (!memcmp(skb->data, skb->dev->broadcast, 6))
                return RTN_BROADCAST;
@@ -3818,18 +3821,20 @@ qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
                        return card->info.is_multicast_different &
                                (card->qdio.no_out_queues - 1);
                if (card->qdio.do_prio_queueing && (ipv == 4)) {
+                       const u8 tos = ip_hdr(skb)->tos;
+
                        if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
-                               if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT)
+                               if (tos & IP_TOS_NOTIMPORTANT)
                                        return 3;
-                               if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY)
+                               if (tos & IP_TOS_HIGHRELIABILITY)
                                        return 2;
-                               if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT)
+                               if (tos & IP_TOS_HIGHTHROUGHPUT)
                                        return 1;
-                               if (skb->nh.iph->tos & IP_TOS_LOWDELAY)
+                               if (tos & IP_TOS_LOWDELAY)
                                        return 0;
                        }
                        if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
-                               return 3 - (skb->nh.iph->tos >> 6);
+                               return 3 - (tos >> 6);
                } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
                        /* TODO: IPv6!!! */
                }
@@ -3866,9 +3871,9 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
                 * memcpys instead of one memmove to save cycles.
                 */
                skb_push(skb, VLAN_HLEN);
-               memcpy(skb->data, skb->data + 4, 4);
-               memcpy(skb->data + 4, skb->data + 8, 4);
-               memcpy(skb->data + 8, skb->data + 12, 4);
+               skb_copy_to_linear_data(skb, skb->data + 4, 4);
+               skb_copy_to_linear_data_offset(skb, 4, skb->data + 8, 4);
+               skb_copy_to_linear_data_offset(skb, 8, skb->data + 12, 4);
                tag = (u16 *)(skb->data + 12);
                /*
                 * first two bytes  = ETH_P_8021Q (0x8100)
@@ -4039,7 +4044,8 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                            *((u32 *) skb->dst->neighbour->primary_key);
                } else {
                        /* fill in destination address used in ip header */
-                       *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr;
+                       *((u32 *)(&hdr->hdr.l3.dest_addr[12])) =
+                                                          ip_hdr(skb)->daddr;
                }
        } else if (ipv == 6) { /* IPv6 or passthru */
                hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
@@ -4048,7 +4054,8 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                               skb->dst->neighbour->primary_key, 16);
                } else {
                        /* fill in destination address used in ip header */
-                       memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16);
+                       memcpy(hdr->hdr.l3.dest_addr,
+                              &ipv6_hdr(skb)->daddr, 16);
                }
        } else { /* passthrough */
                 if((skb->dev->type == ARPHRD_IEEE802_TR) &&
index 14504af..c20e923 100644 (file)
@@ -40,8 +40,8 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
        QETH_DBF_TEXT(trace, 5, "tsofhdr");
 
        hdr  = (struct qeth_hdr_tso *) skb->data;
-       iph  = skb->nh.iph;
-       tcph = skb->h.th;
+       iph  = ip_hdr(skb);
+       tcph = tcp_hdr(skb);
        /*fix header to TSO values ...*/
        hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
        /*set values which are fix for the first approach ...*/
@@ -63,13 +63,9 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
 static inline void
 qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
 {
-       struct iphdr *iph;
-       struct ipv6hdr *ip6h;
-       struct tcphdr *tcph;
-
-       iph  = skb->nh.iph;
-       ip6h = skb->nh.ipv6h;
-       tcph = skb->h.th;
+       struct iphdr *iph    = ip_hdr(skb);
+       struct ipv6hdr *ip6h = ipv6_hdr(skb);
+       struct tcphdr *tcph  = tcp_hdr(skb);
 
        tcph->check = 0;
        if (skb->protocol == ETH_P_IPV6) {
index 806bb1a..644a06e 100644 (file)
@@ -21,6 +21,7 @@
 #include "cio/cio.h"
 #include "cio/chsc.h"
 #include "cio/css.h"
+#include "cio/chp.h"
 #include "s390mach.h"
 
 static struct semaphore m_sem;
@@ -44,14 +45,13 @@ static int
 s390_collect_crw_info(void *param)
 {
        struct crw crw[2];
-       int ccode, ret, slow;
+       int ccode;
        struct semaphore *sem;
        unsigned int chain;
 
        sem = (struct semaphore *)param;
 repeat:
        down_interruptible(sem);
-       slow = 0;
        chain = 0;
        while (1) {
                if (unlikely(chain > 1)) {
@@ -84,9 +84,8 @@ repeat:
                /* Check for overflows. */
                if (crw[chain].oflw) {
                        pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
-                       css_reiterate_subchannels();
+                       css_schedule_eval_all();
                        chain = 0;
-                       slow = 1;
                        continue;
                }
                switch (crw[chain].rsc) {
@@ -94,10 +93,7 @@ repeat:
                        if (crw[0].chn && !chain)
                                break;
                        pr_debug("source is subchannel %04X\n", crw[0].rsid);
-                       ret = css_process_crw (crw[0].rsid,
-                                              chain ? crw[1].rsid : 0);
-                       if (ret == -EAGAIN)
-                               slow = 1;
+                       css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0);
                        break;
                case CRW_RSC_MONITOR:
                        pr_debug("source is monitoring facility\n");
@@ -116,28 +112,23 @@ repeat:
                        }
                        switch (crw[0].erc) {
                        case CRW_ERC_IPARM: /* Path has come. */
-                               ret = chp_process_crw(crw[0].rsid, 1);
+                               chp_process_crw(crw[0].rsid, 1);
                                break;
                        case CRW_ERC_PERRI: /* Path has gone. */
                        case CRW_ERC_PERRN:
-                               ret = chp_process_crw(crw[0].rsid, 0);
+                               chp_process_crw(crw[0].rsid, 0);
                                break;
                        default:
                                pr_debug("Don't know how to handle erc=%x\n",
                                         crw[0].erc);
-                               ret = 0;
                        }
-                       if (ret == -EAGAIN)
-                               slow = 1;
                        break;
                case CRW_RSC_CONFIG:
                        pr_debug("source is configuration-alert facility\n");
                        break;
                case CRW_RSC_CSS:
                        pr_debug("source is channel subsystem\n");
-                       ret = chsc_process_crw();
-                       if (ret == -EAGAIN)
-                               slow = 1;
+                       chsc_process_crw();
                        break;
                default:
                        pr_debug("unknown source\n");
@@ -146,8 +137,6 @@ repeat:
                /* chain is always 0 or 1 here. */
                chain = crw[chain].chn ? chain + 1 : 0;
        }
-       if (slow)
-               queue_work(slow_path_wq, &slow_path_work);
        goto repeat;
        return 0;
 }
index 090743d..19343f9 100644 (file)
@@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void)
 
 __initcall(create_proc_sysinfo);
 
+int get_cpu_capability(unsigned int *capability)
+{
+       struct sysinfo_1_2_2 *info;
+       int rc;
+
+       info = (void *) get_zeroed_page(GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+       rc = stsi(info, 1, 2, 2);
+       if (rc == -ENOSYS)
+               goto out;
+       rc = 0;
+       *capability = info->capability;
+out:
+       free_page((unsigned long) info);
+       return rc;
+}
+
 /*
  * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
  */
index 2cea4f5..f2be2ea 100644 (file)
@@ -726,7 +726,7 @@ static struct miscdevice envctrl_dev = {
  * Return: None.
  */
 static void envctrl_set_mon(struct i2c_child_t *pchild,
-                           char *chnl_desc,
+                           const char *chnl_desc,
                            int chnl_no)
 {
        /* Firmware only has temperature type.  It does not distinguish
@@ -763,8 +763,8 @@ static void envctrl_set_mon(struct i2c_child_t *pchild,
 static void envctrl_init_adc(struct i2c_child_t *pchild, struct device_node *dp)
 {
        int i = 0, len;
-       char *pos;
-       unsigned int *pval;
+       const char *pos;
+       const unsigned int *pval;
 
        /* Firmware describe channels into a stream separated by a '\0'. */
        pos = of_get_property(dp, "channels-description", &len);
@@ -859,7 +859,7 @@ static void envctrl_init_i2c_child(struct linux_ebus_child *edev_child,
 {
        int len, i, tbls_size = 0;
        struct device_node *dp = edev_child->prom_node;
-       void *pval;
+       const void *pval;
 
        /* Get device address. */
        pval = of_get_property(dp, "reg", &len);
index 6e99507..262f01e 100644 (file)
@@ -190,7 +190,7 @@ static int __init flash_init(void)
        }
        if (!sdev) {
 #ifdef CONFIG_PCI
-               struct linux_prom_registers *ebus_regs;
+               const struct linux_prom_registers *ebus_regs;
 
                for_each_ebus(ebus) {
                        for_each_ebusdev(edev, ebus) {
index eec28c1..fbfeb89 100644 (file)
@@ -44,7 +44,6 @@
 #include <asm/openpromio.h>
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
-#include <asm/pbm.h>
 #endif
 
 MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost  (ecd@skynet.be)");
@@ -141,7 +140,7 @@ static int copyout(void __user *info, struct openpromio *opp, int len)
 
 static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize)
 {
-       void *pval;
+       const void *pval;
        int len;
 
        if (!dp ||
@@ -248,18 +247,18 @@ static int oprompci2node(void __user *argp, struct device_node *dp, struct openp
        if (bufsize >= 2*sizeof(int)) {
 #ifdef CONFIG_PCI
                struct pci_dev *pdev;
-               struct pcidev_cookie *pcp;
-               pdev = pci_find_slot (((int *) op->oprom_array)[0],
+               struct device_node *dp;
+
+               pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0],
                                      ((int *) op->oprom_array)[1]);
 
-               pcp = pdev->sysdata;
-               if (pcp != NULL) {
-                       dp = pcp->prom_node;
-                       data->current_node = dp;
-                       *((int *)op->oprom_array) = dp->node;
-                       op->oprom_size = sizeof(int);
-                       err = copyout(argp, op, bufsize + sizeof(int));
-               }
+               dp = pci_device_to_OF_node(pdev);
+               data->current_node = dp;
+               *((int *)op->oprom_array) = dp->node;
+               op->oprom_size = sizeof(int);
+               err = copyout(argp, op, bufsize + sizeof(int));
+
+               pci_dev_put(pdev);
 #endif
        }
 
@@ -409,7 +408,7 @@ static int opiocget(void __user *argp, DATA *data)
        struct opiocdesc op;
        struct device_node *dp;
        char *str;
-       void *pval;
+       const void *pval;
        int err, len;
 
        if (copy_from_user(&op, argp, sizeof(op)))
index 8bfb67c..c3135e2 100644 (file)
@@ -259,11 +259,10 @@ static int vfc_debug(struct vfc_dev *dev, int cmd, void __user *argp)
                if (copy_from_user(&inout, argp, sizeof(inout)))
                        return -EFAULT;
 
-               buffer = kmalloc(inout.len, GFP_KERNEL);
+               buffer = kzalloc(inout.len, GFP_KERNEL);
                if (buffer == NULL)
                        return -ENOMEM;
 
-               memset(buffer,0,inout.len);
                vfc_lock_device(dev);
                inout.ret=
                        vfc_i2c_recvbuf(dev,inout.addr & 0xff
index 6349dd6..eee590a 100644 (file)
@@ -35,7 +35,7 @@ struct sbus_bus *sbus_root;
 static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sdev)
 {
        unsigned long base;
-       void *pval;
+       const void *pval;
        int len, err;
 
        sdev->prom_node = dp->node;
@@ -86,7 +86,7 @@ static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sde
 
 static void __init sbus_bus_ranges_init(struct device_node *dp, struct sbus_bus *sbus)
 {
-       void *pval;
+       const void *pval;
        int len;
 
        pval = of_get_property(dp, "ranges", &len);
index 4cd280e..fcc4cb6 100644 (file)
@@ -1763,9 +1763,15 @@ config SUN3X_ESP
          The ESP was an on-board SCSI controller used on Sun 3/80
          machines.  Say Y here to compile in support for it.
 
+config SCSI_ESP_CORE
+       tristate "ESP Scsi Driver Core"
+       depends on SCSI
+       select SCSI_SPI_ATTRS
+
 config SCSI_SUNESP
        tristate "Sparc ESP Scsi Driver"
        depends on SBUS && SCSI
+       select SCSI_ESP_CORE
        help
          This is the driver for the Sun ESP SCSI host adapter. The ESP
          chipset is present in most SPARC SBUS-based computers.
index 79ecf4e..70cff4c 100644 (file)
@@ -106,7 +106,8 @@ obj-$(CONFIG_MEGARAID_LEGACY)       += megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN)  += megaraid/
 obj-$(CONFIG_MEGARAID_SAS)     += megaraid/
 obj-$(CONFIG_SCSI_ACARD)       += atp870u.o
-obj-$(CONFIG_SCSI_SUNESP)      += esp.o
+obj-$(CONFIG_SCSI_ESP_CORE)    += esp_scsi.o
+obj-$(CONFIG_SCSI_SUNESP)      += sun_esp.o
 obj-$(CONFIG_SCSI_GDTH)                += gdth.o
 obj-$(CONFIG_SCSI_INITIO)      += initio.o
 obj-$(CONFIG_SCSI_INIA100)     += a100u2w.o
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
deleted file mode 100644 (file)
index 2c2fe80..0000000
+++ /dev/null
@@ -1,4394 +0,0 @@
-/* esp.c: ESP Sun SCSI driver.
- *
- * Copyright (C) 1995, 1998, 2006 David S. Miller (davem@davemloft.net)
- */
-
-/* TODO:
- *
- * 1) Maybe disable parity checking in config register one for SCSI1
- *    targets.  (Gilmore says parity error on the SBus can lock up
- *    old sun4c's)
- * 2) Add support for DMA2 pipelining.
- * 3) Add tagged queueing.
- */
-
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
-#include "esp.h"
-
-#include <asm/sbus.h>
-#include <asm/dma.h>
-#include <asm/system.h>
-#include <asm/ptrace.h>
-#include <asm/pgtable.h>
-#include <asm/oplib.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#ifndef __sparc_v9__
-#include <asm/machines.h>
-#include <asm/idprom.h>
-#endif
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_eh.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-
-#define DRV_VERSION "1.101"
-
-#define DEBUG_ESP
-/* #define DEBUG_ESP_HME */
-/* #define DEBUG_ESP_DATA */
-/* #define DEBUG_ESP_QUEUE */
-/* #define DEBUG_ESP_DISCONNECT */
-/* #define DEBUG_ESP_STATUS */
-/* #define DEBUG_ESP_PHASES */
-/* #define DEBUG_ESP_WORKBUS */
-/* #define DEBUG_STATE_MACHINE */
-/* #define DEBUG_ESP_CMDS */
-/* #define DEBUG_ESP_IRQS */
-/* #define DEBUG_SDTR */
-/* #define DEBUG_ESP_SG */
-
-/* Use the following to sprinkle debugging messages in a way which
- * suits you if combinations of the above become too verbose when
- * trying to track down a specific problem.
- */
-/* #define DEBUG_ESP_MISC */
-
-#if defined(DEBUG_ESP)
-#define ESPLOG(foo)  printk foo
-#else
-#define ESPLOG(foo)
-#endif /* (DEBUG_ESP) */
-
-#if defined(DEBUG_ESP_HME)
-#define ESPHME(foo)  printk foo
-#else
-#define ESPHME(foo)
-#endif
-
-#if defined(DEBUG_ESP_DATA)
-#define ESPDATA(foo)  printk foo
-#else
-#define ESPDATA(foo)
-#endif
-
-#if defined(DEBUG_ESP_QUEUE)
-#define ESPQUEUE(foo)  printk foo
-#else
-#define ESPQUEUE(foo)
-#endif
-
-#if defined(DEBUG_ESP_DISCONNECT)
-#define ESPDISC(foo)  printk foo
-#else
-#define ESPDISC(foo)
-#endif
-
-#if defined(DEBUG_ESP_STATUS)
-#define ESPSTAT(foo)  printk foo
-#else
-#define ESPSTAT(foo)
-#endif
-
-#if defined(DEBUG_ESP_PHASES)
-#define ESPPHASE(foo)  printk foo
-#else
-#define ESPPHASE(foo)
-#endif
-
-#if defined(DEBUG_ESP_WORKBUS)
-#define ESPBUS(foo)  printk foo
-#else
-#define ESPBUS(foo)
-#endif
-
-#if defined(DEBUG_ESP_IRQS)
-#define ESPIRQ(foo)  printk foo
-#else
-#define ESPIRQ(foo)
-#endif
-
-#if defined(DEBUG_SDTR)
-#define ESPSDTR(foo)  printk foo
-#else
-#define ESPSDTR(foo)
-#endif
-
-#if defined(DEBUG_ESP_MISC)
-#define ESPMISC(foo)  printk foo
-#else
-#define ESPMISC(foo)
-#endif
-
-/* Command phase enumeration. */
-enum {
-       not_issued    = 0x00,  /* Still in the issue_SC queue.          */
-
-       /* Various forms of selecting a target. */
-#define in_slct_mask    0x10
-       in_slct_norm  = 0x10,  /* ESP is arbitrating, normal selection  */
-       in_slct_stop  = 0x11,  /* ESP will select, then stop with IRQ   */
-       in_slct_msg   = 0x12,  /* select, then send a message           */
-       in_slct_tag   = 0x13,  /* select and send tagged queue msg      */
-       in_slct_sneg  = 0x14,  /* select and acquire sync capabilities  */
-
-       /* Any post selection activity. */
-#define in_phases_mask  0x20
-       in_datain     = 0x20,  /* Data is transferring from the bus     */
-       in_dataout    = 0x21,  /* Data is transferring to the bus       */
-       in_data_done  = 0x22,  /* Last DMA data operation done (maybe)  */
-       in_msgin      = 0x23,  /* Eating message from target            */
-       in_msgincont  = 0x24,  /* Eating more msg bytes from target     */
-       in_msgindone  = 0x25,  /* Decide what to do with what we got    */
-       in_msgout     = 0x26,  /* Sending message to target             */
-       in_msgoutdone = 0x27,  /* Done sending msg out                  */
-       in_cmdbegin   = 0x28,  /* Sending cmd after abnormal selection  */
-       in_cmdend     = 0x29,  /* Done sending slow cmd                 */
-       in_status     = 0x2a,  /* Was in status phase, finishing cmd    */
-       in_freeing    = 0x2b,  /* freeing the bus for cmd cmplt or disc */
-       in_the_dark   = 0x2c,  /* Don't know what bus phase we are in   */
-
-       /* Special states, ie. not normal bus transitions... */
-#define in_spec_mask    0x80
-       in_abortone   = 0x80,  /* Aborting one command currently        */
-       in_abortall   = 0x81,  /* Blowing away all commands we have     */
-       in_resetdev   = 0x82,  /* SCSI target reset in progress         */
-       in_resetbus   = 0x83,  /* SCSI bus reset in progress            */
-       in_tgterror   = 0x84,  /* Target did something stupid           */
-};
-
-enum {
-       /* Zero has special meaning, see skipahead[12]. */
-/*0*/  do_never,
-
-/*1*/  do_phase_determine,
-/*2*/  do_reset_bus,
-/*3*/  do_reset_complete,
-/*4*/  do_work_bus,
-/*5*/  do_intr_end
-};
-
-/* Forward declarations. */
-static irqreturn_t esp_intr(int irq, void *dev_id);
-
-/* Debugging routines */
-struct esp_cmdstrings {
-       u8 cmdchar;
-       char *text;
-} esp_cmd_strings[] = {
-       /* Miscellaneous */
-       { ESP_CMD_NULL, "ESP_NOP", },
-       { ESP_CMD_FLUSH, "FIFO_FLUSH", },
-       { ESP_CMD_RC, "RSTESP", },
-       { ESP_CMD_RS, "RSTSCSI", },
-       /* Disconnected State Group */
-       { ESP_CMD_RSEL, "RESLCTSEQ", },
-       { ESP_CMD_SEL, "SLCTNATN", },
-       { ESP_CMD_SELA, "SLCTATN", },
-       { ESP_CMD_SELAS, "SLCTATNSTOP", },
-       { ESP_CMD_ESEL, "ENSLCTRESEL", },
-       { ESP_CMD_DSEL, "DISSELRESEL", },
-       { ESP_CMD_SA3, "SLCTATN3", },
-       { ESP_CMD_RSEL3, "RESLCTSEQ", },
-       /* Target State Group */
-       { ESP_CMD_SMSG, "SNDMSG", },
-       { ESP_CMD_SSTAT, "SNDSTATUS", },
-       { ESP_CMD_SDATA, "SNDDATA", },
-       { ESP_CMD_DSEQ, "DISCSEQ", },
-       { ESP_CMD_TSEQ, "TERMSEQ", },
-       { ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", },
-       { ESP_CMD_DCNCT, "DISC", },
-       { ESP_CMD_RMSG, "RCVMSG", },
-       { ESP_CMD_RCMD, "RCVCMD", },
-       { ESP_CMD_RDATA, "RCVDATA", },
-       { ESP_CMD_RCSEQ, "RCVCMDSEQ", },
-       /* Initiator State Group */
-       { ESP_CMD_TI, "TRANSINFO", },
-       { ESP_CMD_ICCSEQ, "INICMDSEQCOMP", },
-       { ESP_CMD_MOK, "MSGACCEPTED", },
-       { ESP_CMD_TPAD, "TPAD", },
-       { ESP_CMD_SATN, "SATN", },
-       { ESP_CMD_RATN, "RATN", },
-};
-#define NUM_ESP_COMMANDS  ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings)))
-
-/* Print textual representation of an ESP command */
-static inline void esp_print_cmd(u8 espcmd)
-{
-       u8 dma_bit = espcmd & ESP_CMD_DMA;
-       int i;
-
-       espcmd &= ~dma_bit;
-       for (i = 0; i < NUM_ESP_COMMANDS; i++)
-               if (esp_cmd_strings[i].cmdchar == espcmd)
-                       break;
-       if (i == NUM_ESP_COMMANDS)
-               printk("ESP_Unknown");
-       else
-               printk("%s%s", esp_cmd_strings[i].text,
-                      ((dma_bit) ? "+DMA" : ""));
-}
-
-/* Print the status register's value */
-static inline void esp_print_statreg(u8 statreg)
-{
-       u8 phase;
-
-       printk("STATUS<");
-       phase = statreg & ESP_STAT_PMASK;
-       printk("%s,", (phase == ESP_DOP ? "DATA-OUT" :
-                      (phase == ESP_DIP ? "DATA-IN" :
-                       (phase == ESP_CMDP ? "COMMAND" :
-                        (phase == ESP_STATP ? "STATUS" :
-                         (phase == ESP_MOP ? "MSG-OUT" :
-                          (phase == ESP_MIP ? "MSG_IN" :
-                           "unknown")))))));
-       if (statreg & ESP_STAT_TDONE)
-               printk("TRANS_DONE,");
-       if (statreg & ESP_STAT_TCNT)
-               printk("TCOUNT_ZERO,");
-       if (statreg & ESP_STAT_PERR)
-               printk("P_ERROR,");
-       if (statreg & ESP_STAT_SPAM)
-               printk("SPAM,");
-       if (statreg & ESP_STAT_INTR)
-               printk("IRQ,");
-       printk(">");
-}
-
-/* Print the interrupt register's value */
-static inline void esp_print_ireg(u8 intreg)
-{
-       printk("INTREG< ");
-       if (intreg & ESP_INTR_S)
-               printk("SLCT_NATN ");
-       if (intreg & ESP_INTR_SATN)
-               printk("SLCT_ATN ");
-       if (intreg & ESP_INTR_RSEL)
-               printk("RSLCT ");
-       if (intreg & ESP_INTR_FDONE)
-               printk("FDONE ");
-       if (intreg & ESP_INTR_BSERV)
-               printk("BSERV ");
-       if (intreg & ESP_INTR_DC)
-               printk("DISCNCT ");
-       if (intreg & ESP_INTR_IC)
-               printk("ILL_CMD ");
-       if (intreg & ESP_INTR_SR)
-               printk("SCSI_BUS_RESET ");
-       printk(">");
-}
-
-/* Print the sequence step registers contents */
-static inline void esp_print_seqreg(u8 stepreg)
-{
-       stepreg &= ESP_STEP_VBITS;
-       printk("STEP<%s>",
-              (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" :
-               (stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" :
-                (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" :
-                 (stepreg == ESP_STEP_PPC ? "CMD_BYTES_LOST" :
-                  (stepreg == ESP_STEP_FINI4 ? "CMD_SENT_OK" :
-                   "UNKNOWN"))))));
-}
-
-static char *phase_string(int phase)
-{
-       switch (phase) {
-       case not_issued:
-               return "UNISSUED";
-       case in_slct_norm:
-               return "SLCTNORM";
-       case in_slct_stop:
-               return "SLCTSTOP";
-       case in_slct_msg:
-               return "SLCTMSG";
-       case in_slct_tag:
-               return "SLCTTAG";
-       case in_slct_sneg:
-               return "SLCTSNEG";
-       case in_datain:
-               return "DATAIN";
-       case in_dataout:
-               return "DATAOUT";
-       case in_data_done:
-               return "DATADONE";
-       case in_msgin:
-               return "MSGIN";
-       case in_msgincont:
-               return "MSGINCONT";
-       case in_msgindone:
-               return "MSGINDONE";
-       case in_msgout:
-               return "MSGOUT";
-       case in_msgoutdone:
-               return "MSGOUTDONE";
-       case in_cmdbegin:
-               return "CMDBEGIN";
-       case in_cmdend:
-               return "CMDEND";
-       case in_status:
-               return "STATUS";
-       case in_freeing:
-               return "FREEING";
-       case in_the_dark:
-               return "CLUELESS";
-       case in_abortone:
-               return "ABORTONE";
-       case in_abortall:
-               return "ABORTALL";
-       case in_resetdev:
-               return "RESETDEV";
-       case in_resetbus:
-               return "RESETBUS";
-       case in_tgterror:
-               return "TGTERROR";
-       default:
-               return "UNKNOWN";
-       };
-}
-
-#ifdef DEBUG_STATE_MACHINE
-static inline void esp_advance_phase(struct scsi_cmnd *s, int newphase)
-{
-       ESPLOG(("<%s>", phase_string(newphase)));
-       s->SCp.sent_command = s->SCp.phase;
-       s->SCp.phase = newphase;
-}
-#else
-#define esp_advance_phase(__s, __newphase) \
-       (__s)->SCp.sent_command = (__s)->SCp.phase; \
-       (__s)->SCp.phase = (__newphase);
-#endif
-
-#ifdef DEBUG_ESP_CMDS
-static inline void esp_cmd(struct esp *esp, u8 cmd)
-{
-       esp->espcmdlog[esp->espcmdent] = cmd;
-       esp->espcmdent = (esp->espcmdent + 1) & 31;
-       sbus_writeb(cmd, esp->eregs + ESP_CMD);
-}
-#else
-#define esp_cmd(__esp, __cmd)  \
-       sbus_writeb((__cmd), ((__esp)->eregs) + ESP_CMD)
-#endif
-
-#define ESP_INTSOFF(__dregs)   \
-       sbus_writel(sbus_readl((__dregs)+DMA_CSR)&~(DMA_INT_ENAB), (__dregs)+DMA_CSR)
-#define ESP_INTSON(__dregs)    \
-       sbus_writel(sbus_readl((__dregs)+DMA_CSR)|DMA_INT_ENAB, (__dregs)+DMA_CSR)
-#define ESP_IRQ_P(__dregs)     \
-       (sbus_readl((__dregs)+DMA_CSR) & (DMA_HNDL_INTR|DMA_HNDL_ERROR))
-
-/* How we use the various Linux SCSI data structures for operation.
- *
- * struct scsi_cmnd:
- *
- *   We keep track of the synchronous capabilities of a target
- *   in the device member, using sync_min_period and
- *   sync_max_offset.  These are the values we directly write
- *   into the ESP registers while running a command.  If offset
- *   is zero the ESP will use asynchronous transfers.
- *   If the borken flag is set we assume we shouldn't even bother
- *   trying to negotiate for synchronous transfer as this target
- *   is really stupid.  If we notice the target is dropping the
- *   bus, and we have been allowing it to disconnect, we clear
- *   the disconnect flag.
- */
-
-
-/* Manipulation of the ESP command queues.  Thanks to the aha152x driver
- * and its author, Juergen E. Fischer, for the methods used here.
- * Note that these are per-ESP queues, not global queues like
- * the aha152x driver uses.
- */
-static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC)
-{
-       struct scsi_cmnd *end;
-
-       new_SC->host_scribble = (unsigned char *) NULL;
-       if (!*SC)
-               *SC = new_SC;
-       else {
-               for (end=*SC;end->host_scribble;end=(struct scsi_cmnd *)end->host_scribble)
-                       ;
-               end->host_scribble = (unsigned char *) new_SC;
-       }
-}
-
-static inline void prepend_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC)
-{
-       new_SC->host_scribble = (unsigned char *) *SC;
-       *SC = new_SC;
-}
-
-static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd **SC)
-{
-       struct scsi_cmnd *ptr;
-       ptr = *SC;
-       if (ptr)
-               *SC = (struct scsi_cmnd *) (*SC)->host_scribble;
-       return ptr;
-}
-
-static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC, int target, int lun)
-{
-       struct scsi_cmnd *ptr, *prev;
-
-       for (ptr = *SC, prev = NULL;
-            ptr && ((ptr->device->id != target) || (ptr->device->lun != lun));
-            prev = ptr, ptr = (struct scsi_cmnd *) ptr->host_scribble)
-               ;
-       if (ptr) {
-               if (prev)
-                       prev->host_scribble=ptr->host_scribble;
-               else
-                       *SC=(struct scsi_cmnd *)ptr->host_scribble;
-       }
-       return ptr;
-}
-
-/* Resetting various pieces of the ESP scsi driver chipset/buses. */
-static void esp_reset_dma(struct esp *esp)
-{
-       int can_do_burst16, can_do_burst32, can_do_burst64;
-       int can_do_sbus64;
-       u32 tmp;
-
-       can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
-       can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
-       can_do_burst64 = 0;
-       can_do_sbus64 = 0;
-       if (sbus_can_dma_64bit(esp->sdev))
-               can_do_sbus64 = 1;
-       if (sbus_can_burst64(esp->sdev))
-               can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
-
-       /* Punt the DVMA into a known state. */
-       if (esp->dma->revision != dvmahme) {
-               tmp = sbus_readl(esp->dregs + DMA_CSR);
-               sbus_writel(tmp | DMA_RST_SCSI, esp->dregs + DMA_CSR);
-               sbus_writel(tmp & ~DMA_RST_SCSI, esp->dregs + DMA_CSR);
-       }
-       switch (esp->dma->revision) {
-       case dvmahme:
-               /* This is the HME DVMA gate array. */
-
-               sbus_writel(DMA_RESET_FAS366, esp->dregs + DMA_CSR);
-               sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR);
-
-               esp->prev_hme_dmacsr = (DMA_PARITY_OFF|DMA_2CLKS|DMA_SCSI_DISAB|DMA_INT_ENAB);
-               esp->prev_hme_dmacsr &= ~(DMA_ENABLE|DMA_ST_WRITE|DMA_BRST_SZ);
-
-               if (can_do_burst64)
-                       esp->prev_hme_dmacsr |= DMA_BRST64;
-               else if (can_do_burst32)
-                       esp->prev_hme_dmacsr |= DMA_BRST32;
-
-               if (can_do_sbus64) {
-                       esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
-                       sbus_set_sbus64(esp->sdev, esp->bursts);
-               }
-
-               /* This chip is horrible. */
-               while (sbus_readl(esp->dregs + DMA_CSR) & DMA_PEND_READ)
-                       udelay(1);
-
-               sbus_writel(0, esp->dregs + DMA_CSR);
-               sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR);
-
-               /* This is necessary to avoid having the SCSI channel
-                * engine lock up on us.
-                */
-               sbus_writel(0, esp->dregs + DMA_ADDR);
-
-               break;
-       case dvmarev2:
-               /* This is the gate array found in the sun4m
-                * NCR SBUS I/O subsystem.
-                */
-               if (esp->erev != esp100) {
-                       tmp = sbus_readl(esp->dregs + DMA_CSR);
-                       sbus_writel(tmp | DMA_3CLKS, esp->dregs + DMA_CSR);
-               }
-               break;
-       case dvmarev3:
-               tmp = sbus_readl(esp->dregs + DMA_CSR);
-               tmp &= ~DMA_3CLKS;
-               tmp |= DMA_2CLKS;
-               if (can_do_burst32) {
-                       tmp &= ~DMA_BRST_SZ;
-                       tmp |= DMA_BRST32;
-               }
-               sbus_writel(tmp, esp->dregs + DMA_CSR);
-               break;
-       case dvmaesc1:
-               /* This is the DMA unit found on SCSI/Ether cards. */
-               tmp = sbus_readl(esp->dregs + DMA_CSR);
-               tmp |= DMA_ADD_ENABLE;
-               tmp &= ~DMA_BCNT_ENAB;
-               if (!can_do_burst32 && can_do_burst16) {
-                       tmp |= DMA_ESC_BURST;
-               } else {
-                       tmp &= ~(DMA_ESC_BURST);
-               }
-               sbus_writel(tmp, esp->dregs + DMA_CSR);
-               break;
-       default:
-               break;
-       };
-       ESP_INTSON(esp->dregs);
-}
-
-/* Reset the ESP chip, _not_ the SCSI bus. */
-static void __init esp_reset_esp(struct esp *esp)
-{
-       u8 family_code, version;
-       int i;
-
-       /* Now reset the ESP chip */
-       esp_cmd(esp, ESP_CMD_RC);
-       esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
-       esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
-
-       /* Reload the configuration registers */
-       sbus_writeb(esp->cfact, esp->eregs + ESP_CFACT);
-       esp->prev_stp = 0;
-       sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
-       esp->prev_soff = 0;
-       sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
-       sbus_writeb(esp->neg_defp, esp->eregs + ESP_TIMEO);
-
-       /* This is the only point at which it is reliable to read
-        * the ID-code for a fast ESP chip variants.
-        */
-       esp->max_period = ((35 * esp->ccycle) / 1000);
-       if (esp->erev == fast) {
-               version = sbus_readb(esp->eregs + ESP_UID);
-               family_code = (version & 0xf8) >> 3;
-               if (family_code == 0x02)
-                       esp->erev = fas236;
-               else if (family_code == 0x0a)
-                       esp->erev = fashme; /* Version is usually '5'. */
-               else
-                       esp->erev = fas100a;
-               ESPMISC(("esp%d: FAST chip is %s (family=%d, version=%d)\n",
-                        esp->esp_id,
-                        (esp->erev == fas236) ? "fas236" :
-                        ((esp->erev == fas100a) ? "fas100a" :
-                         "fasHME"), family_code, (version & 7)));
-
-               esp->min_period = ((4 * esp->ccycle) / 1000);
-       } else {
-               esp->min_period = ((5 * esp->ccycle) / 1000);
-       }
-       esp->max_period = (esp->max_period + 3)>>2;
-       esp->min_period = (esp->min_period + 3)>>2;
-
-       sbus_writeb(esp->config1, esp->eregs + ESP_CFG1);
-       switch (esp->erev) {
-       case esp100:
-               /* nothing to do */
-               break;
-       case esp100a:
-               sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
-               break;
-       case esp236:
-               /* Slow 236 */
-               sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
-               esp->prev_cfg3 = esp->config3[0];
-               sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
-               break;
-       case fashme:
-               esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
-               /* fallthrough... */
-       case fas236:
-               /* Fast 236 or HME */
-               sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
-               for (i = 0; i < 16; i++) {
-                       if (esp->erev == fashme) {
-                               u8 cfg3;
-
-                               cfg3 = ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
-                               if (esp->scsi_id >= 8)
-                                       cfg3 |= ESP_CONFIG3_IDBIT3;
-                               esp->config3[i] |= cfg3;
-                       } else {
-                               esp->config3[i] |= ESP_CONFIG3_FCLK;
-                       }
-               }
-               esp->prev_cfg3 = esp->config3[0];
-               sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
-               if (esp->erev == fashme) {
-                       esp->radelay = 80;
-               } else {
-                       if (esp->diff)
-                               esp->radelay = 0;
-                       else
-                               esp->radelay = 96;
-               }
-               break;
-       case fas100a:
-               /* Fast 100a */
-               sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
-               for (i = 0; i < 16; i++)
-                       esp->config3[i] |= ESP_CONFIG3_FCLOCK;
-               esp->prev_cfg3 = esp->config3[0];
-               sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
-               esp->radelay = 32;
-               break;
-       default:
-               panic("esp: what could it be... I wonder...");
-               break;
-       };
-
-       /* Eat any bitrot in the chip */
-       sbus_readb(esp->eregs + ESP_INTRPT);
-       udelay(100);
-}
-
-/* This places the ESP into a known state at boot time. */
-static void __init esp_bootup_reset(struct esp *esp)
-{
-       u8 tmp;
-
-       /* Reset the DMA */
-       esp_reset_dma(esp);
-
-       /* Reset the ESP */
-       esp_reset_esp(esp);
-
-       /* Reset the SCSI bus, but tell ESP not to generate an irq */
-       tmp = sbus_readb(esp->eregs + ESP_CFG1);
-       tmp |= ESP_CONFIG1_SRRDISAB;
-       sbus_writeb(tmp, esp->eregs + ESP_CFG1);
-
-       esp_cmd(esp, ESP_CMD_RS);
-       udelay(400);
-
-       sbus_writeb(esp->config1, esp->eregs + ESP_CFG1);
-
-       /* Eat any bitrot in the chip and we are done... */
-       sbus_readb(esp->eregs + ESP_INTRPT);
-}
-
-static int __init esp_find_dvma(struct esp *esp, struct sbus_dev *dma_sdev)
-{
-       struct sbus_dev *sdev = esp->sdev;
-       struct sbus_dma *dma;
-
-       if (dma_sdev != NULL) {
-               for_each_dvma(dma) {
-                       if (dma->sdev == dma_sdev)
-                               break;
-               }
-       } else {
-               for_each_dvma(dma) {
-                       /* If allocated already, can't use it. */
-                       if (dma->allocated)
-                               continue;
-
-                       if (dma->sdev == NULL)
-                               break;
-
-                       /* If bus + slot are the same and it has the
-                        * correct OBP name, it's ours.
-                        */
-                       if (sdev->bus == dma->sdev->bus &&
-                           sdev->slot == dma->sdev->slot &&
-                           (!strcmp(dma->sdev->prom_name, "dma") ||
-                            !strcmp(dma->sdev->prom_name, "espdma")))
-                               break;
-               }
-       }
-
-       /* If we don't know how to handle the dvma,
-        * do not use this device.
-        */
-       if (dma == NULL) {
-               printk("Cannot find dvma for ESP%d's SCSI\n", esp->esp_id);
-               return -1;
-       }
-       if (dma->allocated) {
-               printk("esp%d: can't use my espdma\n", esp->esp_id);
-               return -1;
-       }
-       dma->allocated = 1;
-       esp->dma = dma;
-       esp->dregs = dma->regs;
-
-       return 0;
-}
-
-static int __init esp_map_regs(struct esp *esp, int hme)
-{
-       struct sbus_dev *sdev = esp->sdev;
-       struct resource *res;
-
-       /* On HME, two reg sets exist, first is DVMA,
-        * second is ESP registers.
-        */
-       if (hme)
-               res = &sdev->resource[1];
-       else
-               res = &sdev->resource[0];
-
-       esp->eregs = sbus_ioremap(res, 0, ESP_REG_SIZE, "ESP Registers");
-
-       if (esp->eregs == 0)
-               return -1;
-       return 0;
-}
-
-static int __init esp_map_cmdarea(struct esp *esp)
-{
-       struct sbus_dev *sdev = esp->sdev;
-
-       esp->esp_command = sbus_alloc_consistent(sdev, 16,
-                                                &esp->esp_command_dvma);
-       if (esp->esp_command == NULL ||
-           esp->esp_command_dvma == 0)
-               return -1;
-       return 0;
-}
-
-static int __init esp_register_irq(struct esp *esp)
-{
-       esp->ehost->irq = esp->irq = esp->sdev->irqs[0];
-
-       /* We used to try various overly-clever things to
-        * reduce the interrupt processing overhead on
-        * sun4c/sun4m when multiple ESP's shared the
-        * same IRQ.  It was too complex and messy to
-        * sanely maintain.
-        */
-       if (request_irq(esp->ehost->irq, esp_intr,
-                       IRQF_SHARED, "ESP SCSI", esp)) {
-               printk("esp%d: Cannot acquire irq line\n",
-                      esp->esp_id);
-               return -1;
-       }
-
-       printk("esp%d: IRQ %d ", esp->esp_id,
-              esp->ehost->irq);
-
-       return 0;
-}
-
-static void __init esp_get_scsi_id(struct esp *esp)
-{
-       struct sbus_dev *sdev = esp->sdev;
-       struct device_node *dp = sdev->ofdev.node;
-
-       esp->scsi_id = of_getintprop_default(dp,
-                                            "initiator-id",
-                                            -1);
-       if (esp->scsi_id == -1)
-               esp->scsi_id = of_getintprop_default(dp,
-                                                    "scsi-initiator-id",
-                                                    -1);
-       if (esp->scsi_id == -1)
-               esp->scsi_id = (sdev->bus == NULL) ? 7 :
-                       of_getintprop_default(sdev->bus->ofdev.node,
-                                             "scsi-initiator-id",
-                                             7);
-       esp->ehost->this_id = esp->scsi_id;
-       esp->scsi_id_mask = (1 << esp->scsi_id);
-
-}
-
-static void __init esp_get_clock_params(struct esp *esp)
-{
-       struct sbus_dev *sdev = esp->sdev;
-       int prom_node = esp->prom_node;
-       int sbus_prom_node;
-       unsigned int fmhz;
-       u8 ccf;
-
-       if (sdev != NULL && sdev->bus != NULL)
-               sbus_prom_node = sdev->bus->prom_node;
-       else
-               sbus_prom_node = 0;
-
-       /* This is getting messy but it has to be done
-        * correctly or else you get weird behavior all
-        * over the place.  We are trying to basically
-        * figure out three pieces of information.
-        *
-        * a) Clock Conversion Factor
-        *
-        *    This is a representation of the input
-        *    crystal clock frequency going into the
-        *    ESP on this machine.  Any operation whose
-        *    timing is longer than 400ns depends on this
-        *    value being correct.  For example, you'll
-        *    get blips for arbitration/selection during
-        *    high load or with multiple targets if this
-        *    is not set correctly.
-        *
-        * b) Selection Time-Out
-        *
-        *    The ESP isn't very bright and will arbitrate
-        *    for the bus and try to select a target
-        *    forever if you let it.  This value tells
-        *    the ESP when it has taken too long to
-        *    negotiate and that it should interrupt
-        *    the CPU so we can see what happened.
-        *    The value is computed as follows (from
-        *    NCR/Symbios chip docs).
-        *
-        *          (Time Out Period) *  (Input Clock)
-        *    STO = ----------------------------------
-        *          (8192) * (Clock Conversion Factor)
-        *
-        *    You usually want the time out period to be
-        *    around 250ms; I think we'll set it a little
-        *    bit higher to account for fully loaded SCSI
-        *    buses and slow devices that don't respond so
-        *    quickly to selection attempts. (yeah, I know
-        *    this is out of spec, but there are a lot of
-        *    buggy pieces of firmware out there so bite me)
-        *
-        * c) Empirical constants for synchronous offset
-        *    and transfer period register values
-        *
-        *    This entails the smallest and largest sync
-        *    period we could ever handle on this ESP.
-        */
-
-       fmhz = prom_getintdefault(prom_node, "clock-frequency", -1);
-       if (fmhz == -1)
-               fmhz = (!sbus_prom_node) ? 0 :
-                       prom_getintdefault(sbus_prom_node, "clock-frequency", -1);
-
-       if (fmhz <= (5000000))
-               ccf = 0;
-       else
-               ccf = (((5000000 - 1) + (fmhz))/(5000000));
-
-       if (!ccf || ccf > 8) {
-               /* If we can't find anything reasonable,
-                * just assume 20MHz.  This is the clock
-                * frequency of the older sun4c's where I've
-                * been unable to find the clock-frequency
-                * PROM property.  All other machines provide
-                * useful values it seems.
-                */
-               ccf = ESP_CCF_F4;
-               fmhz = (20000000);
-       }
-
-       if (ccf == (ESP_CCF_F7 + 1))
-               esp->cfact = ESP_CCF_F0;
-       else if (ccf == ESP_CCF_NEVER)
-               esp->cfact = ESP_CCF_F2;
-       else
-               esp->cfact = ccf;
-       esp->raw_cfact = ccf;
-
-       esp->cfreq = fmhz;
-       esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
-       esp->ctick = ESP_TICK(ccf, esp->ccycle);
-       esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
-       esp->sync_defp = SYNC_DEFP_SLOW;
-
-       printk("SCSI ID %d Clk %dMHz CCYC=%d CCF=%d TOut %d ",
-              esp->scsi_id, (fmhz / 1000000),
-              (int)esp->ccycle, (int)ccf, (int) esp->neg_defp);
-}
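As a standalone illustration of the arithmetic described in the comment above (not the driver's ESP_CCF_*/ESP_NEG_DEFP macros, whose exact definitions live in the driver headers), here is the CCF rounding and the NCR/Symbios selection time-out formula worked through for an assumed 20MHz crystal and a 250ms time-out:

#include <stdio.h>

int main(void)
{
        unsigned long long fhz = 20000000;      /* input clock, Hz (assumed)  */
        unsigned long long timeout_ms = 250;    /* desired selection time-out */
        unsigned long long ccf, sto;

        /* ccf = ceil(clock / 5MHz), the same rounding esp_get_clock_params() uses */
        ccf = (fhz <= 5000000) ? 0 : (5000000 - 1 + fhz) / 5000000;

        /* STO = (time-out period * input clock) / (8192 * ccf) */
        sto = (timeout_ms * fhz) / (1000 * 8192 * ccf);

        printf("ccf=%llu sto=%llu\n", ccf, sto);        /* prints ccf=4 sto=152 */
        return 0;
}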
-
-static void __init esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
-{
-       struct sbus_dev *sdev = esp->sdev;
-       u8 bursts;
-
-       bursts = prom_getintdefault(esp->prom_node, "burst-sizes", 0xff);
-
-       if (dma) {
-               u8 tmp = prom_getintdefault(dma->prom_node,
-                                           "burst-sizes", 0xff);
-               if (tmp != 0xff)
-                       bursts &= tmp;
-       }
-
-       if (sdev->bus) {
-               u8 tmp = prom_getintdefault(sdev->bus->prom_node,
-                                           "burst-sizes", 0xff);
-               if (tmp != 0xff)
-                       bursts &= tmp;
-       }
-
-       if (bursts == 0xff ||
-           (bursts & DMA_BURST16) == 0 ||
-           (bursts & DMA_BURST32) == 0)
-               bursts = (DMA_BURST32 - 1);
-
-       esp->bursts = bursts;
-}
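For illustration, the burst-size narrowing above amounts to intersecting the "burst-sizes" masks exported by the ESP node, its DMA companion and the SBUS node, with 0xff (property absent) meaning "no constraint"; if the result lacks either the 16- or 32-byte burst bit, the code falls back to DMA_BURST32 - 1.  A small sketch, where the bit values merely stand in for the DMA_BURST* constants:

#include <stdio.h>

int main(void)
{
        unsigned int esp_node = 0x3f;   /* ESP node: 1- through 32-byte bursts */
        unsigned int dma_node = 0x30;   /* DMA companion: 16/32 only           */
        unsigned int sbus     = 0xff;   /* property missing: no constraint     */
        unsigned int bursts   = esp_node;

        if (dma_node != 0xff)
                bursts &= dma_node;
        if (sbus != 0xff)
                bursts &= sbus;

        printf("bursts=0x%02x\n", bursts);      /* prints bursts=0x30 */
        return 0;
}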
-
-static void __init esp_get_revision(struct esp *esp)
-{
-       u8 tmp;
-
-       esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
-       esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
-       sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
-
-       tmp = sbus_readb(esp->eregs + ESP_CFG2);
-       tmp &= ~ESP_CONFIG2_MAGIC;
-       if (tmp != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
-               /* If what we write to cfg2 does not come back, cfg2
-                * is not implemented, therefore this must be a plain
-                * esp100.
-                */
-               esp->erev = esp100;
-               printk("NCR53C90(esp100)\n");
-       } else {
-               esp->config2 = 0;
-               esp->prev_cfg3 = esp->config3[0] = 5;
-               sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
-               sbus_writeb(0, esp->eregs + ESP_CFG3);
-               sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
-
-               tmp = sbus_readb(esp->eregs + ESP_CFG3);
-               if (tmp != 5) {
-                       /* The cfg2 register is implemented, however
-                        * cfg3 is not, must be esp100a.
-                        */
-                       esp->erev = esp100a;
-                       printk("NCR53C90A(esp100a)\n");
-               } else {
-                       int target;
-
-                       for (target = 0; target < 16; target++)
-                               esp->config3[target] = 0;
-                       esp->prev_cfg3 = 0;
-                       sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
-
-                       /* All of cfg{1,2,3} implemented, must be one of
-                        * the fas variants, figure out which one.
-                        */
-                       if (esp->raw_cfact > ESP_CCF_F5) {
-                               esp->erev = fast;
-                               esp->sync_defp = SYNC_DEFP_FAST;
-                               printk("NCR53C9XF(espfast)\n");
-                       } else {
-                               esp->erev = esp236;
-                               printk("NCR53C9x(esp236)\n");
-                       }
-                       esp->config2 = 0;
-                       sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
-               }
-       }
-}
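The probe order above can be read as a small decision tree: a cfg2 write that does not read back means a plain ESP100, a cfg3 write that does not read back means an ESP100A, and otherwise the clock conversion factor separates the FAST parts from the ESP236.  A hypothetical standalone sketch of just that ordering (the has_cfg2/has_cfg3 flags are illustrative, not driver state, and 5 is assumed to be the value of ESP_CCF_F5):

#include <stdio.h>

enum rev { REV_ESP100, REV_ESP100A, REV_ESP236, REV_FAST };

/* raw_cfact > 5 mirrors the "esp->raw_cfact > ESP_CCF_F5" test above */
static enum rev probe_rev(int has_cfg2, int has_cfg3, int raw_cfact)
{
        if (!has_cfg2)
                return REV_ESP100;      /* cfg2 write did not stick */
        if (!has_cfg3)
                return REV_ESP100A;     /* cfg3 write did not stick */
        return (raw_cfact > 5) ? REV_FAST : REV_ESP236;
}

int main(void)
{
        printf("%d %d %d %d\n",
               probe_rev(0, 0, 4),      /* 0: ESP100  */
               probe_rev(1, 0, 4),      /* 1: ESP100A */
               probe_rev(1, 1, 4),      /* 2: ESP236  */
               probe_rev(1, 1, 7));     /* 3: FAST    */
        return 0;
}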
-
-static void __init esp_init_swstate(struct esp *esp)
-{
-       int i;
-
-       /* Command queues... */
-       esp->current_SC = NULL;
-       esp->disconnected_SC = NULL;
-       esp->issue_SC = NULL;
-
-       /* Target and current command state... */
-       esp->targets_present = 0;
-       esp->resetting_bus = 0;
-       esp->snip = 0;
-
-       init_waitqueue_head(&esp->reset_queue);
-
-       /* Debugging... */
-       for(i = 0; i < 32; i++)
-               esp->espcmdlog[i] = 0;
-       esp->espcmdent = 0;
-
-       /* MSG phase state... */
-       for(i = 0; i < 16; i++) {
-               esp->cur_msgout[i] = 0;
-               esp->cur_msgin[i] = 0;
-       }
-       esp->prevmsgout = esp->prevmsgin = 0;
-       esp->msgout_len = esp->msgin_len = 0;
-
-       /* Clear the one behind caches to hold unmatchable values. */
-       esp->prev_soff = esp->prev_stp = esp->prev_cfg3 = 0xff;
-       esp->prev_hme_dmacsr = 0xffffffff;
-}
-
-static int __init detect_one_esp(struct scsi_host_template *tpnt,
-                                struct device *dev,
-                                struct sbus_dev *esp_dev,
-                                struct sbus_dev *espdma,
-                                struct sbus_bus *sbus,
-                                int hme)
-{
-       static int instance;
-       struct Scsi_Host *esp_host = scsi_host_alloc(tpnt, sizeof(struct esp));
-       struct esp *esp;
-       
-       if (!esp_host)
-               return -ENOMEM;
-
-       if (hme)
-               esp_host->max_id = 16;
-       esp = (struct esp *) esp_host->hostdata;
-       esp->ehost = esp_host;
-       esp->sdev = esp_dev;
-       esp->esp_id = instance;
-       esp->prom_node = esp_dev->prom_node;
-       prom_getstring(esp->prom_node, "name", esp->prom_name,
-                      sizeof(esp->prom_name));
-
-       if (esp_find_dvma(esp, espdma) < 0)
-               goto fail_unlink;
-       if (esp_map_regs(esp, hme) < 0) {
-               printk("ESP registers unmappable");
-               goto fail_dvma_release;
-       }
-       if (esp_map_cmdarea(esp) < 0) {
-               printk("ESP DVMA transport area unmappable");
-               goto fail_unmap_regs;
-       }
-       if (esp_register_irq(esp) < 0)
-               goto fail_unmap_cmdarea;
-
-       esp_get_scsi_id(esp);
-
-       esp->diff = prom_getbool(esp->prom_node, "differential");
-       if (esp->diff)
-               printk("Differential ");
-
-       esp_get_clock_params(esp);
-       esp_get_bursts(esp, espdma);
-       esp_get_revision(esp);
-       esp_init_swstate(esp);
-
-       esp_bootup_reset(esp);
-
-       if (scsi_add_host(esp_host, dev))
-               goto fail_free_irq;
-
-       dev_set_drvdata(&esp_dev->ofdev.dev, esp);
-
-       scsi_scan_host(esp_host);
-       instance++;
-
-       return 0;
-
-fail_free_irq:
-       free_irq(esp->ehost->irq, esp);
-
-fail_unmap_cmdarea:
-       sbus_free_consistent(esp->sdev, 16,
-                            (void *) esp->esp_command,
-                            esp->esp_command_dvma);
-
-fail_unmap_regs:
-       sbus_iounmap(esp->eregs, ESP_REG_SIZE);
-
-fail_dvma_release:
-       esp->dma->allocated = 0;
-
-fail_unlink:
-       scsi_host_put(esp_host);
-       return -1;
-}
-
-/* Detecting ESP chips on the machine.  This is the simple and easy
- * version.
- */
-static int __devexit esp_remove_common(struct esp *esp)
-{
-       unsigned int irq = esp->ehost->irq;
-
-       scsi_remove_host(esp->ehost);
-
-       ESP_INTSOFF(esp->dregs);
-#if 0
-       esp_reset_dma(esp);
-       esp_reset_esp(esp);
-#endif
-
-       free_irq(irq, esp);
-       sbus_free_consistent(esp->sdev, 16,
-                            (void *) esp->esp_command, esp->esp_command_dvma);
-       sbus_iounmap(esp->eregs, ESP_REG_SIZE);
-       esp->dma->allocated = 0;
-
-       scsi_host_put(esp->ehost);
-
-       return 0;
-}
-
-
-#ifdef CONFIG_SUN4
-
-#include <asm/sun4paddr.h>
-
-static struct sbus_dev sun4_esp_dev;
-
-static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
-{
-       if (sun4_esp_physaddr) {
-               memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev));
-               sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr;
-               sun4_esp_dev.irqs[0] = 4;
-               sun4_esp_dev.resource[0].start = sun4_esp_physaddr;
-               sun4_esp_dev.resource[0].end =
-                       sun4_esp_physaddr + ESP_REG_SIZE - 1;
-               sun4_esp_dev.resource[0].flags = IORESOURCE_IO;
-
-               return detect_one_esp(tpnt, NULL,
-                                     &sun4_esp_dev, NULL, NULL, 0);
-       }
-       return 0;
-}
-
-static int __devexit esp_sun4_remove(void)
-{
-       struct of_device *dev = &sun4_esp_dev.ofdev;
-       struct esp *esp = dev_get_drvdata(&dev->dev);
-
-       return esp_remove_common(esp);
-}
-
-#else /* !CONFIG_SUN4 */
-
-static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
-{
-       struct sbus_dev *sdev = to_sbus_device(&dev->dev);
-       struct device_node *dp = dev->node;
-       struct sbus_dev *dma_sdev = NULL;
-       int hme = 0;
-
-       if (dp->parent &&
-           (!strcmp(dp->parent->name, "espdma") ||
-            !strcmp(dp->parent->name, "dma")))
-               dma_sdev = sdev->parent;
-       else if (!strcmp(dp->name, "SUNW,fas")) {
-               dma_sdev = sdev;
-               hme = 1;
-       }
-
-       return detect_one_esp(match->data, &dev->dev,
-                             sdev, dma_sdev, sdev->bus, hme);
-}
-
-static int __devexit esp_sbus_remove(struct of_device *dev)
-{
-       struct esp *esp = dev_get_drvdata(&dev->dev);
-
-       return esp_remove_common(esp);
-}
-
-#endif /* !CONFIG_SUN4 */
-
-/* The info function will return whatever useful
- * information the developer sees fit.  If not provided, then
- * the name field will be used instead.
- */
-static const char *esp_info(struct Scsi_Host *host)
-{
-       struct esp *esp;
-
-       esp = (struct esp *) host->hostdata;
-       switch (esp->erev) {
-       case esp100:
-               return "Sparc ESP100 (NCR53C90)";
-       case esp100a:
-               return "Sparc ESP100A (NCR53C90A)";
-       case esp236:
-               return "Sparc ESP236";
-       case fas236:
-               return "Sparc ESP236-FAST";
-       case fashme:
-               return "Sparc ESP366-HME";
-       case fas100a:
-               return "Sparc ESP100A-FAST";
-       default:
-               return "Bogon ESP revision";
-       };
-}
-
-/* From Wolfgang Stanglmeier's NCR scsi driver. */
-struct info_str
-{
-       char *buffer;
-       int length;
-       int offset;
-       int pos;
-};
-
-static void copy_mem_info(struct info_str *info, char *data, int len)
-{
-       if (info->pos + len > info->length)
-               len = info->length - info->pos;
-
-       if (info->pos + len < info->offset) {
-               info->pos += len;
-               return;
-       }
-       if (info->pos < info->offset) {
-               data += (info->offset - info->pos);
-               len  -= (info->offset - info->pos);
-       }
-
-       if (len > 0) {
-               memcpy(info->buffer + info->pos, data, len);
-               info->pos += len;
-       }
-}
-
-static int copy_info(struct info_str *info, char *fmt, ...)
-{
-       va_list args;
-       char buf[81];
-       int len;
-
-       va_start(args, fmt);
-       len = vsprintf(buf, fmt, args);
-       va_end(args);
-
-       copy_mem_info(info, buf, len);
-       return len;
-}
-
-static int esp_host_info(struct esp *esp, char *ptr, off_t offset, int len)
-{
-       struct scsi_device *sdev;
-       struct info_str info;
-       int i;
-
-       info.buffer     = ptr;
-       info.length     = len;
-       info.offset     = offset;
-       info.pos        = 0;
-
-       copy_info(&info, "Sparc ESP Host Adapter:\n");
-       copy_info(&info, "\tPROM node\t\t%08x\n", (unsigned int) esp->prom_node);
-       copy_info(&info, "\tPROM name\t\t%s\n", esp->prom_name);
-       copy_info(&info, "\tESP Model\t\t");
-       switch (esp->erev) {
-       case esp100:
-               copy_info(&info, "ESP100\n");
-               break;
-       case esp100a:
-               copy_info(&info, "ESP100A\n");
-               break;
-       case esp236:
-               copy_info(&info, "ESP236\n");
-               break;
-       case fas236:
-               copy_info(&info, "FAS236\n");
-               break;
-       case fas100a:
-               copy_info(&info, "FAS100A\n");
-               break;
-       case fast:
-               copy_info(&info, "FAST\n");
-               break;
-       case fashme:
-               copy_info(&info, "Happy Meal FAS\n");
-               break;
-       case espunknown:
-       default:
-               copy_info(&info, "Unknown!\n");
-               break;
-       };
-       copy_info(&info, "\tDMA Revision\t\t");
-       switch (esp->dma->revision) {
-       case dvmarev0:
-               copy_info(&info, "Rev 0\n");
-               break;
-       case dvmaesc1:
-               copy_info(&info, "ESC Rev 1\n");
-               break;
-       case dvmarev1:
-               copy_info(&info, "Rev 1\n");
-               break;
-       case dvmarev2:
-               copy_info(&info, "Rev 2\n");
-               break;
-       case dvmarev3:
-               copy_info(&info, "Rev 3\n");
-               break;
-       case dvmarevplus:
-               copy_info(&info, "Rev 1+\n");
-               break;
-       case dvmahme:
-               copy_info(&info, "Rev HME/FAS\n");
-               break;
-       default:
-               copy_info(&info, "Unknown!\n");
-               break;
-       };
-       copy_info(&info, "\tLive Targets\t\t[ ");
-       for (i = 0; i < 15; i++) {
-               if (esp->targets_present & (1 << i))
-                       copy_info(&info, "%d ", i);
-       }
-       copy_info(&info, "]\n\n");
-       
-       /* Now describe the state of each existing target. */
-       copy_info(&info, "Target #\tconfig3\t\tSync Capabilities\tDisconnect\tWide\n");
-
-       shost_for_each_device(sdev, esp->ehost) {
-               struct esp_device *esp_dev = sdev->hostdata;
-               uint id = sdev->id;
-
-               if (!(esp->targets_present & (1 << id)))
-                       continue;
-
-               copy_info(&info, "%d\t\t", id);
-               copy_info(&info, "%08lx\t", esp->config3[id]);
-               copy_info(&info, "[%02lx,%02lx]\t\t\t",
-                       esp_dev->sync_max_offset,
-                       esp_dev->sync_min_period);
-               copy_info(&info, "%s\t\t",
-                       esp_dev->disconnect ? "yes" : "no");
-               copy_info(&info, "%s\n",
-                       (esp->config3[id] & ESP_CONFIG3_EWIDE) ? "yes" : "no");
-       }
-       return info.pos > info.offset? info.pos - info.offset : 0;
-}
-
-/* ESP proc filesystem code. */
-static int esp_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
-                        int length, int inout)
-{
-       struct esp *esp = (struct esp *) host->hostdata;
-
-       if (inout)
-               return -EINVAL; /* not yet */
-
-       if (start)
-               *start = buffer;
-
-       return esp_host_info(esp, buffer, offset, length);
-}
-
-static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
-{
-       if (sp->use_sg == 0) {
-               sp->SCp.this_residual = sp->request_bufflen;
-               sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
-               sp->SCp.buffers_residual = 0;
-               if (sp->request_bufflen) {
-                       sp->SCp.have_data_in = sbus_map_single(esp->sdev, sp->SCp.buffer,
-                                                              sp->SCp.this_residual,
-                                                              sp->sc_data_direction);
-                       sp->SCp.ptr = (char *) ((unsigned long)sp->SCp.have_data_in);
-               } else {
-                       sp->SCp.ptr = NULL;
-               }
-       } else {
-               sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
-               sp->SCp.buffers_residual = sbus_map_sg(esp->sdev,
-                                                      sp->SCp.buffer,
-                                                      sp->use_sg,
-                                                      sp->sc_data_direction);
-               sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer);
-               sp->SCp.ptr = (char *) ((unsigned long)sg_dma_address(sp->SCp.buffer));
-       }
-}
-
-static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
-{
-       if (sp->use_sg) {
-               sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg,
-                             sp->sc_data_direction);
-       } else if (sp->request_bufflen) {
-               sbus_unmap_single(esp->sdev,
-                                 sp->SCp.have_data_in,
-                                 sp->request_bufflen,
-                                 sp->sc_data_direction);
-       }
-}
-
-static void esp_restore_pointers(struct esp *esp, struct scsi_cmnd *sp)
-{
-       struct esp_pointers *ep = &esp->data_pointers[sp->device->id];
-
-       sp->SCp.ptr = ep->saved_ptr;
-       sp->SCp.buffer = ep->saved_buffer;
-       sp->SCp.this_residual = ep->saved_this_residual;
-       sp->SCp.buffers_residual = ep->saved_buffers_residual;
-}
-
-static void esp_save_pointers(struct esp *esp, struct scsi_cmnd *sp)
-{
-       struct esp_pointers *ep = &esp->data_pointers[sp->device->id];
-
-       ep->saved_ptr = sp->SCp.ptr;
-       ep->saved_buffer = sp->SCp.buffer;
-       ep->saved_this_residual = sp->SCp.this_residual;
-       ep->saved_buffers_residual = sp->SCp.buffers_residual;
-}
-
-/* Some rules:
- *
- *   1) Never ever panic while something is live on the bus.
- *      If there is to be any chance of syncing the disks this
- *      rule is to be obeyed.
- *
- *   2) Any target that causes a foul condition will no longer
- *      have synchronous transfers done to it, no questions
- *      asked.
- *
- *   3) Keep register accesses to a minimum.  Think about some
- *      day when we have Xbus machines this is running on and
- *      the ESP chip is on the other end of the machine on a
- *      different board from the cpu where this is running.
- */
-
-/* Fire off a command.  We assume the bus is free and that the only
- * case where we could see an interrupt is where we have disconnected
- * commands active and they are trying to reselect us.
- */
-static inline void esp_check_cmd(struct esp *esp, struct scsi_cmnd *sp)
-{
-       switch (sp->cmd_len) {
-       case 6:
-       case 10:
-       case 12:
-               esp->esp_slowcmd = 0;
-               break;
-
-       default:
-               esp->esp_slowcmd = 1;
-               esp->esp_scmdleft = sp->cmd_len;
-               esp->esp_scmdp = &sp->cmnd[0];
-               break;
-       };
-}
-
-static inline void build_sync_nego_msg(struct esp *esp, int period, int offset)
-{
-       esp->cur_msgout[0] = EXTENDED_MESSAGE;
-       esp->cur_msgout[1] = 3;
-       esp->cur_msgout[2] = EXTENDED_SDTR;
-       esp->cur_msgout[3] = period;
-       esp->cur_msgout[4] = offset;
-       esp->msgout_len = 5;
-}
-
-/* SIZE is in bits; currently HME only supports 16-bit wide transfers. */
-static inline void build_wide_nego_msg(struct esp *esp, int size)
-{
-       esp->cur_msgout[0] = EXTENDED_MESSAGE;
-       esp->cur_msgout[1] = 2;
-       esp->cur_msgout[2] = EXTENDED_WDTR;
-       switch (size) {
-       case 32:
-               esp->cur_msgout[3] = 2;
-               break;
-       case 16:
-               esp->cur_msgout[3] = 1;
-               break;
-       case 8:
-       default:
-               esp->cur_msgout[3] = 0;
-               break;
-       };
-
-       esp->msgout_len = 4;
-}
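The two helpers above emit standard SCSI-2 extended messages; the byte values behind EXTENDED_MESSAGE, EXTENDED_SDTR and EXTENDED_WDTR are 0x01, 0x01 and 0x03 respectively.  A standalone sketch of the resulting byte streams, assuming a period factor of 25 (100ns, in 4ns units) and an offset of 15:

#include <stdio.h>

int main(void)
{
        /* SDTR: 0x01 len=3 0x01 <period factor> <offset> */
        unsigned char sdtr[5] = { 0x01, 3, 0x01, 25, 15 };
        /* WDTR: 0x01 len=2 0x03 <exponent>, 1 => 16-bit wide */
        unsigned char wdtr[4] = { 0x01, 2, 0x03, 1 };
        int i;

        printf("SDTR:");
        for (i = 0; i < 5; i++)
                printf(" %02x", sdtr[i]);
        printf("\nWDTR:");
        for (i = 0; i < 4; i++)
                printf(" %02x", wdtr[i]);
        printf("\n");
        return 0;
}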
-
-static void esp_exec_cmd(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr;
-       struct scsi_device *SDptr;
-       struct esp_device *esp_dev;
-       volatile u8 *cmdp = esp->esp_command;
-       u8 the_esp_command;
-       int lun, target;
-       int i;
-
-       /* Hold off if we have disconnected commands and
-        * an IRQ is showing...
-        */
-       if (esp->disconnected_SC && ESP_IRQ_P(esp->dregs))
-               return;
-
-       /* Grab first member of the issue queue. */
-       SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC);
-
-       /* Safe to panic here because current_SC is null. */
-       if (!SCptr)
-               panic("esp: esp_exec_cmd and issue queue is NULL");
-
-       SDptr = SCptr->device;
-       esp_dev = SDptr->hostdata;
-       lun = SCptr->device->lun;
-       target = SCptr->device->id;
-
-       esp->snip = 0;
-       esp->msgout_len = 0;
-
-       /* Send it out whole, or piece by piece?   The ESP
-        * only knows how to automatically send out 6, 10,
-        * and 12 byte commands.  I used to think that the
-        * Linux SCSI code would never throw anything other
-        * than that to us, but then again there is the
-        * SCSI generic driver which can send us anything.
-        */
-       esp_check_cmd(esp, SCptr);
-
-       /* If arbitration/selection is successful, the ESP will leave
-        * ATN asserted, causing the target to go into message out
-        * phase.  The ESP will feed the target the identify and then
-        * the target can only legally go to one of command,
-        * datain/out, status, or message in phase, or stay in message
-        * out phase (should we be trying to send a sync negotiation
-        * message after the identify).  It is not allowed to drop
-        * BSY, but some buggy targets do and we check for this
-        * condition in the selection complete code.  Most of the time
-        * we'll make the command bytes available to the ESP and it
-        * will not interrupt us until it finishes command phase.  We
-        * cannot do this for command sizes the ESP does not
-        * understand, and in that case we'll get interrupted right
-        * when the target goes into command phase.
-        *
-        * It is absolutely _illegal_ in the presence of SCSI-2 devices
-        * to use the ESP select w/o ATN command.  When SCSI-2 devices are
-        * present on the bus we _must_ always go straight to message out
-        * phase with an identify message for the target.  Since
-        * selection w/o ATN was merely an option in SCSI-1, doing SCSI-2
-        * style selections should not confuse SCSI-1 devices, we hope.
-        */
-
-       if (esp_dev->sync) {
-               /* this target's sync is known */
-#ifndef __sparc_v9__
-do_sync_known:
-#endif
-               if (esp_dev->disconnect)
-                       *cmdp++ = IDENTIFY(1, lun);
-               else
-                       *cmdp++ = IDENTIFY(0, lun);
-
-               if (esp->esp_slowcmd) {
-                       the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA);
-                       esp_advance_phase(SCptr, in_slct_stop);
-               } else {
-                       the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
-                       esp_advance_phase(SCptr, in_slct_norm);
-               }
-       } else if (!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) {
-               /* After the bootup SCSI code sends both the
-                * TEST_UNIT_READY and INQUIRY commands we want
-                * to at least attempt allowing the device to
-                * disconnect.
-                */
-               ESPMISC(("esp: Selecting device for first time. target=%d "
-                        "lun=%d\n", target, SCptr->device->lun));
-               if (!SDptr->borken && !esp_dev->disconnect)
-                       esp_dev->disconnect = 1;
-
-               *cmdp++ = IDENTIFY(0, lun);
-               esp->prevmsgout = NOP;
-               esp_advance_phase(SCptr, in_slct_norm);
-               the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
-
-               /* Take no chances... */
-               esp_dev->sync_max_offset = 0;
-               esp_dev->sync_min_period = 0;
-       } else {
-               /* Sorry, I have had way too many problems with
-                * various CDROM devices on ESP. -DaveM
-                */
-               int cdrom_hwbug_wkaround = 0;
-
-#ifndef __sparc_v9__
-               /* Never allow disconnects or synchronous transfers on
-                * SparcStation1 and SparcStation1+.  Allowing those
-                * to be enabled seems to lock up the machine completely.
-                */
-               if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
-                   (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
-                       /* But we are nice and allow tapes and removable
-                        * disks (but not CDROMs) to disconnect.
-                        */
-                       if(SDptr->type == TYPE_TAPE ||
-                          (SDptr->type != TYPE_ROM && SDptr->removable))
-                               esp_dev->disconnect = 1;
-                       else
-                               esp_dev->disconnect = 0;
-                       esp_dev->sync_max_offset = 0;
-                       esp_dev->sync_min_period = 0;
-                       esp_dev->sync = 1;
-                       esp->snip = 0;
-                       goto do_sync_known;
-               }
-#endif /* !(__sparc_v9__) */
-
-               /* We've talked to this guy before,
-                * but never negotiated.  Let's try;
-                * we need to attempt WIDE first, before
-                * sync nego, as per the SCSI-2 standard.
-                */
-               if (esp->erev == fashme && !esp_dev->wide) {
-                       if (!SDptr->borken &&
-                          SDptr->type != TYPE_ROM &&
-                          SDptr->removable == 0) {
-                               build_wide_nego_msg(esp, 16);
-                               esp_dev->wide = 1;
-                               esp->wnip = 1;
-                               goto after_nego_msg_built;
-                       } else {
-                               esp_dev->wide = 1;
-                               /* Fall through and try sync. */
-                       }
-               }
-
-               if (!SDptr->borken) {
-                       if ((SDptr->type == TYPE_ROM)) {
-                               /* Nice try sucker... */
-                               ESPMISC(("esp%d: Disabling sync for buggy "
-                                        "CDROM.\n", esp->esp_id));
-                               cdrom_hwbug_wkaround = 1;
-                               build_sync_nego_msg(esp, 0, 0);
-                       } else if (SDptr->removable != 0) {
-                               ESPMISC(("esp%d: Not negotiating sync/wide but "
-                                        "allowing disconnect for removable media.\n",
-                                        esp->esp_id));
-                               build_sync_nego_msg(esp, 0, 0);
-                       } else {
-                               build_sync_nego_msg(esp, esp->sync_defp, 15);
-                       }
-               } else {
-                       build_sync_nego_msg(esp, 0, 0);
-               }
-               esp_dev->sync = 1;
-               esp->snip = 1;
-
-after_nego_msg_built:
-               /* A fix for broken SCSI1 targets: when they disconnect
-                * they lock up the bus and confuse the ESP.  So disallow
-                * disconnects for SCSI1 targets for now until we
-                * find a better fix.
-                *
-                * Addendum: This is funny, I figured out what was going
-                *           on.  The blotzed SCSI1 target would disconnect,
-                *           and one or both of the other SCSI2 targets would be
-                *           disconnected as well.  The SCSI1 target would
-                *           stay disconnected long enough that we start
-                *           up a command on one of the SCSI2 targets.  As
-                *           the ESP is arbitrating for the bus the SCSI1
-                *           target begins to arbitrate as well to reselect
-                *           the ESP.  The SCSI1 target refuses to drop its
-                *           ID bit on the data bus even though the ESP is
-                *           at ID 7 and is the obvious winner for any
-                *           arbitration.  The ESP is a poor sport and refuses
-                *           to lose arbitration, it will continue indefinitely
-                *           trying to arbitrate for the bus and can only be
-                *           stopped via a chip reset or SCSI bus reset.
-                *           Therefore _no_ disconnects for SCSI1 targets
-                *           thank you very much. ;-)
-                */
-               if(((SDptr->scsi_level < 3) &&
-                   (SDptr->type != TYPE_TAPE) &&
-                   SDptr->removable == 0) ||
-                   cdrom_hwbug_wkaround || SDptr->borken) {
-                       ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d "
-                                "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun));
-                       esp_dev->disconnect = 0;
-                       *cmdp++ = IDENTIFY(0, lun);
-               } else {
-                       *cmdp++ = IDENTIFY(1, lun);
-               }
-
-               /* ESP fifo is only so big...
-                * Make this look like a slow command.
-                */
-               esp->esp_slowcmd = 1;
-               esp->esp_scmdleft = SCptr->cmd_len;
-               esp->esp_scmdp = &SCptr->cmnd[0];
-
-               the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA);
-               esp_advance_phase(SCptr, in_slct_msg);
-       }
-
-       if (!esp->esp_slowcmd)
-               for (i = 0; i < SCptr->cmd_len; i++)
-                       *cmdp++ = SCptr->cmnd[i];
-
-       /* HME sucks... */
-       if (esp->erev == fashme)
-               sbus_writeb((target & 0xf) | (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT),
-                           esp->eregs + ESP_BUSID);
-       else
-               sbus_writeb(target & 7, esp->eregs + ESP_BUSID);
-       if (esp->prev_soff != esp_dev->sync_max_offset ||
-           esp->prev_stp  != esp_dev->sync_min_period ||
-           (esp->erev > esp100a &&
-            esp->prev_cfg3 != esp->config3[target])) {
-               esp->prev_soff = esp_dev->sync_max_offset;
-               esp->prev_stp = esp_dev->sync_min_period;
-               sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
-               sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
-               if (esp->erev > esp100a) {
-                       esp->prev_cfg3 = esp->config3[target];
-                       sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
-               }
-       }
-       i = (cmdp - esp->esp_command);
-
-       if (esp->erev == fashme) {
-               esp_cmd(esp, ESP_CMD_FLUSH); /* Grrr! */
-
-               /* Set up the DMA and HME counters */
-               sbus_writeb(i, esp->eregs + ESP_TCLOW);
-               sbus_writeb(0, esp->eregs + ESP_TCMED);
-               sbus_writeb(0, esp->eregs + FAS_RLO);
-               sbus_writeb(0, esp->eregs + FAS_RHI);
-               esp_cmd(esp, the_esp_command);
-
-               /* Talk about touchy hardware... */
-               esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
-                                        (DMA_SCSI_DISAB | DMA_ENABLE)) &
-                                       ~(DMA_ST_WRITE));
-               sbus_writel(16, esp->dregs + DMA_COUNT);
-               sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR);
-               sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR);
-       } else {
-               u32 tmp;
-
-               /* Set up the DMA and ESP counters */
-               sbus_writeb(i, esp->eregs + ESP_TCLOW);
-               sbus_writeb(0, esp->eregs + ESP_TCMED);
-               tmp = sbus_readl(esp->dregs + DMA_CSR);
-               tmp &= ~DMA_ST_WRITE;
-               tmp |= DMA_ENABLE;
-               sbus_writel(tmp, esp->dregs + DMA_CSR);
-               if (esp->dma->revision == dvmaesc1) {
-                       if (i) /* Workaround ESC gate array SBUS rerun bug. */
-                               sbus_writel(PAGE_SIZE, esp->dregs + DMA_COUNT);
-               }
-               sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR);
-
-               /* Tell ESP to "go". */
-               esp_cmd(esp, the_esp_command);
-       }
-}
-
-/* Queue a SCSI command delivered from the mid-level Linux SCSI code. */
-static int esp_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
-{
-       struct esp *esp;
-
-       /* Set up func ptr and initial driver cmd-phase. */
-       SCpnt->scsi_done = done;
-       SCpnt->SCp.phase = not_issued;
-
-       /* We use the scratch area. */
-       ESPQUEUE(("esp_queue: target=%d lun=%d ", SCpnt->device->id, SCpnt->device->lun));
-       ESPDISC(("N<%02x,%02x>", SCpnt->device->id, SCpnt->device->lun));
-
-       esp = (struct esp *) SCpnt->device->host->hostdata;
-       esp_get_dmabufs(esp, SCpnt);
-       esp_save_pointers(esp, SCpnt); /* FIXME for tag queueing */
-
-       SCpnt->SCp.Status           = CHECK_CONDITION;
-       SCpnt->SCp.Message          = 0xff;
-       SCpnt->SCp.sent_command     = 0;
-
-       /* Place into our queue. */
-       if (SCpnt->cmnd[0] == REQUEST_SENSE) {
-               ESPQUEUE(("RQSENSE\n"));
-               prepend_SC(&esp->issue_SC, SCpnt);
-       } else {
-               ESPQUEUE(("\n"));
-               append_SC(&esp->issue_SC, SCpnt);
-       }
-
-       /* Run it now if we can. */
-       if (!esp->current_SC && !esp->resetting_bus)
-               esp_exec_cmd(esp);
-
-       return 0;
-}
-
-/* Dump driver state. */
-static void esp_dump_cmd(struct scsi_cmnd *SCptr)
-{
-       ESPLOG(("[tgt<%02x> lun<%02x> "
-               "pphase<%s> cphase<%s>]",
-               SCptr->device->id, SCptr->device->lun,
-               phase_string(SCptr->SCp.sent_command),
-               phase_string(SCptr->SCp.phase)));
-}
-
-static void esp_dump_state(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr = esp->current_SC;
-#ifdef DEBUG_ESP_CMDS
-       int i;
-#endif
-
-       ESPLOG(("esp%d: dumping state\n", esp->esp_id));
-       ESPLOG(("esp%d: dma -- cond_reg<%08x> addr<%08x>\n",
-               esp->esp_id,
-               sbus_readl(esp->dregs + DMA_CSR),
-               sbus_readl(esp->dregs + DMA_ADDR)));
-       ESPLOG(("esp%d: SW [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
-               esp->esp_id, esp->sreg, esp->seqreg, esp->ireg));
-       ESPLOG(("esp%d: HW reread [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
-               esp->esp_id,
-               sbus_readb(esp->eregs + ESP_STATUS),
-               sbus_readb(esp->eregs + ESP_SSTEP),
-               sbus_readb(esp->eregs + ESP_INTRPT)));
-#ifdef DEBUG_ESP_CMDS
-       printk("esp%d: last ESP cmds [", esp->esp_id);
-       i = (esp->espcmdent - 1) & 31;
-       printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">");
-       i = (i - 1) & 31;
-       printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">");
-       i = (i - 1) & 31;
-       printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">");
-       i = (i - 1) & 31;
-       printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">");
-       printk("]\n");
-#endif /* (DEBUG_ESP_CMDS) */
-
-       if (SCptr) {
-               ESPLOG(("esp%d: current command ", esp->esp_id));
-               esp_dump_cmd(SCptr);
-       }
-       ESPLOG(("\n"));
-       SCptr = esp->disconnected_SC;
-       ESPLOG(("esp%d: disconnected ", esp->esp_id));
-       while (SCptr) {
-               esp_dump_cmd(SCptr);
-               SCptr = (struct scsi_cmnd *) SCptr->host_scribble;
-       }
-       ESPLOG(("\n"));
-}
-
-/* Abort a command.  The host_lock is acquired by caller. */
-static int esp_abort(struct scsi_cmnd *SCptr)
-{
-       struct esp *esp = (struct esp *) SCptr->device->host->hostdata;
-       int don;
-
-       ESPLOG(("esp%d: Aborting command\n", esp->esp_id));
-       esp_dump_state(esp);
-
-       /* Wheee, if this is the current command on the bus, the
-        * best we can do is assert ATN and wait for msgout phase.
-        * This should even fix a hung SCSI bus when we lose state
-        * in the driver and timeout because the eventual phase change
-        * will cause the ESP to (eventually) give an interrupt.
-        */
-       if (esp->current_SC == SCptr) {
-               esp->cur_msgout[0] = ABORT;
-               esp->msgout_len = 1;
-               esp->msgout_ctr = 0;
-               esp_cmd(esp, ESP_CMD_SATN);
-               return SUCCESS;
-       }
-
-       /* If it is still in the issue queue then we can safely
-        * call the completion routine and report abort success.
-        */
-       don = (sbus_readl(esp->dregs + DMA_CSR) & DMA_INT_ENAB);
-       if (don) {
-               ESP_INTSOFF(esp->dregs);
-       }
-       if (esp->issue_SC) {
-               struct scsi_cmnd **prev, *this;
-               for (prev = (&esp->issue_SC), this = esp->issue_SC;
-                    this != NULL;
-                    prev = (struct scsi_cmnd **) &(this->host_scribble),
-                            this = (struct scsi_cmnd *) this->host_scribble) {
-
-                       if (this == SCptr) {
-                               *prev = (struct scsi_cmnd *) this->host_scribble;
-                               this->host_scribble = NULL;
-
-                               esp_release_dmabufs(esp, this);
-                               this->result = DID_ABORT << 16;
-                               this->scsi_done(this);
-
-                               if (don)
-                                       ESP_INTSON(esp->dregs);
-
-                               return SUCCESS;
-                       }
-               }
-       }
-
-       /* Yuck, the command to abort is disconnected; it is not
-        * worth trying to abort it now if something else is live
-        * on the bus at this time.  So, we let the SCSI code wait
-        * a little bit and try again later.
-        */
-       if (esp->current_SC) {
-               if (don)
-                       ESP_INTSON(esp->dregs);
-               return FAILED;
-       }
-
-       /* It's disconnected, we have to reconnect to re-establish
-        * the nexus and tell the device to abort.  However, we really
-        * cannot 'reconnect' per se.  Don't try to be fancy, just
-        * indicate failure, which causes our caller to reset the whole
-        * bus.
-        */
-
-       if (don)
-               ESP_INTSON(esp->dregs);
-
-       return FAILED;
-}
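The issue-queue scan above uses the classic pointer-to-pointer unlink idiom: walk the singly linked list while keeping the address of the link that points at the current node, so removing the head needs no special case.  A self-contained sketch with a hypothetical node type:

#include <stdio.h>

struct node { int id; struct node *next; };

static void unlink_node(struct node **head, struct node *victim)
{
        struct node **prev, *this;

        for (prev = head, this = *head;
             this != NULL;
             prev = &this->next, this = this->next) {
                if (this == victim) {
                        *prev = this->next;     /* splice it out */
                        this->next = NULL;
                        return;
                }
        }
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *head = &a, *n;

        unlink_node(&head, &b);
        for (n = head; n != NULL; n = n->next)
                printf("%d ", n->id);           /* prints "1 3" */
        printf("\n");
        return 0;
}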
-
-/* We've sent ESP_CMD_RS to the ESP, the interrupt had just
- * arrived indicating the end of the SCSI bus reset.  Our job
- * is to clean out the command queues and begin re-execution
- * of SCSI commands once more.
- */
-static int esp_finish_reset(struct esp *esp)
-{
-       struct scsi_cmnd *sp = esp->current_SC;
-
-       /* Clean up currently executing command, if any. */
-       if (sp != NULL) {
-               esp->current_SC = NULL;
-
-               esp_release_dmabufs(esp, sp);
-               sp->result = (DID_RESET << 16);
-
-               sp->scsi_done(sp);
-       }
-
-       /* Clean up disconnected queue, they have been invalidated
-        * by the bus reset.
-        */
-       if (esp->disconnected_SC) {
-               while ((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) {
-                       esp_release_dmabufs(esp, sp);
-                       sp->result = (DID_RESET << 16);
-
-                       sp->scsi_done(sp);
-               }
-       }
-
-       /* SCSI bus reset is complete. */
-       esp->resetting_bus = 0;
-       wake_up(&esp->reset_queue);
-
-       /* Ok, now it is safe to get commands going once more. */
-       if (esp->issue_SC)
-               esp_exec_cmd(esp);
-
-       return do_intr_end;
-}
-
-static int esp_do_resetbus(struct esp *esp)
-{
-       ESPLOG(("esp%d: Resetting scsi bus\n", esp->esp_id));
-       esp->resetting_bus = 1;
-       esp_cmd(esp, ESP_CMD_RS);
-
-       return do_intr_end;
-}
-
-/* Reset ESP chip, reset hanging bus, then kill active and
- * disconnected commands for targets without soft reset.
- *
- * The host_lock is acquired by caller.
- */
-static int esp_reset(struct scsi_cmnd *SCptr)
-{
-       struct esp *esp = (struct esp *) SCptr->device->host->hostdata;
-
-       spin_lock_irq(esp->ehost->host_lock);
-       (void) esp_do_resetbus(esp);
-       spin_unlock_irq(esp->ehost->host_lock);
-
-       wait_event(esp->reset_queue, (esp->resetting_bus == 0));
-
-       return SUCCESS;
-}
-
-/* Internal ESP done function. */
-static void esp_done(struct esp *esp, int error)
-{
-       struct scsi_cmnd *done_SC = esp->current_SC;
-
-       esp->current_SC = NULL;
-
-       esp_release_dmabufs(esp, done_SC);
-       done_SC->result = error;
-
-       done_SC->scsi_done(done_SC);
-
-       /* Bus is free, issue any commands in the queue. */
-       if (esp->issue_SC && !esp->current_SC)
-               esp_exec_cmd(esp);
-
-}
-
-/* Wheee, ESP interrupt engine. */  
-
-/* Forward declarations. */
-static int esp_do_phase_determine(struct esp *esp);
-static int esp_do_data_finale(struct esp *esp);
-static int esp_select_complete(struct esp *esp);
-static int esp_do_status(struct esp *esp);
-static int esp_do_msgin(struct esp *esp);
-static int esp_do_msgindone(struct esp *esp);
-static int esp_do_msgout(struct esp *esp);
-static int esp_do_cmdbegin(struct esp *esp);
-
-#define sreg_datainp(__sreg)  (((__sreg) & ESP_STAT_PMASK) == ESP_DIP)
-#define sreg_dataoutp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DOP)
-
-/* Read any bytes found in the FAS366 fifo, storing them into
- * the ESP driver software state structure.
- */
-static void hme_fifo_read(struct esp *esp)
-{
-       u8 count = 0;
-       u8 status = esp->sreg;
-
-       /* Cannot safely frob the fifo for these following cases, but
-        * we must always read the fifo when the reselect interrupt
-        * is pending.
-        */
-       if (((esp->ireg & ESP_INTR_RSEL) == 0)  &&
-           (sreg_datainp(status)               ||
-            sreg_dataoutp(status)              ||
-            (esp->current_SC &&
-             esp->current_SC->SCp.phase == in_data_done))) {
-               ESPHME(("<wkaround_skipped>"));
-       } else {
-               unsigned long fcnt = sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES;
-
-               /* The HME stores bytes in multiples of 2 in the fifo. */
-               ESPHME(("hme_fifo[fcnt=%d", (int)fcnt));
-               while (fcnt) {
-                       esp->hme_fifo_workaround_buffer[count++] =
-                               sbus_readb(esp->eregs + ESP_FDATA);
-                       esp->hme_fifo_workaround_buffer[count++] =
-                               sbus_readb(esp->eregs + ESP_FDATA);
-                       ESPHME(("<%02x,%02x>", esp->hme_fifo_workaround_buffer[count-2], esp->hme_fifo_workaround_buffer[count-1]));
-                       fcnt--;
-               }
-               if (sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_F1BYTE) {
-                       ESPHME(("<poke_byte>"));
-                       sbus_writeb(0, esp->eregs + ESP_FDATA);
-                       esp->hme_fifo_workaround_buffer[count++] =
-                               sbus_readb(esp->eregs + ESP_FDATA);
-                       ESPHME(("<%02x,0x00>", esp->hme_fifo_workaround_buffer[count-1]));
-                       ESPHME(("CMD_FLUSH"));
-                       esp_cmd(esp, ESP_CMD_FLUSH);
-               } else {
-                       ESPHME(("no_xtra_byte"));
-               }
-       }
-       ESPHME(("wkarnd_cnt=%d]", (int)count));
-       esp->hme_fifo_workaround_count = count;
-}
-
-static inline void hme_fifo_push(struct esp *esp, u8 *bytes, u8 count)
-{
-       esp_cmd(esp, ESP_CMD_FLUSH);
-       while (count) {
-               u8 tmp = *bytes++;
-               sbus_writeb(tmp, esp->eregs + ESP_FDATA);
-               sbus_writeb(0, esp->eregs + ESP_FDATA);
-               count--;
-       }
-}
-
-/* We try to avoid some interrupts by jumping ahead and seeing if the ESP
- * has gotten far enough yet.  Hence the following.
- */
-static inline int skipahead1(struct esp *esp, struct scsi_cmnd *scp,
-                            int prev_phase, int new_phase)
-{
-       if (scp->SCp.sent_command != prev_phase)
-               return 0;
-       if (ESP_IRQ_P(esp->dregs)) {
-               /* Yes, we are able to save an interrupt. */
-               if (esp->erev == fashme)
-                       esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2);
-               esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR));
-               esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT);
-               if (esp->erev == fashme) {
-                       /* This chip is really losing. */
-                       ESPHME(("HME["));
-                       /* Must latch fifo before reading the interrupt
-                        * register else garbage ends up in the FIFO
-                        * which confuses the driver utterly.
-                        * Happy Meal indeed....
-                        */
-                       ESPHME(("fifo_workaround]"));
-                       if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
-                           (esp->sreg2 & ESP_STAT2_F1BYTE))
-                               hme_fifo_read(esp);
-               }
-               if (!(esp->ireg & ESP_INTR_SR))
-                       return 0;
-               else
-                       return do_reset_complete;
-       }
-       /* Ho hum, target is taking forever... */
-       scp->SCp.sent_command = new_phase; /* so we don't recurse... */
-       return do_intr_end;
-}
-
-static inline int skipahead2(struct esp *esp, struct scsi_cmnd *scp,
-                            int prev_phase1, int prev_phase2, int new_phase)
-{
-       if (scp->SCp.sent_command != prev_phase1 &&
-           scp->SCp.sent_command != prev_phase2)
-               return 0;
-       if (ESP_IRQ_P(esp->dregs)) {
-               /* Yes, we are able to save an interrupt. */
-               if (esp->erev == fashme)
-                       esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2);
-               esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR));
-               esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT);
-               if (esp->erev == fashme) {
-                       /* This chip is really losing. */
-                       ESPHME(("HME["));
-
-                       /* Must latch fifo before reading the interrupt
-                        * register else garbage ends up in the FIFO
-                        * which confuses the driver utterly.
-                        * Happy Meal indeed....
-                        */
-                       ESPHME(("fifo_workaround]"));
-                       if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
-                           (esp->sreg2 & ESP_STAT2_F1BYTE))
-                               hme_fifo_read(esp);
-               }
-               if (!(esp->ireg & ESP_INTR_SR))
-                       return 0;
-               else
-                       return do_reset_complete;
-       }
-       /* Ho hum, target is taking forever... */
-       scp->SCp.sent_command = new_phase; /* so we don't recurse... */
-       return do_intr_end;
-}
-
-/* Now some dma helpers. */
-static void dma_setup(struct esp *esp, __u32 addr, int count, int write)
-{
-       u32 nreg = sbus_readl(esp->dregs + DMA_CSR);
-
-       if (write)
-               nreg |= DMA_ST_WRITE;
-       else
-               nreg &= ~(DMA_ST_WRITE);
-       nreg |= DMA_ENABLE;
-       sbus_writel(nreg, esp->dregs + DMA_CSR);
-       if (esp->dma->revision == dvmaesc1) {
-               /* This ESC gate array sucks! */
-               __u32 src = addr;
-               __u32 dest = src + count;
-
-               if (dest & (PAGE_SIZE - 1))
-                       count = PAGE_ALIGN(count);
-               sbus_writel(count, esp->dregs + DMA_COUNT);
-       }
-       sbus_writel(addr, esp->dregs + DMA_ADDR);
-}
-
-static void dma_drain(struct esp *esp)
-{
-       u32 tmp;
-
-       if (esp->dma->revision == dvmahme)
-               return;
-       if ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_FIFO_ISDRAIN) {
-               switch (esp->dma->revision) {
-               default:
-                       tmp |= DMA_FIFO_STDRAIN;
-                       sbus_writel(tmp, esp->dregs + DMA_CSR);
-
-               case dvmarev3:
-               case dvmaesc1:
-                       while (sbus_readl(esp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN)
-                               udelay(1);
-               };
-       }
-}
-
-static void dma_invalidate(struct esp *esp)
-{
-       u32 tmp;
-
-       if (esp->dma->revision == dvmahme) {
-               sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR);
-
-               esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
-                                        (DMA_PARITY_OFF | DMA_2CLKS |
-                                         DMA_SCSI_DISAB | DMA_INT_ENAB)) &
-                                       ~(DMA_ST_WRITE | DMA_ENABLE));
-
-               sbus_writel(0, esp->dregs + DMA_CSR);
-               sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR);
-
-               /* This is necessary to avoid having the SCSI channel
-                * engine lock up on us.
-                */
-               sbus_writel(0, esp->dregs + DMA_ADDR);
-       } else {
-               while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ)
-                       udelay(1);
-
-               tmp &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
-               tmp |= DMA_FIFO_INV;
-               sbus_writel(tmp, esp->dregs + DMA_CSR);
-               tmp &= ~DMA_FIFO_INV;
-               sbus_writel(tmp, esp->dregs + DMA_CSR);
-       }
-}
-
-static inline void dma_flashclear(struct esp *esp)
-{
-       dma_drain(esp);
-       dma_invalidate(esp);
-}
-
-static int dma_can_transfer(struct esp *esp, struct scsi_cmnd *sp)
-{
-       __u32 base, end, sz;
-
-       if (esp->dma->revision == dvmarev3) {
-               sz = sp->SCp.this_residual;
-               if (sz > 0x1000000)
-                       sz = 0x1000000;
-       } else {
-               base = ((__u32)((unsigned long)sp->SCp.ptr));
-               base &= (0x1000000 - 1);
-               end = (base + sp->SCp.this_residual);
-               if (end > 0x1000000)
-                       end = 0x1000000;
-               sz = (end - base);
-       }
-       return sz;
-}
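On the pre-rev3 DMA engines the transfer above must not cross a 16MB DVMA boundary, which is what the base/end arithmetic enforces.  A worked standalone example, assuming a buffer that starts 256 bytes below such a boundary:

#include <stdio.h>

int main(void)
{
        unsigned int ptr      = 0x00ffff00;     /* DVMA address (assumed)   */
        unsigned int residual = 0x2000;         /* bytes left in this chunk */
        unsigned int base, end, sz;

        base = ptr & (0x1000000 - 1);           /* offset within the 16MB window */
        end  = base + residual;
        if (end > 0x1000000)
                end = 0x1000000;
        sz = end - base;

        printf("can transfer 0x%x of 0x%x bytes\n", sz, residual);
        /* prints: can transfer 0x100 of 0x2000 bytes */
        return 0;
}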
-
-/* Misc. esp helper macros. */
-#define esp_setcount(__eregs, __cnt, __hme) \
-       sbus_writeb(((__cnt)&0xff), (__eregs) + ESP_TCLOW); \
-       sbus_writeb((((__cnt)>>8)&0xff), (__eregs) + ESP_TCMED); \
-       if (__hme) { \
-               sbus_writeb((((__cnt)>>16)&0xff), (__eregs) + FAS_RLO); \
-               sbus_writeb(0, (__eregs) + FAS_RHI); \
-       }
-
-#define esp_getcount(__eregs, __hme) \
-       ((sbus_readb((__eregs) + ESP_TCLOW)&0xff) | \
-        ((sbus_readb((__eregs) + ESP_TCMED)&0xff) << 8) | \
-         ((__hme) ? sbus_readb((__eregs) + FAS_RLO) << 16 : 0))
-
-#define fcount(__esp) \
-       (((__esp)->erev == fashme) ? \
-         (__esp)->hme_fifo_workaround_count : \
-         sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_FBYTES)
-
-#define fnzero(__esp) \
-       (((__esp)->erev == fashme) ? 0 : \
-        sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_ONOTZERO)
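The esp_setcount()/esp_getcount() macros above split the transfer count across TCLOW (bits 0-7), TCMED (bits 8-15) and, on HME only, FAS_RLO (bits 16-23).  A standalone sketch of that packing and unpacking for an assumed count of 0x012345:

#include <stdio.h>

int main(void)
{
        unsigned int count = 0x012345, hme = 1;
        unsigned char tclow, tcmed, fas_rlo;
        unsigned int readback;

        tclow   = count & 0xff;                 /* bits 0-7             */
        tcmed   = (count >> 8) & 0xff;          /* bits 8-15            */
        fas_rlo = (count >> 16) & 0xff;         /* bits 16-23, HME only */

        readback = tclow | (tcmed << 8) | (hme ? fas_rlo << 16 : 0);
        printf("0x%06x\n", readback);           /* prints 0x012345 */
        return 0;
}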
-
-/* XXX speculative nops unnecessary when continuing amidst a data phase
- * XXX even on esp100!!!  another case of flooding the bus with I/O reg
- * XXX writes...
- */
-#define esp_maybe_nop(__esp) \
-       if ((__esp)->erev == esp100) \
-               esp_cmd((__esp), ESP_CMD_NULL)
-
-#define sreg_to_dataphase(__sreg) \
-       ((((__sreg) & ESP_STAT_PMASK) == ESP_DOP) ? in_dataout : in_datain)
-
-/* The ESP100, when in synchronous data phase, can mistake a long final
- * REQ pulse from the target for an extra byte: it places whatever is on
- * the data lines into the fifo.  For now, we will assume when this
- * happens that the target is a bit quirky and we don't want to
- * be talking synchronously to it anyways.  Regardless, we need to
- * tell the ESP to eat the extraneous byte so that we can proceed
- * to the next phase.
- */
-static int esp100_sync_hwbug(struct esp *esp, struct scsi_cmnd *sp, int fifocnt)
-{
-       /* Do not touch this piece of code. */
-       if ((!(esp->erev == esp100)) ||
-           (!(sreg_datainp((esp->sreg = sbus_readb(esp->eregs + ESP_STATUS))) &&
-              !fifocnt) &&
-            !(sreg_dataoutp(esp->sreg) && !fnzero(esp)))) {
-               if (sp->SCp.phase == in_dataout)
-                       esp_cmd(esp, ESP_CMD_FLUSH);
-               return 0;
-       } else {
-               /* Async mode for this guy. */
-               build_sync_nego_msg(esp, 0, 0);
-
-               /* Ack the bogus byte, but set ATN first. */
-               esp_cmd(esp, ESP_CMD_SATN);
-               esp_cmd(esp, ESP_CMD_MOK);
-               return 1;
-       }
-}
-
-/* This closes the window during a selection with a reselect pending.  Because
- * we use DMA for the selection process, the FIFO should hold the correct
- * contents if we get reselected during this process.  So we just need to
- * ack the possible illegal cmd interrupt pending on the esp100.
- */
-static inline int esp100_reconnect_hwbug(struct esp *esp)
-{
-       u8 tmp;
-
-       if (esp->erev != esp100)
-               return 0;
-       tmp = sbus_readb(esp->eregs + ESP_INTRPT);
-       if (tmp & ESP_INTR_SR)
-               return 1;
-       return 0;
-}
-
-/* This verifies the BUSID bits during a reselection so that we know which
- * target is talking to us.
- */
-static inline int reconnect_target(struct esp *esp)
-{
-       int it, me = esp->scsi_id_mask, targ = 0;
-
-       if (2 != fcount(esp))
-               return -1;
-       if (esp->erev == fashme) {
-               /* HME does not latch its own BUS ID bits during
-                * a reselection.  Also the target number is given
-                * as an unsigned char, not as a sole bit number
-                * like the other ESPs do.
-                * Happy Meal indeed....
-                */
-               targ = esp->hme_fifo_workaround_buffer[0];
-       } else {
-               it = sbus_readb(esp->eregs + ESP_FDATA);
-               if (!(it & me))
-                       return -1;
-               it &= ~me;
-               if (it & (it - 1))
-                       return -1;
-               while (!(it & 1))
-                       targ++, it >>= 1;
-       }
-       return targ;
-}
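
On non-HME chips the reselection ID byte read from the FIFO must contain the
initiator's own ID bit plus exactly one other bit, and the loop above turns
that remaining bit into a target number.  A hedged standalone sketch of the
same decode (it adds an explicit zero check that the original leaves
implicit; the helper name is hypothetical):

/* Sketch only: decode a reselection ID byte the way the non-HME
 * branch of reconnect_target() does.  'my_mask' is the initiator's
 * own ID bit; returns the reselecting target number, or -1.
 */
static int decode_reselect_id(unsigned int id_byte, unsigned int my_mask)
{
        int targ = 0;

        if (!(id_byte & my_mask))       /* our own bit must be set */
                return -1;
        id_byte &= ~my_mask;
        if (id_byte == 0 || (id_byte & (id_byte - 1)))
                return -1;              /* exactly one other bit allowed */
        while (!(id_byte & 1)) {        /* bit position -> target number */
                targ++;
                id_byte >>= 1;
        }
        return targ;
}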
-
-/* This verifies the identify from the target so that we know which lun is
- * being reconnected.
- */
-static inline int reconnect_lun(struct esp *esp)
-{
-       int lun;
-
-       if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP)
-               return -1;
-       if (esp->erev == fashme)
-               lun = esp->hme_fifo_workaround_buffer[1];
-       else
-               lun = sbus_readb(esp->eregs + ESP_FDATA);
-
-       /* Yes, you read this correctly.  We report lun of zero
-        * if we see parity error.  ESP reports parity error for
-        * the lun byte, and this is the only way to hope to recover
-        * because the target is connected.
-        */
-       if (esp->sreg & ESP_STAT_PERR)
-               return 0;
-
-       /* Check for illegal bits being set in the lun. */
-       if ((lun & 0x40) || !(lun & 0x80))
-               return -1;
-
-       return lun & 7;
-}
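
The byte validated above follows the SCSI IDENTIFY message layout: bit 7 must
be set, bit 6 (meaningful only when sent by the initiator) must be clear, and
the LUN sits in the low three bits.  A tiny standalone sketch of that check,
under a hypothetical helper name:

/* Sketch only: validate a target's IDENTIFY byte as reconnect_lun()
 * does and extract the LUN, or return -1 on an illegal byte.
 */
static int identify_to_lun(unsigned int identify)
{
        if ((identify & 0x40) || !(identify & 0x80))
                return -1;
        return identify & 7;
}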
-
-/* This puts the driver in a state where it can revitalize a command that
- * is being continued due to reselection.
- */
-static inline void esp_connect(struct esp *esp, struct scsi_cmnd *sp)
-{
-       struct esp_device *esp_dev = sp->device->hostdata;
-
-       if (esp->prev_soff  != esp_dev->sync_max_offset ||
-           esp->prev_stp   != esp_dev->sync_min_period ||
-           (esp->erev > esp100a &&
-            esp->prev_cfg3 != esp->config3[sp->device->id])) {
-               esp->prev_soff = esp_dev->sync_max_offset;
-               esp->prev_stp = esp_dev->sync_min_period;
-               sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
-               sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
-               if (esp->erev > esp100a) {
-                       esp->prev_cfg3 = esp->config3[sp->device->id];
-                       sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
-               }
-       }
-       esp->current_SC = sp;
-}
-
-/* This will place the current working command back into the issue queue
- * if we are to receive a reselection amidst a selection attempt.
- */
-static inline void esp_reconnect(struct esp *esp, struct scsi_cmnd *sp)
-{
-       if (!esp->disconnected_SC)
-               ESPLOG(("esp%d: Weird, being reselected but disconnected "
-                       "command queue is empty.\n", esp->esp_id));
-       esp->snip = 0;
-       esp->current_SC = NULL;
-       sp->SCp.phase = not_issued;
-       append_SC(&esp->issue_SC, sp);
-}
-
-/* Begin message in phase. */
-static int esp_do_msgin(struct esp *esp)
-{
-       /* Must be very careful with the fifo on the HME */
-       if ((esp->erev != fashme) ||
-           !(sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_FEMPTY))
-               esp_cmd(esp, ESP_CMD_FLUSH);
-       esp_maybe_nop(esp);
-       esp_cmd(esp, ESP_CMD_TI);
-       esp->msgin_len = 1;
-       esp->msgin_ctr = 0;
-       esp_advance_phase(esp->current_SC, in_msgindone);
-       return do_work_bus;
-}
-
-/* This uses various DMA csr fields and the fifo flags count value to
- * determine how many bytes were successfully sent/received by the ESP.
- */
-static inline int esp_bytes_sent(struct esp *esp, int fifo_count)
-{
-       int rval = sbus_readl(esp->dregs + DMA_ADDR) - esp->esp_command_dvma;
-
-       if (esp->dma->revision == dvmarev1)
-               rval -= (4 - ((sbus_readl(esp->dregs + DMA_CSR) & DMA_READ_AHEAD)>>11));
-       return rval - fifo_count;
-}
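
esp_bytes_sent() above works out transfer progress from how far the DMA
address register has advanced past the command buffer's DVMA base, applies a
read-ahead correction on dvmarev1 engines, and subtracts whatever still sits
in the FIFO.  A standalone sketch with plain parameters standing in for the
register reads; the parameter names are assumptions, not driver API:

#include <stdint.h>

/* Sketch only: mirror esp_bytes_sent().  'read_ahead_field' is the
 * already-decoded DMA_READ_AHEAD value; it only matters on dvmarev1.
 */
static int bytes_actually_sent(uint32_t dma_addr, uint32_t dvma_base,
                               int is_dvmarev1, int read_ahead_field,
                               int fifo_count)
{
        int sent = (int)(dma_addr - dvma_base);

        if (is_dvmarev1)                /* same (4 - read_ahead) fixup */
                sent -= 4 - read_ahead_field;
        return sent - fifo_count;       /* bytes still in the FIFO were not sent */
}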
-
-static inline void advance_sg(struct scsi_cmnd *sp)
-{
-       ++sp->SCp.buffer;
-       --sp->SCp.buffers_residual;
-       sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer);
-       sp->SCp.ptr = (char *)((unsigned long)sg_dma_address(sp->SCp.buffer));
-}
-
-/* Please note that the way I've coded these routines is that I _always_
- * check for a disconnect during any and all information transfer
- * phases.  The SCSI standard states that the target _can_ cause a BUS
- * FREE condition by dropping all MSG/CD/IO/BSY signals.  Also note
- * that during information transfer phases the target controls every
- * change in phase, the only thing the initiator can do is "ask" for
- * a message out phase by driving ATN true.  The target can, and sometimes
- * will, completely ignore this request so we cannot assume anything when
- * we try to force a message out phase to abort/reset a target.  Most of
- * the time the target will eventually be nice and go to message out, so
- * we may have to hold on to our state about what we want to tell the target
- * for some period of time.
- */
-
-/* I think I have things working here correctly.  Even partial transfers
- * within a buffer or sub-buffer should not upset us at all no matter
- * how bad the target and/or ESP fucks things up.
- */
-static int esp_do_data(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr = esp->current_SC;
-       int thisphase, hmuch;
-
-       ESPDATA(("esp_do_data: "));
-       esp_maybe_nop(esp);
-       thisphase = sreg_to_dataphase(esp->sreg);
-       esp_advance_phase(SCptr, thisphase);
-       ESPDATA(("newphase<%s> ", (thisphase == in_datain) ? "DATAIN" : "DATAOUT"));
-       hmuch = dma_can_transfer(esp, SCptr);
-       if (hmuch > (64 * 1024) && (esp->erev != fashme))
-               hmuch = (64 * 1024);
-       ESPDATA(("hmuch<%d> ", hmuch));
-       esp->current_transfer_size = hmuch;
-
-       if (esp->erev == fashme) {
-               u32 tmp = esp->prev_hme_dmacsr;
-
-               /* Always set the ESP count registers first. */
-               esp_setcount(esp->eregs, hmuch, 1);
-
-               /* Get the DMA csr computed. */
-               tmp |= (DMA_SCSI_DISAB | DMA_ENABLE);
-               if (thisphase == in_datain)
-                       tmp |= DMA_ST_WRITE;
-               else
-                       tmp &= ~(DMA_ST_WRITE);
-               esp->prev_hme_dmacsr = tmp;
-
-               ESPDATA(("DMA|TI --> do_intr_end\n"));
-               if (thisphase == in_datain) {
-                       sbus_writel(hmuch, esp->dregs + DMA_COUNT);
-                       esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
-               } else {
-                       esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
-                       sbus_writel(hmuch, esp->dregs + DMA_COUNT);
-               }
-               sbus_writel((__u32)((unsigned long)SCptr->SCp.ptr), esp->dregs+DMA_ADDR);
-               sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR);
-       } else {
-               esp_setcount(esp->eregs, hmuch, 0);
-               dma_setup(esp, ((__u32)((unsigned long)SCptr->SCp.ptr)),
-                         hmuch, (thisphase == in_datain));
-               ESPDATA(("DMA|TI --> do_intr_end\n"));
-               esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
-       }
-       return do_intr_end;
-}
-
-/* See how successful the data transfer was. */
-static int esp_do_data_finale(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr = esp->current_SC;
-       struct esp_device *esp_dev = SCptr->device->hostdata;
-       int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0;
-
-       ESPDATA(("esp_do_data_finale: "));
-
-       if (SCptr->SCp.phase == in_datain) {
-               if (esp->sreg & ESP_STAT_PERR) {
-                       /* Yuck, parity error.  The ESP asserts ATN
-                        * so that we can go to message out phase
-                        * immediately and inform the target that
-                        * something bad happened.
-                        */
-                       ESPLOG(("esp%d: data bad parity detected.\n",
-                               esp->esp_id));
-                       esp->cur_msgout[0] = INITIATOR_ERROR;
-                       esp->msgout_len = 1;
-               }
-               dma_drain(esp);
-       }
-       dma_invalidate(esp);
-
-       /* This could happen for the above parity error case. */
-       if (esp->ireg != ESP_INTR_BSERV) {
-               /* Please go to msgout phase, please please please... */
-               ESPLOG(("esp%d: !BSERV after data, probably to msgout\n",
-                       esp->esp_id));
-               return esp_do_phase_determine(esp);
-       }       
-
-       /* Check for partial transfers and other horrible events.
-        * Note, here we read the real fifo flags register even
-        * on HME broken adapters because we skip the HME fifo
-        * workaround code in esp_handle() if we are doing data
-        * phase things.  We don't want to fuck directly with
-        * the fifo like that, especially if doing synchronous
-        * transfers!  Also, will need to double the count on
-        * HME if we are doing wide transfers, as the HME fifo
-        * will move and count 16-bit quantities during wide data.
-        * SMCC _and_ Qlogic can both bite me.
-        */
-       fifocnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES);
-       if (esp->erev != fashme)
-               ecount = esp_getcount(esp->eregs, 0);
-       bytes_sent = esp->current_transfer_size;
-
-       ESPDATA(("trans_sz(%d), ", bytes_sent));
-       if (esp->erev == fashme) {
-               if (!(esp->sreg & ESP_STAT_TCNT)) {
-                       ecount = esp_getcount(esp->eregs, 1);
-                       bytes_sent -= ecount;
-               }
-
-               /* Always subtract any cruft remaining in the FIFO. */
-               if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
-                       fifocnt <<= 1;
-               if (SCptr->SCp.phase == in_dataout)
-                       bytes_sent -= fifocnt;
-
-               /* I have an IBM disk which exhibits the following
-                * behavior during writes to it.  It disconnects in
-                * the middle of a partial transfer, the current sglist
-                * buffer is 1024 bytes, the disk stops data transfer
-                * at 512 bytes.
-                *
-                * However the FAS366 reports that 32 more bytes were
-                * transferred than really were.  This is precisely
-                * the size of a fully loaded FIFO in wide scsi mode.
-                * The FIFO state recorded indicates that it is empty.
-                *
-                * I have no idea if this is a bug in the FAS366 chip
-                * or a bug in the firmware on this IBM disk.  In any
-                * event the following seems to be a good workaround.  -DaveM
-                */
-               if (bytes_sent != esp->current_transfer_size &&
-                   SCptr->SCp.phase == in_dataout) {
-                       int mask = (64 - 1);
-
-                       if ((esp->prev_cfg3 & ESP_CONFIG3_EWIDE) == 0)
-                               mask >>= 1;
-
-                       if (bytes_sent & mask)
-                               bytes_sent -= (bytes_sent & mask);
-               }
-       } else {
-               if (!(esp->sreg & ESP_STAT_TCNT))
-                       bytes_sent -= ecount;
-               if (SCptr->SCp.phase == in_dataout)
-                       bytes_sent -= fifocnt;
-       }
-
-       ESPDATA(("bytes_sent(%d), ", bytes_sent));
-
-       /* If we were in synchronous mode, check for peculiarities. */
-       if (esp->erev == fashme) {
-               if (esp_dev->sync_max_offset) {
-                       if (SCptr->SCp.phase == in_dataout)
-                               esp_cmd(esp, ESP_CMD_FLUSH);
-               } else {
-                       esp_cmd(esp, ESP_CMD_FLUSH);
-               }
-       } else {
-               if (esp_dev->sync_max_offset)
-                       bogus_data = esp100_sync_hwbug(esp, SCptr, fifocnt);
-               else
-                       esp_cmd(esp, ESP_CMD_FLUSH);
-       }
-
-       /* Until we are sure of what has happened, we are certainly
-        * in the dark.
-        */
-       esp_advance_phase(SCptr, in_the_dark);
-
-       if (bytes_sent < 0) {
-               /* I've seen this happen due to lost state in this
-                * driver.  No idea why it happened, but allowing
-                * this value to be negative caused things to
-                * lock up.  This allows greater chance of recovery.
-                * In fact every time I've seen this, it has been
-                * a driver bug without question.
-                */
-               ESPLOG(("esp%d: yieee, bytes_sent < 0!\n", esp->esp_id));
-               ESPLOG(("esp%d: csz=%d fifocount=%d ecount=%d\n",
-                       esp->esp_id,
-                       esp->current_transfer_size, fifocnt, ecount));
-               ESPLOG(("esp%d: use_sg=%d ptr=%p this_residual=%d\n",
-                       esp->esp_id,
-                       SCptr->use_sg, SCptr->SCp.ptr, SCptr->SCp.this_residual));
-               ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id, 
-                       SCptr->device->id));
-               SCptr->device->borken = 1;
-               esp_dev->sync = 0;
-               bytes_sent = 0;
-       }
-
-       /* Update the state of our transfer. */
-       SCptr->SCp.ptr += bytes_sent;
-       SCptr->SCp.this_residual -= bytes_sent;
-       if (SCptr->SCp.this_residual < 0) {
-               /* shit */
-               ESPLOG(("esp%d: Data transfer overrun.\n", esp->esp_id));
-               SCptr->SCp.this_residual = 0;
-       }
-
-       /* Maybe continue. */
-       if (!bogus_data) {
-               ESPDATA(("!bogus_data, "));
-
-               /* NO MATTER WHAT, we advance the scatterlist,
-                * if the target should decide to disconnect
-                * in between scatter chunks (which is common)
-                * we could die horribly!  I used to have the sg
-                * advance occur only if we are going back into
-                * (or are staying in) a data phase, you can
-                * imagine the hell I went through trying to
-                * figure this out.
-                */
-               if (SCptr->use_sg && !SCptr->SCp.this_residual)
-                       advance_sg(SCptr);
-               if (sreg_datainp(esp->sreg) || sreg_dataoutp(esp->sreg)) {
-                       ESPDATA(("to more data\n"));
-                       return esp_do_data(esp);
-               }
-               ESPDATA(("to new phase\n"));
-               return esp_do_phase_determine(esp);
-       }
-       /* Bogus data, just wait for next interrupt. */
-       ESPLOG(("esp%d: bogus_data during end of data phase\n",
-               esp->esp_id));
-       return do_intr_end;
-}
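
One detail from esp_do_data_finale() worth isolating is the FAS366 write
workaround: when a data-out transfer ends short, the reported count is
rounded down to a FIFO-sized boundary, 64 bytes in wide mode and 32 bytes
otherwise.  A standalone sketch of just that rounding, with a hypothetical
helper name:

/* Sketch only: the FAS366 short-write workaround from
 * esp_do_data_finale(), rounding a short data-out count down to a
 * FIFO-sized boundary (64 bytes wide, 32 bytes narrow).
 */
static int round_down_short_write(int bytes_sent, int requested, int wide)
{
        int mask = wide ? (64 - 1) : (32 - 1);

        if (bytes_sent != requested)
                bytes_sent -= bytes_sent & mask;
        return bytes_sent;
}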
-
-/* We received a non-good status return at the end of
- * running a SCSI command.  This is used to decide if
- * we should clear our synchronous transfer state for
- * such a device when that happens.
- *
- * The idea is that when spinning up a disk or rewinding
- * a tape, we don't want to go into a loop re-negotiating
- * synchronous capabilities over and over.
- */
-static int esp_should_clear_sync(struct scsi_cmnd *sp)
-{
-       u8 cmd = sp->cmnd[0];
-
-       /* These cases are for spinning up a disk and
-        * waiting for that spinup to complete.
-        */
-       if (cmd == START_STOP)
-               return 0;
-
-       if (cmd == TEST_UNIT_READY)
-               return 0;
-
-       /* One more special case for SCSI tape drives,
-        * this is what is used to probe the device for
-        * completion of a rewind or tape load operation.
-        */
-       if (sp->device->type == TYPE_TAPE) {
-               if (cmd == MODE_SENSE)
-                       return 0;
-       }
-
-       return 1;
-}
-
-/* Either a command is completing or a target is dropping off the bus
- * to continue the command in the background so we can do other work.
- */
-static int esp_do_freebus(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr = esp->current_SC;
-       struct esp_device *esp_dev = SCptr->device->hostdata;
-       int rval;
-
-       rval = skipahead2(esp, SCptr, in_status, in_msgindone, in_freeing);
-       if (rval)
-               return rval;
-       if (esp->ireg != ESP_INTR_DC) {
-               ESPLOG(("esp%d: Target will not disconnect\n", esp->esp_id));
-               return do_reset_bus; /* target will not drop BSY... */
-       }
-       esp->msgout_len = 0;
-       esp->prevmsgout = NOP;
-       if (esp->prevmsgin == COMMAND_COMPLETE) {
-               /* Normal end of nexus. */
-               if (esp->disconnected_SC || (esp->erev == fashme))
-                       esp_cmd(esp, ESP_CMD_ESEL);
-
-               if (SCptr->SCp.Status != GOOD &&
-                   SCptr->SCp.Status != CONDITION_GOOD &&
-                   ((1<<SCptr->device->id) & esp->targets_present) &&
-                   esp_dev->sync &&
-                   esp_dev->sync_max_offset) {
-                       /* SCSI standard says that the synchronous capabilities
-                        * should be renegotiated at this point.  Most likely
-                        * we are about to request sense from this target
-                        * in which case we want to avoid using sync
-                        * transfers until we are sure of the current target
-                        * state.
-                        */
-                       ESPMISC(("esp: Status <%d> for target %d lun %d\n",
-                                SCptr->SCp.Status, SCptr->device->id, SCptr->device->lun));
-
-                       /* But don't do this when spinning up a disk at
-                        * boot time while we poll for completion as it
-                        * fills up the console with messages.  Also, tapes
-                        * can report not ready many times right after
-                        * loading up a tape.
-                        */
-                       if (esp_should_clear_sync(SCptr) != 0)
-                               esp_dev->sync = 0;
-               }
-               ESPDISC(("F<%02x,%02x>", SCptr->device->id, SCptr->device->lun));
-               esp_done(esp, ((SCptr->SCp.Status & 0xff) |
-                              ((SCptr->SCp.Message & 0xff)<<8) |
-                              (DID_OK << 16)));
-       } else if (esp->prevmsgin == DISCONNECT) {
-               /* Normal disconnect. */
-               esp_cmd(esp, ESP_CMD_ESEL);
-               ESPDISC(("D<%02x,%02x>", SCptr->device->id, SCptr->device->lun));
-               append_SC(&esp->disconnected_SC, SCptr);
-               esp->current_SC = NULL;
-               if (esp->issue_SC)
-                       esp_exec_cmd(esp);
-       } else {
-               /* Driver bug, we do not expect a disconnect here
-                * and should not have advanced the state engine
-                * to in_freeing.
-                */
-               ESPLOG(("esp%d: last msg not disc and not cmd cmplt.\n",
-                       esp->esp_id));
-               return do_reset_bus;
-       }
-       return do_intr_end;
-}
-
-/* When a reselect occurs, and we cannot find the command to
- * reconnect to in our queues, we do this.
- */
-static int esp_bad_reconnect(struct esp *esp)
-{
-       struct scsi_cmnd *sp;
-
-       ESPLOG(("esp%d: Eieeee, reconnecting unknown command!\n",
-               esp->esp_id));
-       ESPLOG(("QUEUE DUMP\n"));
-       sp = esp->issue_SC;
-       ESPLOG(("esp%d: issue_SC[", esp->esp_id));
-       while (sp) {
-               ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
-               sp = (struct scsi_cmnd *) sp->host_scribble;
-       }
-       ESPLOG(("]\n"));
-       sp = esp->current_SC;
-       ESPLOG(("esp%d: current_SC[", esp->esp_id));
-       if (sp)
-               ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
-       else
-               ESPLOG(("<NULL>"));
-       ESPLOG(("]\n"));
-       sp = esp->disconnected_SC;
-       ESPLOG(("esp%d: disconnected_SC[", esp->esp_id));
-       while (sp) {
-               ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
-               sp = (struct scsi_cmnd *) sp->host_scribble;
-       }
-       ESPLOG(("]\n"));
-       return do_reset_bus;
-}
-
-/* Do what is needed when a target tries to reconnect to us. */
-static int esp_do_reconnect(struct esp *esp)
-{
-       int lun, target;
-       struct scsi_cmnd *SCptr;
-
-       /* Check for all bogus conditions first. */
-       target = reconnect_target(esp);
-       if (target < 0) {
-               ESPDISC(("bad bus bits\n"));
-               return do_reset_bus;
-       }
-       lun = reconnect_lun(esp);
-       if (lun < 0) {
-               ESPDISC(("target=%2x, bad identify msg\n", target));
-               return do_reset_bus;
-       }
-
-       /* Things look ok... */
-       ESPDISC(("R<%02x,%02x>", target, lun));
-
-       /* Must not flush FIFO or DVMA on HME. */
-       if (esp->erev != fashme) {
-               esp_cmd(esp, ESP_CMD_FLUSH);
-               if (esp100_reconnect_hwbug(esp))
-                       return do_reset_bus;
-               esp_cmd(esp, ESP_CMD_NULL);
-       }
-
-       SCptr = remove_SC(&esp->disconnected_SC, (u8) target, (u8) lun);
-       if (!SCptr)
-               return esp_bad_reconnect(esp);
-
-       esp_connect(esp, SCptr);
-       esp_cmd(esp, ESP_CMD_MOK);
-
-       if (esp->erev == fashme)
-               sbus_writeb(((SCptr->device->id & 0xf) |
-                            (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT)),
-                           esp->eregs + ESP_BUSID);
-
-       /* Reconnect implies a restore pointers operation. */
-       esp_restore_pointers(esp, SCptr);
-
-       esp->snip = 0;
-       esp_advance_phase(SCptr, in_the_dark);
-       return do_intr_end;
-}
-
-/* End of NEXUS (hopefully), pick up status + message byte then leave if
- * all goes well.
- */
-static int esp_do_status(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr = esp->current_SC;
-       int intr, rval;
-
-       rval = skipahead1(esp, SCptr, in_the_dark, in_status);
-       if (rval)
-               return rval;
-       intr = esp->ireg;
-       ESPSTAT(("esp_do_status: "));
-       if (intr != ESP_INTR_DC) {
-               int message_out = 0; /* for parity problems */
-
-               /* Ack the message. */
-               ESPSTAT(("ack msg, "));
-               esp_cmd(esp, ESP_CMD_MOK);
-
-               if (esp->erev != fashme) {
-                       dma_flashclear(esp);
-
-                       /* Wait till the first bits settle. */
-                       while (esp->esp_command[0] == 0xff)
-                               udelay(1);
-               } else {
-                       esp->esp_command[0] = esp->hme_fifo_workaround_buffer[0];
-                       esp->esp_command[1] = esp->hme_fifo_workaround_buffer[1];
-               }
-
-               ESPSTAT(("got something, "));
-               /* ESP chimes in with one of
-                *
-                * 1) function done interrupt:
-                *      both status and message in bytes
-                *      are available
-                *
-                * 2) bus service interrupt:
-                *      only status byte was acquired
-                *
-                * 3) Anything else:
-                *      can't happen, but we test for it
-                *      anyways
-                *
-                * ALSO: If bad parity was detected on either
-                *       the status _or_ the message byte then
-                *       the ESP has asserted ATN on the bus
-                *       and we must therefore wait for the
-                *       next phase change.
-                */
-               if (intr & ESP_INTR_FDONE) {
-                       /* We got it all, hallelujah. */
-                       ESPSTAT(("got both, "));
-                       SCptr->SCp.Status = esp->esp_command[0];
-                       SCptr->SCp.Message = esp->esp_command[1];
-                       esp->prevmsgin = SCptr->SCp.Message;
-                       esp->cur_msgin[0] = SCptr->SCp.Message;
-                       if (esp->sreg & ESP_STAT_PERR) {
-                               /* There was bad parity for the
-                                * message byte, the status byte
-                                * was ok.
-                                */
-                               message_out = MSG_PARITY_ERROR;
-                       }
-               } else if (intr == ESP_INTR_BSERV) {
-                       /* Only got status byte. */
-                       ESPLOG(("esp%d: got status only, ", esp->esp_id));
-                       if (!(esp->sreg & ESP_STAT_PERR)) {
-                               SCptr->SCp.Status = esp->esp_command[0];
-                               SCptr->SCp.Message = 0xff;
-                       } else {
-                               /* The status byte had bad parity.
-                                * we leave the scsi_pointer Status
-                                * field alone as we set it to a default
-                                * of CHECK_CONDITION in esp_queue.
-                                */
-                               message_out = INITIATOR_ERROR;
-                       }
-               } else {
-                       /* This shouldn't happen ever. */
-                       ESPSTAT(("got bolixed\n"));
-                       esp_advance_phase(SCptr, in_the_dark);
-                       return esp_do_phase_determine(esp);
-               }
-
-               if (!message_out) {
-                       ESPSTAT(("status=%2x msg=%2x, ", SCptr->SCp.Status,
-                               SCptr->SCp.Message));
-                       if (SCptr->SCp.Message == COMMAND_COMPLETE) {
-                               ESPSTAT(("and was COMMAND_COMPLETE\n"));
-                               esp_advance_phase(SCptr, in_freeing);
-                               return esp_do_freebus(esp);
-                       } else {
-                               ESPLOG(("esp%d: and _not_ COMMAND_COMPLETE\n",
-                                       esp->esp_id));
-                               esp->msgin_len = esp->msgin_ctr = 1;
-                               esp_advance_phase(SCptr, in_msgindone);
-                               return esp_do_msgindone(esp);
-                       }
-               } else {
-                       /* With luck we'll be able to let the target
-                        * know that bad parity happened, it will know
-                        * which byte caused the problems and send it
-                        * again.  For the case where the status byte
-                        * receives bad parity, I do not believe most
-                        * targets recover very well.  We'll see.
-                        */
-                       ESPLOG(("esp%d: bad parity somewhere mout=%2x\n",
-                               esp->esp_id, message_out));
-                       esp->cur_msgout[0] = message_out;
-                       esp->msgout_len = esp->msgout_ctr = 1;
-                       esp_advance_phase(SCptr, in_the_dark);
-                       return esp_do_phase_determine(esp);
-               }
-       } else {
-               /* If we disconnect now, all hell breaks loose. */
-               ESPLOG(("esp%d: whoops, disconnect\n", esp->esp_id));
-               esp_advance_phase(SCptr, in_the_dark);
-               return esp_do_phase_determine(esp);
-       }
-}
-
-static int esp_enter_status(struct esp *esp)
-{
-       u8 thecmd = ESP_CMD_ICCSEQ;
-
-       esp_cmd(esp, ESP_CMD_FLUSH);
-       if (esp->erev != fashme) {
-               u32 tmp;
-
-               esp->esp_command[0] = esp->esp_command[1] = 0xff;
-               sbus_writeb(2, esp->eregs + ESP_TCLOW);
-               sbus_writeb(0, esp->eregs + ESP_TCMED);
-               tmp = sbus_readl(esp->dregs + DMA_CSR);
-               tmp |= (DMA_ST_WRITE | DMA_ENABLE);
-               sbus_writel(tmp, esp->dregs + DMA_CSR);
-               if (esp->dma->revision == dvmaesc1)
-                       sbus_writel(0x100, esp->dregs + DMA_COUNT);
-               sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR);
-               thecmd |= ESP_CMD_DMA;
-       }
-       esp_cmd(esp, thecmd);
-       esp_advance_phase(esp->current_SC, in_status);
-
-       return esp_do_status(esp);
-}
-
-static int esp_disconnect_amidst_phases(struct esp *esp)
-{
-       struct scsi_cmnd *sp = esp->current_SC;
-       struct esp_device *esp_dev = sp->device->hostdata;
-
-       /* This means real problems if we see this
-        * here.  Unless we were actually trying
-        * to force the device to abort/reset.
-        */
-       ESPLOG(("esp%d Disconnect amidst phases, ", esp->esp_id));
-       ESPLOG(("pphase<%s> cphase<%s>, ",
-               phase_string(sp->SCp.phase),
-               phase_string(sp->SCp.sent_command)));
-
-       if (esp->disconnected_SC != NULL || (esp->erev == fashme))
-               esp_cmd(esp, ESP_CMD_ESEL);
-
-       switch (esp->cur_msgout[0]) {
-       default:
-               /* We didn't expect this to happen at all. */
-               ESPLOG(("device is bolixed\n"));
-               esp_advance_phase(sp, in_tgterror);
-               esp_done(esp, (DID_ERROR << 16));
-               break;
-
-       case BUS_DEVICE_RESET:
-               ESPLOG(("device reset successful\n"));
-               esp_dev->sync_max_offset = 0;
-               esp_dev->sync_min_period = 0;
-               esp_dev->sync = 0;
-               esp_advance_phase(sp, in_resetdev);
-               esp_done(esp, (DID_RESET << 16));
-               break;
-
-       case ABORT:
-               ESPLOG(("device abort successful\n"));
-               esp_advance_phase(sp, in_abortone);
-               esp_done(esp, (DID_ABORT << 16));
-               break;
-
-       };
-       return do_intr_end;
-}
-
-static int esp_enter_msgout(struct esp *esp)
-{
-       esp_advance_phase(esp->current_SC, in_msgout);
-       return esp_do_msgout(esp);
-}
-
-static int esp_enter_msgin(struct esp *esp)
-{
-       esp_advance_phase(esp->current_SC, in_msgin);
-       return esp_do_msgin(esp);
-}
-
-static int esp_enter_cmd(struct esp *esp)
-{
-       esp_advance_phase(esp->current_SC, in_cmdbegin);
-       return esp_do_cmdbegin(esp);
-}
-
-static int esp_enter_badphase(struct esp *esp)
-{
-       ESPLOG(("esp%d: Bizarre bus phase %2x.\n", esp->esp_id,
-               esp->sreg & ESP_STAT_PMASK));
-       return do_reset_bus;
-}
-
-typedef int (*espfunc_t)(struct esp *);
-
-static espfunc_t phase_vector[] = {
-       esp_do_data,            /* ESP_DOP */
-       esp_do_data,            /* ESP_DIP */
-       esp_enter_cmd,          /* ESP_CMDP */
-       esp_enter_status,       /* ESP_STATP */
-       esp_enter_badphase,     /* ESP_STAT_PMSG */
-       esp_enter_badphase,     /* ESP_STAT_PMSG | ESP_STAT_PIO */
-       esp_enter_msgout,       /* ESP_MOP */
-       esp_enter_msgin,        /* ESP_MIP */
-};
-
-/* The target has control of the bus and we have to see where it has
- * taken us.
- */
-static int esp_do_phase_determine(struct esp *esp)
-{
-       if ((esp->ireg & ESP_INTR_DC) != 0)
-               return esp_disconnect_amidst_phases(esp);
-       return phase_vector[esp->sreg & ESP_STAT_PMASK](esp);
-}
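
esp_do_phase_determine() first checks for a disconnect and otherwise masks
the three bus-phase bits out of the saved status register to index
phase_vector[].  A small sketch of that table dispatch under assumed names
(the 0x7 mask mirrors ESP_STAT_PMASK covering the eight phases):

/* Sketch only: dispatch on the low three status-register phase bits
 * the way phase_vector[]/esp_do_phase_determine() do.
 */
typedef int (*phase_handler)(void *ctx);

static int dispatch_phase(phase_handler handlers[8],
                          unsigned int status_reg, void *ctx)
{
        return handlers[status_reg & 0x7](ctx); /* 0x7 ~ ESP_STAT_PMASK */
}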
-
-/* First interrupt after exec'ing a cmd comes here. */
-static int esp_select_complete(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr = esp->current_SC;
-       struct esp_device *esp_dev = SCptr->device->hostdata;
-       int cmd_bytes_sent, fcnt;
-
-       if (esp->erev != fashme)
-               esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS);
-
-       if (esp->erev == fashme)
-               fcnt = esp->hme_fifo_workaround_count;
-       else
-               fcnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES);
-
-       cmd_bytes_sent = esp_bytes_sent(esp, fcnt);
-       dma_invalidate(esp);
-
-       /* Let's check to see if a reselect happened
-        * while we were trying to select.  This must
-        * be checked first.
-        */
-       if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
-               esp_reconnect(esp, SCptr);
-               return esp_do_reconnect(esp);
-       }
-
-       /* Looks like things worked, we should see a bus service &
-        * a function complete interrupt at this point.  Note we
-        * are doing a direct comparison because we don't want to
-        * be fooled into thinking selection was successful if
-        * ESP_INTR_DC is set, see below.
-        */
-       if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
-               /* target speaks... */
-               esp->targets_present |= (1<<SCptr->device->id);
-
-               /* What if the target ignores the sdtr? */
-               if (esp->snip)
-                       esp_dev->sync = 1;
-
-               /* See how far, if at all, we got in getting
-                * the information out to the target.
-                */
-               switch (esp->seqreg) {
-               default:
-
-               case ESP_STEP_ASEL:
-                       /* Arbitration won, target selected, but
-                        * we are in some phase which is not command
-                        * phase nor is it message out phase.
-                        *
-                        * XXX We've confused the target, obviously.
-                        * XXX So clear its state, but we also end
-                        * XXX up clearing everyone else's.  That isn't
-                        * XXX so nice.  I'd like to just reset this
-                        * XXX target, but if I cannot even get its
-                        * XXX attention and finish selection to talk
-                        * XXX to it, there is not much more I can do.
-                        * XXX If we have a loaded bus we're going to
-                        * XXX spend the next second or so renegotiating
-                        * XXX for synchronous transfers.
-                        */
-                       ESPLOG(("esp%d: STEP_ASEL for tgt %d\n",
-                               esp->esp_id, SCptr->device->id));
-
-               case ESP_STEP_SID:
-                       /* Arbitration won, target selected, went
-                        * to message out phase, sent one message
-                        * byte, then we stopped.  ATN is asserted
-                        * on the SCSI bus and the target is still
-                        * there hanging on.  This is a legal
-                        * sequence step if we gave the ESP a select
-                        * and stop command.
-                        *
-                        * XXX See above, I could set the borken flag
-                        * XXX in the device struct and retry the
-                        * XXX command.  But would that help for
-                        * XXX tagged capable targets?
-                        */
-
-               case ESP_STEP_NCMD:
-                       /* Arbitration won, target selected, maybe
-                        * sent the one message byte in message out
-                        * phase, but we did not go to command phase
-                        * in the end.  Actually, we could have sent
-                        * only some of the message bytes if we tried
-                        * to send out the entire identify and tag
-                        * message using ESP_CMD_SA3.
-                        */
-                       cmd_bytes_sent = 0;
-                       break;
-
-               case ESP_STEP_PPC:
-                       /* No, not the powerPC pinhead.  Arbitration
-                        * won, all message bytes sent if we went to
-                        * message out phase, went to command phase
-                        * but only part of the command was sent.
-                        *
-                        * XXX I've seen this, but usually in conjunction
-                        * XXX with a gross error which appears to have
-                        * XXX occurred between the time I told the
-                        * XXX ESP to arbitrate and when I got the
-                        * XXX interrupt.  Could I have misloaded the
-                        * XXX command bytes into the fifo?  Actually,
-                        * XXX I most likely missed a phase, and therefore
-                        * XXX went into never never land and didn't even
-                        * XXX know it.  That was the old driver though.
-                        * XXX What is even more peculiar is that the ESP
-                        * XXX showed the proper function complete and
-                        * XXX bus service bits in the interrupt register.
-                        */
-
-               case ESP_STEP_FINI4:
-               case ESP_STEP_FINI5:
-               case ESP_STEP_FINI6:
-               case ESP_STEP_FINI7:
-                       /* Account for the identify message */
-                       if (SCptr->SCp.phase == in_slct_norm)
-                               cmd_bytes_sent -= 1;
-               };
-
-               if (esp->erev != fashme)
-                       esp_cmd(esp, ESP_CMD_NULL);
-
-               /* Be careful, we could really get fucked during synchronous
-                * data transfers if we try to flush the fifo now.
-                */
-               if ((esp->erev != fashme) && /* not a Happy Meal and... */
-                   !fcnt && /* Fifo is empty and... */
-                   /* either we are not doing synchronous transfers or... */
-                   (!esp_dev->sync_max_offset ||
-                    /* We are not going into data in phase. */
-                    ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
-                       esp_cmd(esp, ESP_CMD_FLUSH); /* flush is safe */
-
-               /* See how far we got if this is not a slow command. */
-               if (!esp->esp_slowcmd) {
-                       if (cmd_bytes_sent < 0)
-                               cmd_bytes_sent = 0;
-                       if (cmd_bytes_sent != SCptr->cmd_len) {
-                               /* Crapola, mark it as a slowcmd
-                                * so that we have some chance of
-                                * keeping the command alive with
-                                * good luck.
-                                *
-                                * XXX Actually, if we didn't send it all
-                                * XXX this means either we didn't set things
-                                * XXX up properly (driver bug) or the target
-                                * XXX or the ESP detected parity on one of
-                                * XXX the command bytes.  This makes much
-                                * XXX more sense, and therefore this code
-                                * XXX should be changed to send out a
-                                * XXX parity error message or if the status
-                                * XXX register shows no parity error then
-                                * XXX just expect the target to bring the
-                                * XXX bus into message in phase so that it
-                                * XXX can send us the parity error message.
-                                * XXX SCSI sucks...
-                                */
-                               esp->esp_slowcmd = 1;
-                               esp->esp_scmdp = &(SCptr->cmnd[cmd_bytes_sent]);
-                               esp->esp_scmdleft = (SCptr->cmd_len - cmd_bytes_sent);
-                       }
-               }
-
-               /* Now figure out where we went. */
-               esp_advance_phase(SCptr, in_the_dark);
-               return esp_do_phase_determine(esp);
-       }
-
-       /* Did the target even make it? */
-       if (esp->ireg == ESP_INTR_DC) {
-               /* wheee... nobody there or they didn't like
-                * what we told it to do, clean up.
-                */
-
-               /* If anyone is off the bus, but working on
-                * a command in the background for us, tell
-                * the ESP to listen for them.
-                */
-               if (esp->disconnected_SC)
-                       esp_cmd(esp, ESP_CMD_ESEL);
-
-               if (((1<<SCptr->device->id) & esp->targets_present) &&
-                   esp->seqreg != 0 &&
-                   (esp->cur_msgout[0] == EXTENDED_MESSAGE) &&
-                   (SCptr->SCp.phase == in_slct_msg ||
-                    SCptr->SCp.phase == in_slct_stop)) {
-                       /* shit */
-                       esp->snip = 0;
-                       ESPLOG(("esp%d: Failed synchronous negotiation for target %d "
-                               "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun));
-                       esp_dev->sync_max_offset = 0;
-                       esp_dev->sync_min_period = 0;
-                       esp_dev->sync = 1; /* so we don't negotiate again */
-
-                       /* Run the command again, this time though we
-                        * won't try to negotiate for synchronous transfers.
-                        *
-                        * XXX I'd like to do something like send an
-                        * XXX INITIATOR_ERROR or ABORT message to the
-                        * XXX target to tell it, "Sorry I confused you,
-                        * XXX please come back and I will be nicer next
-                        * XXX time".  But that requires having the target
-                        * XXX on the bus, and it has dropped BSY on us.
-                        */
-                       esp->current_SC = NULL;
-                       esp_advance_phase(SCptr, not_issued);
-                       prepend_SC(&esp->issue_SC, SCptr);
-                       esp_exec_cmd(esp);
-                       return do_intr_end;
-               }
-
-               /* Ok, this is normal, this is what we see during boot
-                * or whenever when we are scanning the bus for targets.
-                * But first make sure that is really what is happening.
-                */
-               if (((1<<SCptr->device->id) & esp->targets_present)) {
-                       ESPLOG(("esp%d: Warning, live target %d not responding to "
-                               "selection.\n", esp->esp_id, SCptr->device->id));
-
-                       /* This _CAN_ happen.  The SCSI standard states that
-                        * the target is to _not_ respond to selection if
-                        * _it_ detects bad parity on the bus for any reason.
-                        * Therefore, we assume that if we've talked successfully
-                        * to this target before, bad parity is the problem.
-                        */
-                       esp_done(esp, (DID_PARITY << 16));
-               } else {
-                       /* Else, there really isn't anyone there. */
-                       ESPMISC(("esp: selection failure, maybe nobody there?\n"));
-                       ESPMISC(("esp: target %d lun %d\n",
-                                SCptr->device->id, SCptr->device->lun));
-                       esp_done(esp, (DID_BAD_TARGET << 16));
-               }
-               return do_intr_end;
-       }
-
-       ESPLOG(("esp%d: Selection failure.\n", esp->esp_id));
-       printk("esp%d: Currently -- ", esp->esp_id);
-       esp_print_ireg(esp->ireg); printk(" ");
-       esp_print_statreg(esp->sreg); printk(" ");
-       esp_print_seqreg(esp->seqreg); printk("\n");
-       printk("esp%d: New -- ", esp->esp_id);
-       esp->sreg = sbus_readb(esp->eregs + ESP_STATUS);
-       esp->seqreg = sbus_readb(esp->eregs + ESP_SSTEP);
-       esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT);
-       esp_print_ireg(esp->ireg); printk(" ");
-       esp_print_statreg(esp->sreg); printk(" ");
-       esp_print_seqreg(esp->seqreg); printk("\n");
-       ESPLOG(("esp%d: resetting bus\n", esp->esp_id));
-       return do_reset_bus; /* ugh... */
-}
-
-/* Continue reading bytes for msgin phase. */
-static int esp_do_msgincont(struct esp *esp)
-{
-       if (esp->ireg & ESP_INTR_BSERV) {
-               /* in the right phase too? */
-               if ((esp->sreg & ESP_STAT_PMASK) == ESP_MIP) {
-                       /* phew... */
-                       esp_cmd(esp, ESP_CMD_TI);
-                       esp_advance_phase(esp->current_SC, in_msgindone);
-                       return do_intr_end;
-               }
-
-               /* We changed phase but ESP shows bus service,
-                * in this case it is most likely that we, the
-                * hacker who has been up for 20hrs straight
-                * staring at the screen, drowned in coffee
-                * smelling like wretched cigarette ashes
-                * have miscoded something..... so, try to
-                * recover as best we can.
-                */
-               ESPLOG(("esp%d: message in mis-carriage.\n", esp->esp_id));
-       }
-       esp_advance_phase(esp->current_SC, in_the_dark);
-       return do_phase_determine;
-}
-
-static int check_singlebyte_msg(struct esp *esp)
-{
-       esp->prevmsgin = esp->cur_msgin[0];
-       if (esp->cur_msgin[0] & 0x80) {
-               /* wheee... */
-               ESPLOG(("esp%d: target sends identify amidst phases\n",
-                       esp->esp_id));
-               esp_advance_phase(esp->current_SC, in_the_dark);
-               return 0;
-       } else if (((esp->cur_msgin[0] & 0xf0) == 0x20) ||
-                  (esp->cur_msgin[0] == EXTENDED_MESSAGE)) {
-               esp->msgin_len = 2;
-               esp_advance_phase(esp->current_SC, in_msgincont);
-               return 0;
-       }
-       esp_advance_phase(esp->current_SC, in_the_dark);
-       switch (esp->cur_msgin[0]) {
-       default:
-               /* We don't want to hear about it. */
-               ESPLOG(("esp%d: msg %02x which we don't know about\n", esp->esp_id,
-                       esp->cur_msgin[0]));
-               return MESSAGE_REJECT;
-
-       case NOP:
-               ESPLOG(("esp%d: target %d sends a nop\n", esp->esp_id,
-                       esp->current_SC->device->id));
-               return 0;
-
-       case RESTORE_POINTERS:
-               /* In this case we might also have to backup the
-                * "slow command" pointer.  It is rare to get such
-                * a save/restore pointer sequence so early in the
-                * bus transition sequences, but cover it.
-                */
-               if (esp->esp_slowcmd) {
-                       esp->esp_scmdleft = esp->current_SC->cmd_len;
-                       esp->esp_scmdp = &esp->current_SC->cmnd[0];
-               }
-               esp_restore_pointers(esp, esp->current_SC);
-               return 0;
-
-       case SAVE_POINTERS:
-               esp_save_pointers(esp, esp->current_SC);
-               return 0;
-
-       case COMMAND_COMPLETE:
-       case DISCONNECT:
-               /* Freeing the bus, let it go. */
-               esp->current_SC->SCp.phase = in_freeing;
-               return 0;
-
-       case MESSAGE_REJECT:
-               ESPMISC(("msg reject, "));
-               if (esp->prevmsgout == EXTENDED_MESSAGE) {
-                       struct esp_device *esp_dev = esp->current_SC->device->hostdata;
-
-                       /* Doesn't look like this target can
-                        * do synchronous or WIDE transfers.
-                        */
-                       ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n"));
-                       esp_dev->sync = 1;
-                       esp_dev->wide = 1;
-                       esp_dev->sync_min_period = 0;
-                       esp_dev->sync_max_offset = 0;
-                       return 0;
-               } else {
-                       ESPMISC(("not sync nego, sending ABORT\n"));
-                       return ABORT;
-               }
-       };
-}
-
-/* Target negotiates for synchronous transfers before we do, this
- * is legal although very strange.  What is even funnier is that
- * the SCSI2 standard specifically recommends against targets doing
- * this because so many initiators cannot cope with this occurring.
- */
-static int target_with_ants_in_pants(struct esp *esp,
-                                    struct scsi_cmnd *SCptr,
-                                    struct esp_device *esp_dev)
-{
-       if (esp_dev->sync || SCptr->device->borken) {
-               /* sorry, no can do */
-               ESPSDTR(("forcing to async, "));
-               build_sync_nego_msg(esp, 0, 0);
-               esp_dev->sync = 1;
-               esp->snip = 1;
-               ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id));
-               esp_advance_phase(SCptr, in_the_dark);
-               return EXTENDED_MESSAGE;
-       }
-
-       /* Ok, we'll check them out... */
-       return 0;
-}
-
-static void sync_report(struct esp *esp)
-{
-       int msg3, msg4;
-       char *type;
-
-       msg3 = esp->cur_msgin[3];
-       msg4 = esp->cur_msgin[4];
-       if (msg4) {
-               int hz = 1000000000 / (msg3 * 4);
-               int integer = hz / 1000000;
-               int fraction = (hz - (integer * 1000000)) / 10000;
-               if ((esp->erev == fashme) &&
-                   (esp->config3[esp->current_SC->device->id] & ESP_CONFIG3_EWIDE)) {
-                       type = "FAST-WIDE";
-                       integer <<= 1;
-                       fraction <<= 1;
-               } else if ((msg3 * 4) < 200) {
-                       type = "FAST";
-               } else {
-                       type = "synchronous";
-               }
-
-               /* Do not transform this back into one big printk
-                * again, it triggers a bug in our sparc64-gcc272
-                * sibling call optimization.  -DaveM
-                */
-               ESPLOG((KERN_INFO "esp%d: target %d ",
-                       esp->esp_id, esp->current_SC->device->id));
-               ESPLOG(("[period %dns offset %d %d.%02dMHz ",
-                       (int) msg3 * 4, (int) msg4,
-                       integer, fraction));
-               ESPLOG(("%s SCSI%s]\n", type,
-                       (((msg3 * 4) < 200) ? "-II" : "")));
-       } else {
-               ESPLOG((KERN_INFO "esp%d: target %d asynchronous\n",
-                       esp->esp_id, esp->current_SC->device->id));
-       }
-}
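
sync_report() above converts the SDTR period byte, which is in units of 4 ns,
into a MHz figure with two decimal digits, doubling it when the HME is
running wide.  A standalone sketch of that arithmetic, with printf in place
of the ESPLOG macros and an illustrative function name:

#include <stdio.h>

/* Sketch only: the rate arithmetic from sync_report().  The SDTR
 * period byte is in 4 ns units; offset 0 means asynchronous.
 */
static void report_sync_rate(int period_byte, int offset, int wide)
{
        int hz, whole, frac;

        if (!offset) {
                printf("asynchronous\n");
                return;
        }
        hz = 1000000000 / (period_byte * 4);
        whole = hz / 1000000;
        frac = (hz - whole * 1000000) / 10000;  /* two decimal digits */
        if (wide) {                             /* wide doubles the rate */
                whole <<= 1;
                frac <<= 1;
        }
        printf("period %dns offset %d %d.%02dMHz\n",
               period_byte * 4, offset, whole, frac);
}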
-
-static int check_multibyte_msg(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr = esp->current_SC;
-       struct esp_device *esp_dev = SCptr->device->hostdata;
-       u8 regval = 0;
-       int message_out = 0;
-
-       ESPSDTR(("chk multibyte msg: "));
-       if (esp->cur_msgin[2] == EXTENDED_SDTR) {
-               int period = esp->cur_msgin[3];
-               int offset = esp->cur_msgin[4];
-
-               ESPSDTR(("is sync nego response, "));
-               if (!esp->snip) {
-                       int rval;
-
-                       /* Target negotiates first! */
-                       ESPSDTR(("target jumps the gun, "));
-                       message_out = EXTENDED_MESSAGE; /* we must respond */
-                       rval = target_with_ants_in_pants(esp, SCptr, esp_dev);
-                       if (rval)
-                               return rval;
-               }
-
-               ESPSDTR(("examining sdtr, "));
-
-               /* Offset cannot be larger than ESP fifo size. */
-               if (offset > 15) {
-                       ESPSDTR(("offset too big %2x, ", offset));
-                       offset = 15;
-                       ESPSDTR(("sending back new offset\n"));
-                       build_sync_nego_msg(esp, period, offset);
-                       return EXTENDED_MESSAGE;
-               }
-
-               if (offset && period > esp->max_period) {
-                       /* Yeee, async for this slow device. */
-                       ESPSDTR(("period too long %2x, ", period));
-                       build_sync_nego_msg(esp, 0, 0);
-                       ESPSDTR(("hoping for msgout\n"));
-                       esp_advance_phase(esp->current_SC, in_the_dark);
-                       return EXTENDED_MESSAGE;
-               } else if (offset && period < esp->min_period) {
-                       ESPSDTR(("period too short %2x, ", period));
-                       period = esp->min_period;
-                       if (esp->erev > esp236)
-                               regval = 4;
-                       else
-                               regval = 5;
-               } else if (offset) {
-                       int tmp;
-
-                       ESPSDTR(("period is ok, "));
-                       tmp = esp->ccycle / 1000;
-                       regval = (((period << 2) + tmp - 1) / tmp);
-                       if (regval && ((esp->erev == fas100a ||
-                                       esp->erev == fas236  ||
-                                       esp->erev == fashme))) {
-                               if (period >= 50)
-                                       regval--;
-                       }
-               }
-
-               if (offset) {
-                       u8 bit;
-
-                       esp_dev->sync_min_period = (regval & 0x1f);
-                       esp_dev->sync_max_offset = (offset | esp->radelay);
-                       if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) {
-                               if ((esp->erev == fas100a) || (esp->erev == fashme))
-                                       bit = ESP_CONFIG3_FAST;
-                               else
-                                       bit = ESP_CONFIG3_FSCSI;
-                               if (period < 50) {
-                                       /* On FAS366, if using fast-20 synchronous transfers
-                                        * we need to make sure the REQ/ACK assert/deassert
-                                        * control bits are clear.
-                                        */
-                                       if (esp->erev == fashme)
-                                               esp_dev->sync_max_offset &= ~esp->radelay;
-                                       esp->config3[SCptr->device->id] |= bit;
-                               } else {
-                                       esp->config3[SCptr->device->id] &= ~bit;
-                               }
-                               esp->prev_cfg3 = esp->config3[SCptr->device->id];
-                               sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
-                       }
-                       esp->prev_soff = esp_dev->sync_max_offset;
-                       esp->prev_stp = esp_dev->sync_min_period;
-                       sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
-                       sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
-                       ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n",
-                                esp_dev->sync_max_offset,
-                                esp_dev->sync_min_period,
-                                esp->config3[SCptr->device->id]));
-
-                       esp->snip = 0;
-               } else if (esp_dev->sync_max_offset) {
-                       u8 bit;
-
-                       /* back to async mode */
-                       ESPSDTR(("unacceptable sync nego, forcing async\n"));
-                       esp_dev->sync_max_offset = 0;
-                       esp_dev->sync_min_period = 0;
-                       esp->prev_soff = 0;
-                       esp->prev_stp = 0;
-                       sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
-                       sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
-                       if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) {
-                               if ((esp->erev == fas100a) || (esp->erev == fashme))
-                                       bit = ESP_CONFIG3_FAST;
-                               else
-                                       bit = ESP_CONFIG3_FSCSI;
-                               esp->config3[SCptr->device->id] &= ~bit;
-                               esp->prev_cfg3 = esp->config3[SCptr->device->id];
-                               sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
-                       }
-               }
-
-               sync_report(esp);
-
-               ESPSDTR(("chk multibyte msg: sync is known, "));
-               esp_dev->sync = 1;
-
-               if (message_out) {
-                       ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n",
-                               esp->esp_id));
-                       build_sync_nego_msg(esp, period, offset);
-                       esp_advance_phase(SCptr, in_the_dark);
-                       return EXTENDED_MESSAGE;
-               }
-
-               ESPSDTR(("returning zero\n"));
-               esp_advance_phase(SCptr, in_the_dark); /* ...or else! */
-               return 0;
-       } else if (esp->cur_msgin[2] == EXTENDED_WDTR) {
-               int size = 8 << esp->cur_msgin[3];
-
-               esp->wnip = 0;
-               if (esp->erev != fashme) {
-                       ESPLOG(("esp%d: AIEEE wide msg received and not HME.\n",
-                               esp->esp_id));
-                       message_out = MESSAGE_REJECT;
-               } else if (size > 16) {
-                       ESPLOG(("esp%d: AIEEE wide transfer for %d size "
-                               "not supported.\n", esp->esp_id, size));
-                       message_out = MESSAGE_REJECT;
-               } else {
-                       /* Things look good; let's see what we got. */
-                       if (size == 16) {
-                               /* Set config 3 register for this target. */
-                               esp->config3[SCptr->device->id] |= ESP_CONFIG3_EWIDE;
-                       } else {
-                               /* Just make sure it was one byte sized. */
-                               if (size != 8) {
-                                       ESPLOG(("esp%d: Aieee, wide nego of %d size.\n",
-                                               esp->esp_id, size));
-                                       message_out = MESSAGE_REJECT;
-                                       goto finish;
-                               }
-                               /* Pure paranoia. */
-                               esp->config3[SCptr->device->id] &= ~(ESP_CONFIG3_EWIDE);
-                       }
-                       esp->prev_cfg3 = esp->config3[SCptr->device->id];
-                       sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
-
-                       /* Regardless, next try for sync transfers. */
-                       build_sync_nego_msg(esp, esp->sync_defp, 15);
-                       esp_dev->sync = 1;
-                       esp->snip = 1;
-                       message_out = EXTENDED_MESSAGE;
-               }
-       } else if (esp->cur_msgin[2] == EXTENDED_MODIFY_DATA_POINTER) {
-               ESPLOG(("esp%d: rejecting modify data ptr msg\n", esp->esp_id));
-               message_out = MESSAGE_REJECT;
-       }
-finish:
-       esp_advance_phase(SCptr, in_the_dark);
-       return message_out;
-}
-
-static int esp_do_msgindone(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr = esp->current_SC;
-       int message_out = 0, it = 0, rval;
-
-       rval = skipahead1(esp, SCptr, in_msgin, in_msgindone);
-       if (rval)
-               return rval;
-       if (SCptr->SCp.sent_command != in_status) {
-               if (!(esp->ireg & ESP_INTR_DC)) {
-                       if (esp->msgin_len && (esp->sreg & ESP_STAT_PERR)) {
-                               message_out = MSG_PARITY_ERROR;
-                               esp_cmd(esp, ESP_CMD_FLUSH);
-                       } else if (esp->erev != fashme &&
-                         (it = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES)) != 1) {
-                               /* We certainly dropped the ball somewhere. */
-                               message_out = INITIATOR_ERROR;
-                               esp_cmd(esp, ESP_CMD_FLUSH);
-                       } else if (!esp->msgin_len) {
-                               if (esp->erev == fashme)
-                                       it = esp->hme_fifo_workaround_buffer[0];
-                               else
-                                       it = sbus_readb(esp->eregs + ESP_FDATA);
-                               esp_advance_phase(SCptr, in_msgincont);
-                       } else {
-                               /* it is ok and we want it */
-                               if (esp->erev == fashme)
-                                       it = esp->cur_msgin[esp->msgin_ctr] =
-                                               esp->hme_fifo_workaround_buffer[0];
-                               else
-                                       it = esp->cur_msgin[esp->msgin_ctr] =
-                                               sbus_readb(esp->eregs + ESP_FDATA);
-                               esp->msgin_ctr++;
-                       }
-               } else {
-                       esp_advance_phase(SCptr, in_the_dark);
-                       return do_work_bus;
-               }
-       } else {
-               it = esp->cur_msgin[0];
-       }
-       if (!message_out && esp->msgin_len) {
-               if (esp->msgin_ctr < esp->msgin_len) {
-                       esp_advance_phase(SCptr, in_msgincont);
-               } else if (esp->msgin_len == 1) {
-                       message_out = check_singlebyte_msg(esp);
-               } else if (esp->msgin_len == 2) {
-                       if (esp->cur_msgin[0] == EXTENDED_MESSAGE) {
-                               if ((it + 2) >= 15) {
-                                       message_out = MESSAGE_REJECT;
-                               } else {
-                                       esp->msgin_len = (it + 2);
-                                       esp_advance_phase(SCptr, in_msgincont);
-                               }
-                       } else {
-                               message_out = MESSAGE_REJECT; /* foo on you */
-                       }
-               } else {
-                       message_out = check_multibyte_msg(esp);
-               }
-       }
-       if (message_out < 0) {
-               return -message_out;
-       } else if (message_out) {
-               if (((message_out != 1) &&
-                    ((message_out < 0x20) || (message_out & 0x80))))
-                       esp->msgout_len = 1;
-               esp->cur_msgout[0] = message_out;
-               esp_cmd(esp, ESP_CMD_SATN);
-               esp_advance_phase(SCptr, in_the_dark);
-               esp->msgin_len = 0;
-       }
-       esp->sreg = sbus_readb(esp->eregs + ESP_STATUS);
-       esp->sreg &= ~(ESP_STAT_INTR);
-       if ((esp->sreg & (ESP_STAT_PMSG|ESP_STAT_PCD)) == (ESP_STAT_PMSG|ESP_STAT_PCD))
-               esp_cmd(esp, ESP_CMD_MOK);
-       if ((SCptr->SCp.sent_command == in_msgindone) &&
-           (SCptr->SCp.phase == in_freeing))
-               return esp_do_freebus(esp);
-       return do_intr_end;
-}
-
-static int esp_do_cmdbegin(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr = esp->current_SC;
-
-       esp_advance_phase(SCptr, in_cmdend);
-       if (esp->erev == fashme) {
-               u32 tmp = sbus_readl(esp->dregs + DMA_CSR);
-               int i;
-
-               for (i = 0; i < esp->esp_scmdleft; i++)
-                       esp->esp_command[i] = *esp->esp_scmdp++;
-               esp->esp_scmdleft = 0;
-               esp_cmd(esp, ESP_CMD_FLUSH);
-               esp_setcount(esp->eregs, i, 1);
-               esp_cmd(esp, (ESP_CMD_DMA | ESP_CMD_TI));
-               tmp |= (DMA_SCSI_DISAB | DMA_ENABLE);
-               tmp &= ~(DMA_ST_WRITE);
-               sbus_writel(i, esp->dregs + DMA_COUNT);
-               sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR);
-               sbus_writel(tmp, esp->dregs + DMA_CSR);
-       } else {
-               u8 tmp;
-
-               esp_cmd(esp, ESP_CMD_FLUSH);
-               tmp = *esp->esp_scmdp++;
-               esp->esp_scmdleft--;
-               sbus_writeb(tmp, esp->eregs + ESP_FDATA);
-               esp_cmd(esp, ESP_CMD_TI);
-       }
-       return do_intr_end;
-}
-
-static int esp_do_cmddone(struct esp *esp)
-{
-       if (esp->erev == fashme)
-               dma_invalidate(esp);
-       else
-               esp_cmd(esp, ESP_CMD_NULL);
-
-       if (esp->ireg & ESP_INTR_BSERV) {
-               esp_advance_phase(esp->current_SC, in_the_dark);
-               return esp_do_phase_determine(esp);
-       }
-
-       ESPLOG(("esp%d: in do_cmddone() but didn't get BSERV interrupt.\n",
-               esp->esp_id));
-       return do_reset_bus;
-}
-
-static int esp_do_msgout(struct esp *esp)
-{
-       esp_cmd(esp, ESP_CMD_FLUSH);
-       switch (esp->msgout_len) {
-       case 1:
-               if (esp->erev == fashme)
-                       hme_fifo_push(esp, &esp->cur_msgout[0], 1);
-               else
-                       sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA);
-
-               esp_cmd(esp, ESP_CMD_TI);
-               break;
-
-       case 2:
-               esp->esp_command[0] = esp->cur_msgout[0];
-               esp->esp_command[1] = esp->cur_msgout[1];
-
-               if (esp->erev == fashme) {
-                       hme_fifo_push(esp, &esp->cur_msgout[0], 2);
-                       esp_cmd(esp, ESP_CMD_TI);
-               } else {
-                       dma_setup(esp, esp->esp_command_dvma, 2, 0);
-                       esp_setcount(esp->eregs, 2, 0);
-                       esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
-               }
-               break;
-
-       case 4:
-               esp->esp_command[0] = esp->cur_msgout[0];
-               esp->esp_command[1] = esp->cur_msgout[1];
-               esp->esp_command[2] = esp->cur_msgout[2];
-               esp->esp_command[3] = esp->cur_msgout[3];
-               esp->snip = 1;
-
-               if (esp->erev == fashme) {
-                       hme_fifo_push(esp, &esp->cur_msgout[0], 4);
-                       esp_cmd(esp, ESP_CMD_TI);
-               } else {
-                       dma_setup(esp, esp->esp_command_dvma, 4, 0);
-                       esp_setcount(esp->eregs, 4, 0);
-                       esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
-               }
-               break;
-
-       case 5:
-               esp->esp_command[0] = esp->cur_msgout[0];
-               esp->esp_command[1] = esp->cur_msgout[1];
-               esp->esp_command[2] = esp->cur_msgout[2];
-               esp->esp_command[3] = esp->cur_msgout[3];
-               esp->esp_command[4] = esp->cur_msgout[4];
-               esp->snip = 1;
-
-               if (esp->erev == fashme) {
-                       hme_fifo_push(esp, &esp->cur_msgout[0], 5);
-                       esp_cmd(esp, ESP_CMD_TI);
-               } else {
-                       dma_setup(esp, esp->esp_command_dvma, 5, 0);
-                       esp_setcount(esp->eregs, 5, 0);
-                       esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
-               }
-               break;
-
-       default:
-               /* whoops */
-               ESPMISC(("bogus msgout sending NOP\n"));
-               esp->cur_msgout[0] = NOP;
-
-               if (esp->erev == fashme) {
-                       hme_fifo_push(esp, &esp->cur_msgout[0], 1);
-               } else {
-                       sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA);
-               }
-
-               esp->msgout_len = 1;
-               esp_cmd(esp, ESP_CMD_TI);
-               break;
-       };
-
-       esp_advance_phase(esp->current_SC, in_msgoutdone);
-       return do_intr_end;
-}
-
-static int esp_do_msgoutdone(struct esp *esp)
-{
-       if (esp->msgout_len > 1) {
-               /* XXX HME/FAS ATN deassert workaround required,
-                * XXX no DMA flushing, only possible ESP_CMD_FLUSH
-                * XXX to kill the fifo.
-                */
-               if (esp->erev != fashme) {
-                       u32 tmp;
-
-                       while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ)
-                               udelay(1);
-                       tmp &= ~DMA_ENABLE;
-                       sbus_writel(tmp, esp->dregs + DMA_CSR);
-                       dma_invalidate(esp);
-               } else {
-                       esp_cmd(esp, ESP_CMD_FLUSH);
-               }
-       }
-       if (!(esp->ireg & ESP_INTR_DC)) {
-               if (esp->erev != fashme)
-                       esp_cmd(esp, ESP_CMD_NULL);
-               switch (esp->sreg & ESP_STAT_PMASK) {
-               case ESP_MOP:
-                       /* whoops, parity error */
-                       ESPLOG(("esp%d: still in msgout, parity error assumed\n",
-                               esp->esp_id));
-                       if (esp->msgout_len > 1)
-                               esp_cmd(esp, ESP_CMD_SATN);
-                       esp_advance_phase(esp->current_SC, in_msgout);
-                       return do_work_bus;
-
-               case ESP_DIP:
-                       break;
-
-               default:
-                       /* Happy Meal fifo is touchy... */
-                       if ((esp->erev != fashme) &&
-                           !fcount(esp) &&
-                           !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset))
-                               esp_cmd(esp, ESP_CMD_FLUSH);
-                       break;
-
-               };
-       } else {
-               ESPLOG(("esp%d: disconnect, resetting bus\n", esp->esp_id));
-               return do_reset_bus;
-       }
-
-       /* If we sent out a synchronous negotiation message, update
-        * our state.
-        */
-       if (esp->cur_msgout[2] == EXTENDED_MESSAGE &&
-           esp->cur_msgout[4] == EXTENDED_SDTR) {
-               esp->snip = 1; /* anal retentiveness... */
-       }
-
-       esp->prevmsgout = esp->cur_msgout[0];
-       esp->msgout_len = 0;
-       esp_advance_phase(esp->current_SC, in_the_dark);
-       return esp_do_phase_determine(esp);
-}
-
-static int esp_bus_unexpected(struct esp *esp)
-{
-       ESPLOG(("esp%d: command in weird state %2x\n",
-               esp->esp_id, esp->current_SC->SCp.phase));
-       return do_reset_bus;
-}
-
-static espfunc_t bus_vector[] = {
-       esp_do_data_finale,
-       esp_do_data_finale,
-       esp_bus_unexpected,
-       esp_do_msgin,
-       esp_do_msgincont,
-       esp_do_msgindone,
-       esp_do_msgout,
-       esp_do_msgoutdone,
-       esp_do_cmdbegin,
-       esp_do_cmddone,
-       esp_do_status,
-       esp_do_freebus,
-       esp_do_phase_determine,
-       esp_bus_unexpected,
-       esp_bus_unexpected,
-       esp_bus_unexpected,
-};
-
-/* This is the second tier in our dual-level SCSI state machine. */
-static int esp_work_bus(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr = esp->current_SC;
-       unsigned int phase;
-
-       ESPBUS(("esp_work_bus: "));
-       if (!SCptr) {
-               ESPBUS(("reconnect\n"));
-               return esp_do_reconnect(esp);
-       }
-       phase = SCptr->SCp.phase;
-       if ((phase & 0xf0) == in_phases_mask)
-               return bus_vector[(phase & 0x0f)](esp);
-       else if ((phase & 0xf0) == in_slct_mask)
-               return esp_select_complete(esp);
-       else
-               return esp_bus_unexpected(esp);
-}
-
-static espfunc_t isvc_vector[] = {
-       NULL,
-       esp_do_phase_determine,
-       esp_do_resetbus,
-       esp_finish_reset,
-       esp_work_bus
-};
-
-/* Main interrupt handler for an esp adapter. */
-static void esp_handle(struct esp *esp)
-{
-       struct scsi_cmnd *SCptr;
-       int what_next = do_intr_end;
-
-       SCptr = esp->current_SC;
-
-       /* Check for errors. */
-       esp->sreg = sbus_readb(esp->eregs + ESP_STATUS);
-       esp->sreg &= (~ESP_STAT_INTR);
-       if (esp->erev == fashme) {
-               esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2);
-               esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS);
-       }
-
-       if (esp->sreg & (ESP_STAT_SPAM)) {
-               /* Gross error, could be due to one of:
-                *
-                * - top of fifo overwritten, could be because
-                *   we tried to do a synchronous transfer with
-                *   an offset greater than ESP fifo size
-                *
-                * - top of command register overwritten
-                *
-                * - DMA setup to go in one direction, SCSI
-                *   bus points in the other, whoops
-                *
-                * - weird phase change during asynchronous
-                *   data phase while we are initiator
-                */
-               ESPLOG(("esp%d: Gross error sreg=%2x\n", esp->esp_id, esp->sreg));
-
-               /* If a command is live on the bus we cannot safely
-                * reset the bus, so we'll just let the pieces fall
-                * where they may.  Here we are hoping that the
-                * target will be able to cleanly go away soon
-                * so we can safely reset things.
-                */
-               if (!SCptr) {
-                       ESPLOG(("esp%d: No current cmd during gross error, "
-                               "resetting bus\n", esp->esp_id));
-                       what_next = do_reset_bus;
-                       goto state_machine;
-               }
-       }
-
-       if (sbus_readl(esp->dregs + DMA_CSR) & DMA_HNDL_ERROR) {
-               /* A DMA gate array error.  Here we must
-                * be seeing one of two things.  Either the
-                * virtual to physical address translation
-                * on the SBUS could not occur, else the
-                * translation it did get pointed to a bogus
-                * page.  Ho hum...
-                */
-               ESPLOG(("esp%d: DMA error %08x\n", esp->esp_id,
-                       sbus_readl(esp->dregs + DMA_CSR)));
-
-               /* DMA gate array itself must be reset to clear the
-                * error condition.
-                */
-               esp_reset_dma(esp);
-
-               what_next = do_reset_bus;
-               goto state_machine;
-       }
-
-       esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT);   /* Unlatch intr reg */
-
-       if (esp->erev == fashme) {
-               /* This chip is really losing. */
-               ESPHME(("HME["));
-
-               ESPHME(("sreg2=%02x,", esp->sreg2));
-               /* Must latch fifo before reading the interrupt
-                * register else garbage ends up in the FIFO
-                * which confuses the driver utterly.
-                */
-               if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
-                   (esp->sreg2 & ESP_STAT2_F1BYTE)) {
-                       ESPHME(("fifo_workaround]"));
-                       hme_fifo_read(esp);
-               } else {
-                       ESPHME(("no_fifo_workaround]"));
-               }
-       }
-
-       /* No current cmd is only valid at this point when there are
-        * commands off the bus or we are trying a reset.
-        */
-       if (!SCptr && !esp->disconnected_SC && !(esp->ireg & ESP_INTR_SR)) {
-               /* Panic is safe, since current_SC is null. */
-               ESPLOG(("esp%d: no command in esp_handle()\n", esp->esp_id));
-               panic("esp_handle: current_SC == penguin within interrupt!");
-       }
-
-       if (esp->ireg & (ESP_INTR_IC)) {
-               /* Illegal command fed to ESP.  Outside of obvious
-                * software bugs that could cause this, there is
-                * a condition with esp100 where we can confuse the
-                * ESP into an erroneous illegal command interrupt
-                * because it does not scrape the FIFO properly
-                * for reselection.  See esp100_reconnect_hwbug()
-                * to see how we try very hard to avoid this.
-                */
-               ESPLOG(("esp%d: invalid command\n", esp->esp_id));
-
-               esp_dump_state(esp);
-
-               if (SCptr != NULL) {
-                       /* Devices with very buggy firmware can drop BSY
-                        * during a scatter list interrupt when using sync
-                        * mode transfers.  We continue the transfer as
-                        * expected, the target drops the bus, the ESP
-                        * gets confused, and we get an illegal command
-                        * interrupt because the bus is in the disconnected
-                        * state now and ESP_CMD_TI is only allowed when
-                        * a nexus is alive on the bus.
-                        */
-                       ESPLOG(("esp%d: Forcing async and disabling disconnect for "
-                               "target %d\n", esp->esp_id, SCptr->device->id));
-                       SCptr->device->borken = 1; /* foo on you */
-               }
-
-               what_next = do_reset_bus;
-       } else if (!(esp->ireg & ~(ESP_INTR_FDONE | ESP_INTR_BSERV | ESP_INTR_DC))) {
-               if (SCptr) {
-                       unsigned int phase = SCptr->SCp.phase;
-
-                       if (phase & in_phases_mask) {
-                               what_next = esp_work_bus(esp);
-                       } else if (phase & in_slct_mask) {
-                               what_next = esp_select_complete(esp);
-                       } else {
-                               ESPLOG(("esp%d: interrupt for no good reason...\n",
-                                       esp->esp_id));
-                               what_next = do_intr_end;
-                       }
-               } else {
-                       ESPLOG(("esp%d: BSERV or FDONE or DC while SCptr==NULL\n",
-                               esp->esp_id));
-                       what_next = do_reset_bus;
-               }
-       } else if (esp->ireg & ESP_INTR_SR) {
-               ESPLOG(("esp%d: SCSI bus reset interrupt\n", esp->esp_id));
-               what_next = do_reset_complete;
-       } else if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN)) {
-               ESPLOG(("esp%d: AIEEE we have been selected by another initiator!\n",
-                       esp->esp_id));
-               what_next = do_reset_bus;
-       } else if (esp->ireg & ESP_INTR_RSEL) {
-               if (SCptr == NULL) {
-                       /* This is ok. */
-                       what_next = esp_do_reconnect(esp);
-               } else if (SCptr->SCp.phase & in_slct_mask) {
-                       /* Only selection code knows how to clean
-                        * up properly.
-                        */
-                       ESPDISC(("Reselected during selection attempt\n"));
-                       what_next = esp_select_complete(esp);
-               } else {
-                       ESPLOG(("esp%d: Reselected while bus is busy\n",
-                               esp->esp_id));
-                       what_next = do_reset_bus;
-               }
-       }
-
-       /* This is tier-one in our dual level SCSI state machine. */
-state_machine:
-       while (what_next != do_intr_end) {
-               if (what_next >= do_phase_determine &&
-                   what_next < do_intr_end) {
-                       what_next = isvc_vector[what_next](esp);
-               } else {
-                       /* state is completely lost ;-( */
-                       ESPLOG(("esp%d: interrupt engine loses state, resetting bus\n",
-                               esp->esp_id));
-                       what_next = do_reset_bus;
-               }
-       }
-}
-
-/* Service only the ESP described by dev_id. */
-static irqreturn_t esp_intr(int irq, void *dev_id)
-{
-       struct esp *esp = dev_id;
-       unsigned long flags;
-
-       spin_lock_irqsave(esp->ehost->host_lock, flags);
-       if (ESP_IRQ_P(esp->dregs)) {
-               ESP_INTSOFF(esp->dregs);
-
-               ESPIRQ(("I[%d:%d](", smp_processor_id(), esp->esp_id));
-               esp_handle(esp);
-               ESPIRQ((")"));
-
-               ESP_INTSON(esp->dregs);
-       }
-       spin_unlock_irqrestore(esp->ehost->host_lock, flags);
-
-       return IRQ_HANDLED;
-}
-
-static int esp_slave_alloc(struct scsi_device *SDptr)
-{
-       struct esp_device *esp_dev =
-               kmalloc(sizeof(struct esp_device), GFP_ATOMIC);
-
-       if (!esp_dev)
-               return -ENOMEM;
-       memset(esp_dev, 0, sizeof(struct esp_device));
-       SDptr->hostdata = esp_dev;
-       return 0;
-}
-
-static void esp_slave_destroy(struct scsi_device *SDptr)
-{
-       struct esp *esp = (struct esp *) SDptr->host->hostdata;
-
-       esp->targets_present &= ~(1 << SDptr->id);
-       kfree(SDptr->hostdata);
-       SDptr->hostdata = NULL;
-}
-
-static struct scsi_host_template esp_template = {
-       .module                 = THIS_MODULE,
-       .name                   = "esp",
-       .info                   = esp_info,
-       .slave_alloc            = esp_slave_alloc,
-       .slave_destroy          = esp_slave_destroy,
-       .queuecommand           = esp_queue,
-       .eh_abort_handler       = esp_abort,
-       .eh_bus_reset_handler   = esp_reset,
-       .can_queue              = 7,
-       .this_id                = 7,
-       .sg_tablesize           = SG_ALL,
-       .cmd_per_lun            = 1,
-       .use_clustering         = ENABLE_CLUSTERING,
-       .proc_name              = "esp",
-       .proc_info              = esp_proc_info,
-};
-
-#ifndef CONFIG_SUN4
-static struct of_device_id esp_match[] = {
-       {
-               .name = "SUNW,esp",
-               .data = &esp_template,
-       },
-       {
-               .name = "SUNW,fas",
-               .data = &esp_template,
-       },
-       {
-               .name = "esp",
-               .data = &esp_template,
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, esp_match);
-
-static struct of_platform_driver esp_sbus_driver = {
-       .name           = "esp",
-       .match_table    = esp_match,
-       .probe          = esp_sbus_probe,
-       .remove         = __devexit_p(esp_sbus_remove),
-};
-#endif
-
-static int __init esp_init(void)
-{
-#ifdef CONFIG_SUN4
-       return esp_sun4_probe(&esp_template);
-#else
-       return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
-#endif
-}
-
-static void __exit esp_exit(void)
-{
-#ifdef CONFIG_SUN4
-       esp_sun4_remove();
-#else
-       of_unregister_driver(&esp_sbus_driver);
-#endif
-}
-
-MODULE_DESCRIPTION("ESP Sun SCSI driver");
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_init(esp_init);
-module_exit(esp_exit);
diff --git a/drivers/scsi/esp.h b/drivers/scsi/esp.h
deleted file mode 100644 (file)
index a98cda9..0000000
+++ /dev/null
@@ -1,406 +0,0 @@
-/* $Id: esp.h,v 1.29 2001/12/11 04:55:47 davem Exp $
- * esp.h:  Defines and structures for the Sparc ESP (Enhanced SCSI
- *         Processor) driver under Linux.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _SPARC_ESP_H
-#define _SPARC_ESP_H
-
-/* For dvma controller register definitions. */
-#include <asm/dma.h>
-
-/* The ESP SCSI controllers have their register sets in three
- * "classes":
- *
- * 1) Registers which are both read and write.
- * 2) Registers which are read only.
- * 3) Registers which are write only.
- *
- * Yet, they all live within the same IO space.
- */
-
-/* All the ESP registers are one byte each and are accessed longwords
- * apart with a big-endian ordering to the bytes.
- */
-                                       /* Access    Description              Offset */
-#define ESP_TCLOW      0x00UL          /* rw  Low bits of the transfer count 0x00   */
-#define ESP_TCMED      0x04UL          /* rw  Mid bits of the transfer count 0x04   */
-#define ESP_FDATA      0x08UL          /* rw  FIFO data bits                 0x08   */
-#define ESP_CMD                0x0cUL          /* rw  SCSI command bits              0x0c   */
-#define ESP_STATUS     0x10UL          /* ro  ESP status register            0x10   */
-#define ESP_BUSID      ESP_STATUS      /* wo  Bus ID for select/reselect     0x10   */
-#define ESP_INTRPT     0x14UL          /* ro  Kind of interrupt              0x14   */
-#define ESP_TIMEO      ESP_INTRPT      /* wo  Timeout value for select/resel 0x14   */
-#define ESP_SSTEP      0x18UL          /* ro  Sequence step register         0x18   */
-#define ESP_STP                ESP_SSTEP       /* wo  Transfer period per sync       0x18   */
-#define ESP_FFLAGS     0x1cUL          /* ro  Bits of current FIFO info      0x1c   */
-#define ESP_SOFF       ESP_FFLAGS      /* wo  Sync offset                    0x1c   */
-#define ESP_CFG1       0x20UL          /* rw  First configuration register   0x20   */
-#define ESP_CFACT      0x24UL          /* wo  Clock conversion factor        0x24   */
-#define ESP_STATUS2    ESP_CFACT       /* ro  HME status2 register           0x24   */
-#define ESP_CTEST      0x28UL          /* wo  Chip test register             0x28   */
-#define ESP_CFG2       0x2cUL          /* rw  Second configuration register  0x2c   */
-#define ESP_CFG3       0x30UL          /* rw  Third configuration register   0x30   */
-#define ESP_TCHI       0x38UL          /* rw  High bits of transfer count    0x38   */
-#define ESP_UID                ESP_TCHI        /* ro  Unique ID code                 0x38   */
-#define FAS_RLO                ESP_TCHI        /* rw  HME extended counter           0x38   */
-#define ESP_FGRND      0x3cUL          /* rw  Data base for fifo             0x3c   */
-#define FAS_RHI                ESP_FGRND       /* rw  HME extended counter           0x3c   */
-#define ESP_REG_SIZE   0x40UL
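-
-/* Example (illustrative, not part of the original header): since each
- * one-byte register is spaced a longword apart, the Nth register sits at
- * byte offset N * 4, which is why accesses look like:
- *
- *     sbus_writeb(val, esp->eregs + ESP_FDATA);   // byte register at 0x08
- */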
-
-/* Various revisions of the ESP board. */
-enum esp_rev {
-       esp100     = 0x00,  /* NCR53C90 - very broken */
-       esp100a    = 0x01,  /* NCR53C90A */
-       esp236     = 0x02,
-       fas236     = 0x03,
-       fas100a    = 0x04,
-       fast       = 0x05,
-       fashme     = 0x06,
-       espunknown = 0x07
-};
-
-/* We allocate one of these for each scsi device and attach it to
- * SDptr->hostdata for use in the driver
- */
-struct esp_device {
-  unsigned char sync_min_period;
-  unsigned char sync_max_offset;
-  unsigned sync:1;
-  unsigned wide:1;
-  unsigned disconnect:1;
-};
-
-struct scsi_cmnd;
-
-/* We get one of these for each ESP probed. */
-struct esp {
-       void __iomem            *eregs;         /* ESP controller registers */
-       void __iomem            *dregs;         /* DMA controller registers */
-       struct sbus_dma         *dma;           /* DMA controller sw state */
-       struct Scsi_Host        *ehost;         /* Backpointer to SCSI Host */
-       struct sbus_dev         *sdev;          /* Pointer to SBus entry */
-
-       /* ESP Configuration Registers */
-       u8                      config1;        /* Copy of the 1st config register */
-       u8                      config2;        /* Copy of the 2nd config register */
-       u8                      config3[16];    /* Copy of the 3rd config register */
-
-       /* The current command we are sending to the ESP chip.  This esp_command
-        * ptr needs to be mapped in DVMA area so we can send commands and read
-        * from the ESP fifo without burning precious CPU cycles.  Programmed I/O
-        * sucks when we have the DVMA to do it for us.  The ESP is stupid and will
-        * only send out 6, 10, and 12 byte SCSI commands, others we need to send
-        * one byte at a time.  esp_slowcmd being set says that we are doing one
-        * of the command types ESP doesn't understand, esp_scmdp keeps track of
-        * which byte we are sending, esp_scmdleft says how many bytes to go.
-        */
-       volatile u8             *esp_command;    /* Location of command (CPU view)  */
-       __u32                   esp_command_dvma;/* Location of command (DVMA view) */
-       unsigned char           esp_clen;        /* Length of this command */
-       unsigned char           esp_slowcmd;
-       unsigned char           *esp_scmdp;
-       unsigned char           esp_scmdleft;
-
-       /* The following are used to determine the cause of an IRQ. Upon every
-        * IRQ entry we synchronize these with the hardware registers.
-        */
-       u8                      ireg;           /* Copy of ESP interrupt register */
-       u8                      sreg;           /* Copy of ESP status register */
-       u8                      seqreg;         /* Copy of ESP sequence step register */
-       u8                      sreg2;          /* Copy of HME status2 register */
-
-       /* To save register writes to the ESP, which can be expensive, we
-        * keep track of the previous value that various registers had for
-        * the last target we connected to.  If they are the same for the
-        * current target, we skip the register writes as they are not needed.
-        */
-       u8                      prev_soff, prev_stp;
-       u8                      prev_cfg3, __cache_pad;
-
-       /* We also keep a cache of the previous FAS/HME DMA CSR register value.  */
-       u32                     prev_hme_dmacsr;
-
-       /* The HME is the biggest piece of shit I have ever seen. */
-       u8                      hme_fifo_workaround_buffer[16 * 2];
-       u8                      hme_fifo_workaround_count;
-
-       /* For each target we keep track of save/restore data
-        * pointer information.  This needs to be updated majorly
-        * when we add support for tagged queueing.  -DaveM
-        */
-       struct esp_pointers {
-               char                    *saved_ptr;
-               struct scatterlist      *saved_buffer;
-               int                     saved_this_residual;
-               int                     saved_buffers_residual;
-       } data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/;
-
-       /* Clock periods, frequencies, synchronization, etc. */
-       unsigned int            cfreq;          /* Clock frequency in HZ */
-       unsigned int            cfact;          /* Clock conversion factor */
-       unsigned int            raw_cfact;      /* Raw copy from probing */
-       unsigned int            ccycle;         /* One ESP clock cycle */
-       unsigned int            ctick;          /* One ESP clock time */
-       unsigned int            radelay;        /* FAST chip req/ack delay */
-       unsigned int            neg_defp;       /* Default negotiation period */
-       unsigned int            sync_defp;      /* Default sync transfer period */
-       unsigned int            max_period;     /* longest our period can be */
-       unsigned int            min_period;     /* shortest period we can withstand */
-
-       struct esp              *next;          /* Next ESP we probed or NULL */
-       char                    prom_name[64];  /* Name of ESP device from prom */
-       int                     prom_node;      /* Prom node where ESP found */
-       int                     esp_id;         /* Unique per-ESP ID number */
-
-       /* For slow to medium speed input clock rates we shoot for 5mb/s,
-        * but for high input clock rates we try to do 10mb/s although I
-        * don't think a transfer can even run that fast with an ESP even
-        * with DMA2 scatter gather pipelining.
-        */
-#define SYNC_DEFP_SLOW            0x32   /* 5mb/s  */
-#define SYNC_DEFP_FAST            0x19   /* 10mb/s */
-
-       unsigned int            snip;           /* Sync. negotiation in progress */
-       unsigned int            wnip;           /* WIDE negotiation in progress */
-       unsigned int            targets_present;/* targets spoken to before */
-
-       int             current_transfer_size;  /* Set at beginning of data dma */
-
-       u8                      espcmdlog[32];  /* Log of current esp cmds sent. */
-       u8                      espcmdent;      /* Current entry in esp cmd log. */
-
-       /* Misc. info about this ESP */
-       enum esp_rev            erev;           /* ESP revision */
-       int                     irq;            /* SBus IRQ for this ESP */
-       int                     scsi_id;        /* Who am I as initiator? */
-       int                     scsi_id_mask;   /* Bitmask of 'me'. */
-       int                     diff;           /* Differential SCSI bus? */
-       int                     bursts;         /* Burst sizes our DVMA supports */
-
-       /* Our command queues, only one cmd lives in the current_SC queue. */
-       struct scsi_cmnd        *issue_SC;      /* Commands to be issued */
-       struct scsi_cmnd        *current_SC;    /* Who is currently working the bus */
-       struct scsi_cmnd        *disconnected_SC;/* Commands disconnected from the bus */
-
-       /* Message goo */
-       u8                      cur_msgout[16];
-       u8                      cur_msgin[16];
-       u8                      prevmsgout, prevmsgin;
-       u8                      msgout_len, msgin_len;
-       u8                      msgout_ctr, msgin_ctr;
-
-       /* States that we cannot keep in the per cmd structure because they
-        * cannot be associated with any specific command.
-        */
-       u8                      resetting_bus;
-       wait_queue_head_t       reset_queue;
-};
-
-/* Bitfield meanings for the above registers. */
-
-/* ESP config reg 1, read-write, found on all ESP chips */
-#define ESP_CONFIG1_ID        0x07             /* My BUS ID bits */
-#define ESP_CONFIG1_CHTEST    0x08             /* Enable ESP chip tests */
-#define ESP_CONFIG1_PENABLE   0x10             /* Enable parity checks */
-#define ESP_CONFIG1_PARTEST   0x20             /* Parity test mode enabled? */
-#define ESP_CONFIG1_SRRDISAB  0x40             /* Disable SCSI reset reports */
-#define ESP_CONFIG1_SLCABLE   0x80             /* Enable slow cable mode */
-
-/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */
-#define ESP_CONFIG2_DMAPARITY 0x01             /* enable DMA Parity (200,236) */
-#define ESP_CONFIG2_REGPARITY 0x02             /* enable reg Parity (200,236) */
-#define ESP_CONFIG2_BADPARITY 0x04             /* Bad parity target abort  */
-#define ESP_CONFIG2_SCSI2ENAB 0x08             /* Enable SCSI-2 features (tmode only) */
-#define ESP_CONFIG2_HI        0x10             /* High Impedance DREQ ???  */
-#define ESP_CONFIG2_HMEFENAB  0x10             /* HME features enable */
-#define ESP_CONFIG2_BCM       0x20             /* Enable byte-ctrl (236)   */
-#define ESP_CONFIG2_DISPINT   0x20             /* Disable pause irq (hme) */
-#define ESP_CONFIG2_FENAB     0x40             /* Enable features (fas100,esp216)      */
-#define ESP_CONFIG2_SPL       0x40             /* Enable status-phase latch (esp236)   */
-#define ESP_CONFIG2_MKDONE    0x40             /* HME magic feature */
-#define ESP_CONFIG2_HME32     0x80             /* HME 32 extended */
-#define ESP_CONFIG2_MAGIC     0xe0             /* Invalid bits... */
-
-/* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */
-#define ESP_CONFIG3_FCLOCK    0x01             /* FAST SCSI clock rate (esp100a/hme) */
-#define ESP_CONFIG3_TEM       0x01             /* Enable thresh-8 mode (esp/fas236)  */
-#define ESP_CONFIG3_FAST      0x02             /* Enable FAST SCSI     (esp100a/hme) */
-#define ESP_CONFIG3_ADMA      0x02             /* Enable alternate-dma (esp/fas236)  */
-#define ESP_CONFIG3_TENB      0x04             /* group2 SCSI2 support (esp100a/hme) */
-#define ESP_CONFIG3_SRB       0x04             /* Save residual byte   (esp/fas236)  */
-#define ESP_CONFIG3_TMS       0x08             /* Three-byte msg's ok  (esp100a/hme) */
-#define ESP_CONFIG3_FCLK      0x08             /* Fast SCSI clock rate (esp/fas236)  */
-#define ESP_CONFIG3_IDMSG     0x10             /* ID message checking  (esp100a/hme) */
-#define ESP_CONFIG3_FSCSI     0x10             /* Enable FAST SCSI     (esp/fas236)  */
-#define ESP_CONFIG3_GTM       0x20             /* group2 SCSI2 support (esp/fas236)  */
-#define ESP_CONFIG3_IDBIT3    0x20             /* Bit 3 of HME SCSI-ID (hme)         */
-#define ESP_CONFIG3_TBMS      0x40             /* Three-byte msg's ok  (esp/fas236)  */
-#define ESP_CONFIG3_EWIDE     0x40             /* Enable Wide-SCSI     (hme)         */
-#define ESP_CONFIG3_IMS       0x80             /* ID msg chk'ng        (esp/fas236)  */
-#define ESP_CONFIG3_OBPUSH    0x80             /* Push odd-byte to dma (hme)         */
-
-/* ESP command register read-write */
-/* Group 1 commands:  These may be sent at any point in time to the ESP
- *                    chip.  None of them can generate interrupts 'cept
- *                    the "SCSI bus reset" command if you have not disabled
- *                    SCSI reset interrupts in the config1 ESP register.
- */
-#define ESP_CMD_NULL          0x00             /* Null command, ie. a nop */
-#define ESP_CMD_FLUSH         0x01             /* FIFO Flush */
-#define ESP_CMD_RC            0x02             /* Chip reset */
-#define ESP_CMD_RS            0x03             /* SCSI bus reset */
-
-/* Group 2 commands:  ESP must be an initiator and connected to a target
- *                    for these commands to work.
- */
-#define ESP_CMD_TI            0x10             /* Transfer Information */
-#define ESP_CMD_ICCSEQ        0x11             /* Initiator cmd complete sequence */
-#define ESP_CMD_MOK           0x12             /* Message okie-dokie */
-#define ESP_CMD_TPAD          0x18             /* Transfer Pad */
-#define ESP_CMD_SATN          0x1a             /* Set ATN */
-#define ESP_CMD_RATN          0x1b             /* De-assert ATN */
-
-/* Group 3 commands:  ESP must be in the MSGOUT or MSGIN state and be connected
- *                    to a target as the initiator for these commands to work.
- */
-#define ESP_CMD_SMSG          0x20             /* Send message */
-#define ESP_CMD_SSTAT         0x21             /* Send status */
-#define ESP_CMD_SDATA         0x22             /* Send data */
-#define ESP_CMD_DSEQ          0x23             /* Discontinue Sequence */
-#define ESP_CMD_TSEQ          0x24             /* Terminate Sequence */
-#define ESP_CMD_TCCSEQ        0x25             /* Target cmd cmplt sequence */
-#define ESP_CMD_DCNCT         0x27             /* Disconnect */
-#define ESP_CMD_RMSG          0x28             /* Receive Message */
-#define ESP_CMD_RCMD          0x29             /* Receive Command */
-#define ESP_CMD_RDATA         0x2a             /* Receive Data */
-#define ESP_CMD_RCSEQ         0x2b             /* Receive cmd sequence */
-
-/* Group 4 commands:  The ESP must be in the disconnected state and must
- *                    not be connected to any targets as initiator for
- *                    these commands to work.
- */
-#define ESP_CMD_RSEL          0x40             /* Reselect */
-#define ESP_CMD_SEL           0x41             /* Select w/o ATN */
-#define ESP_CMD_SELA          0x42             /* Select w/ATN */
-#define ESP_CMD_SELAS         0x43             /* Select w/ATN & STOP */
-#define ESP_CMD_ESEL          0x44             /* Enable selection */
-#define ESP_CMD_DSEL          0x45             /* Disable selections */
-#define ESP_CMD_SA3           0x46             /* Select w/ATN3 */
-#define ESP_CMD_RSEL3         0x47             /* Reselect3 */
-
-/* This bit enables the ESP's DMA on the SBus */
-#define ESP_CMD_DMA           0x80             /* Do DMA? */
-
-
-/* ESP status register read-only */
-#define ESP_STAT_PIO          0x01             /* IO phase bit */
-#define ESP_STAT_PCD          0x02             /* CD phase bit */
-#define ESP_STAT_PMSG         0x04             /* MSG phase bit */
-#define ESP_STAT_PMASK        0x07             /* Mask of phase bits */
-#define ESP_STAT_TDONE        0x08             /* Transfer Completed */
-#define ESP_STAT_TCNT         0x10             /* Transfer Counter Is Zero */
-#define ESP_STAT_PERR         0x20             /* Parity error */
-#define ESP_STAT_SPAM         0x40             /* Real bad error */
-/* This indicates the 'interrupt pending' condition on esp236, it is a reserved
- * bit on other revs of the ESP.
- */
-#define ESP_STAT_INTR         0x80             /* Interrupt */
-
-/* HME only: status 2 register */
-#define ESP_STAT2_SCHBIT      0x01 /* Upper bits 3-7 of sstep enabled */
-#define ESP_STAT2_FFLAGS      0x02 /* The fifo flags are now latched */
-#define ESP_STAT2_XCNT        0x04 /* The transfer counter is latched */
-#define ESP_STAT2_CREGA       0x08 /* The command reg is active now */
-#define ESP_STAT2_WIDE        0x10 /* Interface on this adapter is wide */
-#define ESP_STAT2_F1BYTE      0x20 /* There is one byte at top of fifo */
-#define ESP_STAT2_FMSB        0x40 /* Next byte in fifo is most significant */
-#define ESP_STAT2_FEMPTY      0x80 /* FIFO is empty */
-
-/* The status register can be masked with ESP_STAT_PMASK and compared
- * with the following values to determine the current phase the ESP
- * (at least thinks it) is in.  For our purposes we also add our own
- * software 'done' bit for our phase management engine.
- */
-#define ESP_DOP   (0)                                       /* Data Out  */
-#define ESP_DIP   (ESP_STAT_PIO)                            /* Data In   */
-#define ESP_CMDP  (ESP_STAT_PCD)                            /* Command   */
-#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO)               /* Status    */
-#define ESP_MOP   (ESP_STAT_PMSG|ESP_STAT_PCD)              /* Message Out */
-#define ESP_MIP   (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
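-
-/* Example (illustrative, not from the original driver): the saved status
- * register copy is classified against the phase values above, e.g.:
- *
- *     switch (esp->sreg & ESP_STAT_PMASK) {
- *     case ESP_MIP:
- *             handle message-in phase
- *             break;
- *     }
- */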
-
-/* ESP interrupt register read-only */
-#define ESP_INTR_S            0x01             /* Select w/o ATN */
-#define ESP_INTR_SATN         0x02             /* Select w/ATN */
-#define ESP_INTR_RSEL         0x04             /* Reselected */
-#define ESP_INTR_FDONE        0x08             /* Function done */
-#define ESP_INTR_BSERV        0x10             /* Bus service */
-#define ESP_INTR_DC           0x20             /* Disconnect */
-#define ESP_INTR_IC           0x40             /* Illegal command given */
-#define ESP_INTR_SR           0x80             /* SCSI bus reset detected */
-
-/* Interrupt status macros */
-#define ESP_SRESET_IRQ(esp)  ((esp)->intreg & (ESP_INTR_SR))
-#define ESP_ILLCMD_IRQ(esp)  ((esp)->intreg & (ESP_INTR_IC))
-#define ESP_SELECT_WITH_ATN_IRQ(esp)     ((esp)->intreg & (ESP_INTR_SATN))
-#define ESP_SELECT_WITHOUT_ATN_IRQ(esp)  ((esp)->intreg & (ESP_INTR_S))
-#define ESP_SELECTION_IRQ(esp)  ((ESP_SELECT_WITH_ATN_IRQ(esp)) ||         \
-                                (ESP_SELECT_WITHOUT_ATN_IRQ(esp)))
-#define ESP_RESELECTION_IRQ(esp)         ((esp)->intreg & (ESP_INTR_RSEL))
-
-/* ESP sequence step register read-only */
-#define ESP_STEP_VBITS        0x07             /* Valid bits */
-#define ESP_STEP_ASEL         0x00             /* Selection&Arbitrate cmplt */
-#define ESP_STEP_SID          0x01             /* One msg byte sent */
-#define ESP_STEP_NCMD         0x02             /* Was not in command phase */
-#define ESP_STEP_PPC          0x03             /* Early phase chg caused cmnd
-                                                * bytes to be lost
-                                                */
-#define ESP_STEP_FINI4        0x04             /* Command was sent ok */
-
-/* Ho hum, some ESP's set the step register to this as well... */
-#define ESP_STEP_FINI5        0x05
-#define ESP_STEP_FINI6        0x06
-#define ESP_STEP_FINI7        0x07
-
-/* ESP chip-test register read-write */
-#define ESP_TEST_TARG         0x01             /* Target test mode */
-#define ESP_TEST_INI          0x02             /* Initiator test mode */
-#define ESP_TEST_TS           0x04             /* Tristate test mode */
-
-/* ESP unique ID register read-only, found on fas236+fas100a only */
-#define ESP_UID_F100A         0x00             /* ESP FAS100A  */
-#define ESP_UID_F236          0x02             /* ESP FAS236   */
-#define ESP_UID_REV           0x07             /* ESP revision */
-#define ESP_UID_FAM           0xf8             /* ESP family   */
-
-/* ESP fifo flags register read-only */
-/* Note that the following implies a 16 byte FIFO on the ESP. */
-#define ESP_FF_FBYTES         0x1f             /* Num bytes in FIFO */
-#define ESP_FF_ONOTZERO       0x20             /* offset ctr not zero (esp100) */
-#define ESP_FF_SSTEP          0xe0             /* Sequence step */
-
-/* ESP clock conversion factor register write-only */
-#define ESP_CCF_F0            0x00             /* 35.01MHz - 40MHz */
-#define ESP_CCF_NEVER         0x01             /* Set it to this and die */
-#define ESP_CCF_F2            0x02             /* 10MHz */
-#define ESP_CCF_F3            0x03             /* 10.01MHz - 15MHz */
-#define ESP_CCF_F4            0x04             /* 15.01MHz - 20MHz */
-#define ESP_CCF_F5            0x05             /* 20.01MHz - 25MHz */
-#define ESP_CCF_F6            0x06             /* 25.01MHz - 30MHz */
-#define ESP_CCF_F7            0x07             /* 30.01MHz - 35MHz */
-
-/* HME only... */
-#define ESP_BUSID_RESELID     0x10
-#define ESP_BUSID_CTR32BIT    0x40
-
-#define ESP_BUS_TIMEOUT        275             /* In milli-seconds */
-#define ESP_TIMEO_CONST       8192
-#define ESP_NEG_DEFP(mhz, cfact) \
-        ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
-#define ESP_MHZ_TO_CYCLE(mhertz)  ((1000000000) / ((mhertz) / 1000))
-#define ESP_TICK(ccf, cycle)  ((7682 * (ccf) * (cycle) / 1000))
-
-#endif /* !(_SPARC_ESP_H) */
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
new file mode 100644 (file)
index 0000000..3cd5bf7
--- /dev/null
@@ -0,0 +1,2710 @@
+/* esp_scsi.c: ESP SCSI driver.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME                "esp"
+#define PFX DRV_MODULE_NAME    ": "
+#define DRV_VERSION            "2.000"
+#define DRV_MODULE_RELDATE     "April 19, 2007"
+
+/* SCSI bus reset settle time in seconds.  */
+static int esp_bus_reset_settle = 3;
+
+static u32 esp_debug;
+#define ESP_DEBUG_INTR         0x00000001
+#define ESP_DEBUG_SCSICMD      0x00000002
+#define ESP_DEBUG_RESET                0x00000004
+#define ESP_DEBUG_MSGIN                0x00000008
+#define ESP_DEBUG_MSGOUT       0x00000010
+#define ESP_DEBUG_CMDDONE      0x00000020
+#define ESP_DEBUG_DISCONNECT   0x00000040
+#define ESP_DEBUG_DATASTART    0x00000080
+#define ESP_DEBUG_DATADONE     0x00000100
+#define ESP_DEBUG_RECONNECT    0x00000200
+#define ESP_DEBUG_AUTOSENSE    0x00000400
+
+#define esp_log_intr(f, a...) \
+do {   if (esp_debug & ESP_DEBUG_INTR) \
+               printk(f, ## a); \
+} while (0)
+
+#define esp_log_reset(f, a...) \
+do {   if (esp_debug & ESP_DEBUG_RESET) \
+               printk(f, ## a); \
+} while (0)
+
+#define esp_log_msgin(f, a...) \
+do {   if (esp_debug & ESP_DEBUG_MSGIN) \
+               printk(f, ## a); \
+} while (0)
+
+#define esp_log_msgout(f, a...) \
+do {   if (esp_debug & ESP_DEBUG_MSGOUT) \
+               printk(f, ## a); \
+} while (0)
+
+#define esp_log_cmddone(f, a...) \
+do {   if (esp_debug & ESP_DEBUG_CMDDONE) \
+               printk(f, ## a); \
+} while (0)
+
+#define esp_log_disconnect(f, a...) \
+do {   if (esp_debug & ESP_DEBUG_DISCONNECT) \
+               printk(f, ## a); \
+} while (0)
+
+#define esp_log_datastart(f, a...) \
+do {   if (esp_debug & ESP_DEBUG_DATASTART) \
+               printk(f, ## a); \
+} while (0)
+
+#define esp_log_datadone(f, a...) \
+do {   if (esp_debug & ESP_DEBUG_DATADONE) \
+               printk(f, ## a); \
+} while (0)
+
+#define esp_log_reconnect(f, a...) \
+do {   if (esp_debug & ESP_DEBUG_RECONNECT) \
+               printk(f, ## a); \
+} while (0)
+
+#define esp_log_autosense(f, a...) \
+do {   if (esp_debug & ESP_DEBUG_AUTOSENSE) \
+               printk(f, ## a); \
+} while (0)
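+
+/* Example (illustrative, not from the original source): each esp_log_*()
+ * macro expands to a printk() guarded by one bit of the esp_debug mask,
+ * so tracing can be enabled per category, e.g.:
+ *
+ *     esp_debug |= ESP_DEBUG_INTR;
+ *     esp_log_intr("esp: ireg=%02x\n", esp->ireg);
+ */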
+
+#define esp_read8(REG)         esp->ops->esp_read8(esp, REG)
+#define esp_write8(VAL,REG)    esp->ops->esp_write8(esp, VAL, REG)
+
+static void esp_log_fill_regs(struct esp *esp,
+                             struct esp_event_ent *p)
+{
+       p->sreg = esp->sreg;
+       p->seqreg = esp->seqreg;
+       p->sreg2 = esp->sreg2;
+       p->ireg = esp->ireg;
+       p->select_state = esp->select_state;
+       p->event = esp->event;
+}
+
+void scsi_esp_cmd(struct esp *esp, u8 val)
+{
+       struct esp_event_ent *p;
+       int idx = esp->esp_event_cur;
+
+       p = &esp->esp_event_log[idx];
+       p->type = ESP_EVENT_TYPE_CMD;
+       p->val = val;
+       esp_log_fill_regs(esp, p);
+
+       esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
+
+       esp_write8(val, ESP_CMD);
+}
+EXPORT_SYMBOL(scsi_esp_cmd);
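+
+/* Note (illustrative, not from the original source): the event log used by
+ * scsi_esp_cmd() and esp_event() is a ring buffer; ESP_EVENT_LOG_SZ is
+ * assumed to be a power of two so the cursor wraps with a mask rather
+ * than a modulo:
+ *
+ *     idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
+ */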
+
+static void esp_event(struct esp *esp, u8 val)
+{
+       struct esp_event_ent *p;
+       int idx = esp->esp_event_cur;
+
+       p = &esp->esp_event_log[idx];
+       p->type = ESP_EVENT_TYPE_EVENT;
+       p->val = val;
+       esp_log_fill_regs(esp, p);
+
+       esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
+
+       esp->event = val;
+}
+
+static void esp_dump_cmd_log(struct esp *esp)
+{
+       int idx = esp->esp_event_cur;
+       int stop = idx;
+
+       printk(KERN_INFO PFX "esp%d: Dumping command log\n",
+              esp->host->unique_id);
+       do {
+               struct esp_event_ent *p = &esp->esp_event_log[idx];
+
+               printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
+                      esp->host->unique_id, idx,
+                      p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");
+
+               printk("val[%02x] sreg[%02x] seqreg[%02x] "
+                      "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
+                      p->val, p->sreg, p->seqreg,
+                      p->sreg2, p->ireg, p->select_state, p->event);
+
+               idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
+       } while (idx != stop);
+}
+
+static void esp_flush_fifo(struct esp *esp)
+{
+       scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+       if (esp->rev == ESP236) {
+               int lim = 1000;
+
+               while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
+                       if (--lim == 0) {
+                               printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES "
+                                      "will not clear!\n",
+                                      esp->host->unique_id);
+                               break;
+                       }
+                       udelay(1);
+               }
+       }
+}
+
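+/* Drain the FASHME fifo; each fifo-flags count covers a pair of bytes on
+ * this chip, and ESP_STAT2_F1BYTE flags a dangling odd byte which is
+ * fetched via a dummy write to the fifo data register.
+ */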
+static void hme_read_fifo(struct esp *esp)
+{
+       int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+       int idx = 0;
+
+       while (fcnt--) {
+               esp->fifo[idx++] = esp_read8(ESP_FDATA);
+               esp->fifo[idx++] = esp_read8(ESP_FDATA);
+       }
+       if (esp->sreg2 & ESP_STAT2_F1BYTE) {
+               esp_write8(0, ESP_FDATA);
+               esp->fifo[idx++] = esp_read8(ESP_FDATA);
+               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+       }
+       esp->fifo_cnt = idx;
+}
+
+static void esp_set_all_config3(struct esp *esp, u8 val)
+{
+       int i;
+
+       for (i = 0; i < ESP_MAX_TARGET; i++)
+               esp->target[i].esp_config3 = val;
+}
+
+/* Reset the ESP chip, _not_ the SCSI bus. */
+static void esp_reset_esp(struct esp *esp)
+{
+       u8 family_code, version;
+
+       /* Now reset the ESP chip */
+       scsi_esp_cmd(esp, ESP_CMD_RC);
+       scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
+       scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
+
+       /* Reload the configuration registers */
+       esp_write8(esp->cfact, ESP_CFACT);
+
+       esp->prev_stp = 0;
+       esp_write8(esp->prev_stp, ESP_STP);
+
+       esp->prev_soff = 0;
+       esp_write8(esp->prev_soff, ESP_SOFF);
+
+       esp_write8(esp->neg_defp, ESP_TIMEO);
+
+       /* This is the only point at which it is reliable to read
+        * the ID code for the fast ESP chip variants.
+        */
+       esp->max_period = ((35 * esp->ccycle) / 1000);
+       if (esp->rev == FAST) {
+               version = esp_read8(ESP_UID);
+               family_code = (version & 0xf8) >> 3;
+               if (family_code == 0x02)
+                       esp->rev = FAS236;
+               else if (family_code == 0x0a)
+                       esp->rev = FASHME; /* Version is usually '5'. */
+               else
+                       esp->rev = FAS100A;
+               esp->min_period = ((4 * esp->ccycle) / 1000);
+       } else {
+               esp->min_period = ((5 * esp->ccycle) / 1000);
+       }
+       esp->max_period = (esp->max_period + 3)>>2;
+       esp->min_period = (esp->min_period + 3)>>2;
+
+       esp_write8(esp->config1, ESP_CFG1);
+       switch (esp->rev) {
+       case ESP100:
+               /* nothing to do */
+               break;
+
+       case ESP100A:
+               esp_write8(esp->config2, ESP_CFG2);
+               break;
+
+       case ESP236:
+               /* Slow 236 */
+               esp_write8(esp->config2, ESP_CFG2);
+               esp->prev_cfg3 = esp->target[0].esp_config3;
+               esp_write8(esp->prev_cfg3, ESP_CFG3);
+               break;
+
+       case FASHME:
+               esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
+               /* fallthrough... */
+
+       case FAS236:
+               /* Fast 236 or HME */
+               esp_write8(esp->config2, ESP_CFG2);
+               if (esp->rev == FASHME) {
+                       u8 cfg3 = esp->target[0].esp_config3;
+
+                       cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
+                       if (esp->scsi_id >= 8)
+                               cfg3 |= ESP_CONFIG3_IDBIT3;
+                       esp_set_all_config3(esp, cfg3);
+               } else {
+                       u32 cfg3 = esp->target[0].esp_config3;
+
+                       cfg3 |= ESP_CONFIG3_FCLK;
+                       esp_set_all_config3(esp, cfg3);
+               }
+               esp->prev_cfg3 = esp->target[0].esp_config3;
+               esp_write8(esp->prev_cfg3, ESP_CFG3);
+               if (esp->rev == FASHME) {
+                       esp->radelay = 80;
+               } else {
+                       if (esp->flags & ESP_FLAG_DIFFERENTIAL)
+                               esp->radelay = 0;
+                       else
+                               esp->radelay = 96;
+               }
+               break;
+
+       case FAS100A:
+               /* Fast 100a */
+               esp_write8(esp->config2, ESP_CFG2);
+               esp_set_all_config3(esp,
+                                   (esp->target[0].esp_config3 |
+                                    ESP_CONFIG3_FCLOCK));
+               esp->prev_cfg3 = esp->target[0].esp_config3;
+               esp_write8(esp->prev_cfg3, ESP_CFG3);
+               esp->radelay = 32;
+               break;
+
+       default:
+               break;
+       }
+
+       /* Eat any bitrot in the chip */
+       esp_read8(ESP_INTRPT);
+       udelay(100);
+}
+
+static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
+{
+       struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
+       struct scatterlist *sg = cmd->request_buffer;
+       int dir = cmd->sc_data_direction;
+       int total, i;
+
+       if (dir == DMA_NONE)
+               return;
+
+       BUG_ON(cmd->use_sg == 0);
+
+       spriv->u.num_sg = esp->ops->map_sg(esp, sg,
+                                          cmd->use_sg, dir);
+       spriv->cur_residue = sg_dma_len(sg);
+       spriv->cur_sg = sg;
+
+       total = 0;
+       for (i = 0; i < spriv->u.num_sg; i++)
+               total += sg_dma_len(&sg[i]);
+       spriv->tot_residue = total;
+}
+
+static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
+                                  struct scsi_cmnd *cmd)
+{
+       struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
+
+       if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+               return ent->sense_dma +
+                       (ent->sense_ptr - cmd->sense_buffer);
+       }
+
+       return sg_dma_address(p->cur_sg) +
+               (sg_dma_len(p->cur_sg) -
+                p->cur_residue);
+}
+
+static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
+                                   struct scsi_cmnd *cmd)
+{
+       struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
+
+       if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+               return SCSI_SENSE_BUFFERSIZE -
+                       (ent->sense_ptr - cmd->sense_buffer);
+       }
+       return p->cur_residue;
+}
+
+static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
+                           struct scsi_cmnd *cmd, unsigned int len)
+{
+       struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
+
+       if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+               ent->sense_ptr += len;
+               return;
+       }
+
+       p->cur_residue -= len;
+       p->tot_residue -= len;
+       if (p->cur_residue < 0 || p->tot_residue < 0) {
+               printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
+                      esp->host->unique_id);
+               printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
+                      "len[%u]\n",
+                      esp->host->unique_id,
+                      p->cur_residue, p->tot_residue, len);
+               p->cur_residue = 0;
+               p->tot_residue = 0;
+       }
+       if (!p->cur_residue && p->tot_residue) {
+               p->cur_sg++;
+               p->cur_residue = sg_dma_len(p->cur_sg);
+       }
+}
+
+static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
+{
+       struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
+       int dir = cmd->sc_data_direction;
+
+       if (dir == DMA_NONE)
+               return;
+
+       esp->ops->unmap_sg(esp, cmd->request_buffer,
+                          spriv->u.num_sg, dir);
+}
+
+static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
+{
+       struct scsi_cmnd *cmd = ent->cmd;
+       struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
+
+       if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+               ent->saved_sense_ptr = ent->sense_ptr;
+               return;
+       }
+       ent->saved_cur_residue = spriv->cur_residue;
+       ent->saved_cur_sg = spriv->cur_sg;
+       ent->saved_tot_residue = spriv->tot_residue;
+}
+
+static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
+{
+       struct scsi_cmnd *cmd = ent->cmd;
+       struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
+
+       if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+               ent->sense_ptr = ent->saved_sense_ptr;
+               return;
+       }
+       spriv->cur_residue = ent->saved_cur_residue;
+       spriv->cur_sg = ent->saved_cur_sg;
+       spriv->tot_residue = ent->saved_tot_residue;
+}
+
+static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
+{
+       if (cmd->cmd_len == 6 ||
+           cmd->cmd_len == 10 ||
+           cmd->cmd_len == 12) {
+               esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
+       } else {
+               esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+       }
+}
+
+static void esp_write_tgt_config3(struct esp *esp, int tgt)
+{
+       if (esp->rev > ESP100A) {
+               u8 val = esp->target[tgt].esp_config3;
+
+               if (val != esp->prev_cfg3) {
+                       esp->prev_cfg3 = val;
+                       esp_write8(val, ESP_CFG3);
+               }
+       }
+}
+
+static void esp_write_tgt_sync(struct esp *esp, int tgt)
+{
+       u8 off = esp->target[tgt].esp_offset;
+       u8 per = esp->target[tgt].esp_period;
+
+       if (off != esp->prev_soff) {
+               esp->prev_soff = off;
+               esp_write8(off, ESP_SOFF);
+       }
+       if (per != esp->prev_stp) {
+               esp->prev_stp = per;
+               esp_write8(per, ESP_STP);
+       }
+}
+
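+/* Clamp a DMA transfer to what the chip's transfer counter can express
+ * and, on pre-HME variants, keep it from crossing a 24-bit address
+ * boundary.
+ */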
+static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
+{
+       if (esp->rev == FASHME) {
+               /* Arbitrary segment boundaries, 24-bit counts.  */
+               if (dma_len > (1U << 24))
+                       dma_len = (1U << 24);
+       } else {
+               u32 base, end;
+
+               /* The other ESP chip variants are limited to a 16-bit
+                * transfer count.  On FAS100A and FAS236 we could get a
+                * 24-bit transfer count by enabling ESP_CONFIG2_FENAB in
+                * the ESP_CFG2 register, but that causes other unwanted
+                * changes so we don't use it currently.
+                */
+               if (dma_len > (1U << 16))
+                       dma_len = (1U << 16);
+
+               /* All of the DMA variants hooked up to these chips
+                * cannot handle crossing a 24-bit address boundary.
+                */
+               base = dma_addr & ((1U << 24) - 1U);
+               end = base + dma_len;
+               if (end > (1U << 24))
+                       end = (1U << 24);
+               dma_len = end - base;
+       }
+       return dma_len;
+}
+
+static int esp_need_to_nego_wide(struct esp_target_data *tp)
+{
+       struct scsi_target *target = tp->starget;
+
+       return spi_width(target) != tp->nego_goal_width;
+}
+
+static int esp_need_to_nego_sync(struct esp_target_data *tp)
+{
+       struct scsi_target *target = tp->starget;
+
+       /* When offset is zero, period is "don't care".  */
+       if (!spi_offset(target) && !tp->nego_goal_offset)
+               return 0;
+
+       if (spi_offset(target) == tp->nego_goal_offset &&
+           spi_period(target) == tp->nego_goal_period)
+               return 0;
+
+       return 1;
+}
+
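+/* Per-LUN slot accounting: a LUN runs either one untagged command or any
+ * number of tagged ones.  'hold' plugs the tagged queue so a pending
+ * untagged command can run once the outstanding tags drain.
+ */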
+static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
+                            struct esp_lun_data *lp)
+{
+       if (!ent->tag[0]) {
+               /* Non-tagged, slot already taken?  */
+               if (lp->non_tagged_cmd)
+                       return -EBUSY;
+
+               if (lp->hold) {
+                       /* We are being held by active tagged
+                        * commands.
+                        */
+                       if (lp->num_tagged)
+                               return -EBUSY;
+
+                       /* Tagged commands completed, we can unplug
+                        * the queue and run this untagged command.
+                        */
+                       lp->hold = 0;
+               } else if (lp->num_tagged) {
+                       /* Plug the queue until num_tagged decreases
+                        * to zero in esp_free_lun_tag.
+                        */
+                       lp->hold = 1;
+                       return -EBUSY;
+               }
+
+               lp->non_tagged_cmd = ent;
+               return 0;
+       } else {
+               /* Tagged command, see if blocked by a
+                * non-tagged one.
+                */
+               if (lp->non_tagged_cmd || lp->hold)
+                       return -EBUSY;
+       }
+
+       BUG_ON(lp->tagged_cmds[ent->tag[1]]);
+
+       lp->tagged_cmds[ent->tag[1]] = ent;
+       lp->num_tagged++;
+
+       return 0;
+}
+
+static void esp_free_lun_tag(struct esp_cmd_entry *ent,
+                            struct esp_lun_data *lp)
+{
+       if (ent->tag[0]) {
+               BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
+               lp->tagged_cmds[ent->tag[1]] = NULL;
+               lp->num_tagged--;
+       } else {
+               BUG_ON(lp->non_tagged_cmd != ent);
+               lp->non_tagged_cmd = NULL;
+       }
+}
+
+/* When a contingent allegiance condition is created, we force feed a
+ * REQUEST_SENSE command to the device to fetch the sense data.  I
+ * tried many other schemes, relying on the scsi error handling layer
+ * to send out the REQUEST_SENSE automatically, but this was difficult
+ * to get right especially in the presence of applications like smartd
+ * which use SG_IO to send out their own REQUEST_SENSE commands.
+ */
+static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+       struct scsi_cmnd *cmd = ent->cmd;
+       struct scsi_device *dev = cmd->device;
+       int tgt, lun;
+       u8 *p, val;
+
+       tgt = dev->id;
+       lun = dev->lun;
+
+       if (!ent->sense_ptr) {
+               esp_log_autosense("esp%d: Doing auto-sense for "
+                                 "tgt[%d] lun[%d]\n",
+                                 esp->host->unique_id, tgt, lun);
+
+               ent->sense_ptr = cmd->sense_buffer;
+               ent->sense_dma = esp->ops->map_single(esp,
+                                                     ent->sense_ptr,
+                                                     SCSI_SENSE_BUFFERSIZE,
+                                                     DMA_FROM_DEVICE);
+       }
+       ent->saved_sense_ptr = ent->sense_ptr;
+
+       esp->active_cmd = ent;
+
+       p = esp->command_block;
+       esp->msg_out_len = 0;
+
+       *p++ = IDENTIFY(0, lun);
+       *p++ = REQUEST_SENSE;
+       *p++ = ((dev->scsi_level <= SCSI_2) ?
+               (lun << 5) : 0);
+       *p++ = 0;
+       *p++ = 0;
+       *p++ = SCSI_SENSE_BUFFERSIZE;
+       *p++ = 0;
+
+       esp->select_state = ESP_SELECT_BASIC;
+
+       val = tgt;
+       if (esp->rev == FASHME)
+               val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
+       esp_write8(val, ESP_BUSID);
+
+       esp_write_tgt_sync(esp, tgt);
+       esp_write_tgt_config3(esp, tgt);
+
+       val = (p - esp->command_block);
+
+       if (esp->rev == FASHME)
+               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+       esp->ops->send_dma_cmd(esp, esp->command_block_dma,
+                              val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
+}
+
+static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
+{
+       struct esp_cmd_entry *ent;
+
+       list_for_each_entry(ent, &esp->queued_cmds, list) {
+               struct scsi_cmnd *cmd = ent->cmd;
+               struct scsi_device *dev = cmd->device;
+               struct esp_lun_data *lp = dev->hostdata;
+
+               if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+                       ent->tag[0] = 0;
+                       ent->tag[1] = 0;
+                       return ent;
+               }
+
+               if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
+                       ent->tag[0] = 0;
+                       ent->tag[1] = 0;
+               }
+
+               if (esp_alloc_lun_tag(ent, lp) < 0)
+                       continue;
+
+               return ent;
+       }
+
+       return NULL;
+}
+
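+/* If the chip is idle, pull the next issuable command off the queue, set
+ * up any needed sync/wide negotiation messages, and start selection.
+ */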
+static void esp_maybe_execute_command(struct esp *esp)
+{
+       struct esp_target_data *tp;
+       struct esp_lun_data *lp;
+       struct scsi_device *dev;
+       struct scsi_cmnd *cmd;
+       struct esp_cmd_entry *ent;
+       int tgt, lun, i;
+       u32 val, start_cmd;
+       u8 *p;
+
+       if (esp->active_cmd ||
+           (esp->flags & ESP_FLAG_RESETTING))
+               return;
+
+       ent = find_and_prep_issuable_command(esp);
+       if (!ent)
+               return;
+
+       if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+               esp_autosense(esp, ent);
+               return;
+       }
+
+       cmd = ent->cmd;
+       dev = cmd->device;
+       tgt = dev->id;
+       lun = dev->lun;
+       tp = &esp->target[tgt];
+       lp = dev->hostdata;
+
+       list_del(&ent->list);
+       list_add(&ent->list, &esp->active_cmds);
+
+       esp->active_cmd = ent;
+
+       esp_map_dma(esp, cmd);
+       esp_save_pointers(esp, ent);
+
+       esp_check_command_len(esp, cmd);
+
+       p = esp->command_block;
+
+       esp->msg_out_len = 0;
+       if (tp->flags & ESP_TGT_CHECK_NEGO) {
+               /* Need to negotiate.  If the target is broken
+                * fall back to asynchronous, non-wide transfers.
+                */
+               if (tp->flags & ESP_TGT_BROKEN) {
+                       tp->flags &= ~ESP_TGT_DISCONNECT;
+                       tp->nego_goal_period = 0;
+                       tp->nego_goal_offset = 0;
+                       tp->nego_goal_width = 0;
+                       tp->nego_goal_tags = 0;
+               }
+
+               /* If the settings are not changing, skip this.  */
+               if (spi_width(tp->starget) == tp->nego_goal_width &&
+                   spi_period(tp->starget) == tp->nego_goal_period &&
+                   spi_offset(tp->starget) == tp->nego_goal_offset) {
+                       tp->flags &= ~ESP_TGT_CHECK_NEGO;
+                       goto build_identify;
+               }
+
+               if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
+                       esp->msg_out_len =
+                               spi_populate_width_msg(&esp->msg_out[0],
+                                                      (tp->nego_goal_width ?
+                                                       1 : 0));
+                       tp->flags |= ESP_TGT_NEGO_WIDE;
+               } else if (esp_need_to_nego_sync(tp)) {
+                       esp->msg_out_len =
+                               spi_populate_sync_msg(&esp->msg_out[0],
+                                                     tp->nego_goal_period,
+                                                     tp->nego_goal_offset);
+                       tp->flags |= ESP_TGT_NEGO_SYNC;
+               } else {
+                       tp->flags &= ~ESP_TGT_CHECK_NEGO;
+               }
+
+               /* Process it like a slow command.  */
+               if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
+                       esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+       }
+
+build_identify:
+       /* If we don't have a lun-data struct yet, we're probing
+        * so do not disconnect.  Also, do not disconnect unless
+        * we have a tag on this command.
+        */
+       if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
+               *p++ = IDENTIFY(1, lun);
+       else
+               *p++ = IDENTIFY(0, lun);
+
+       if (ent->tag[0] && esp->rev == ESP100) {
+               /* ESP100 lacks select w/atn3 command, use select
+                * and stop instead.
+                */
+               esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+       }
+
+       if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
+               start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
+               if (ent->tag[0]) {
+                       *p++ = ent->tag[0];
+                       *p++ = ent->tag[1];
+
+                       start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
+               }
+
+               for (i = 0; i < cmd->cmd_len; i++)
+                       *p++ = cmd->cmnd[i];
+
+               esp->select_state = ESP_SELECT_BASIC;
+       } else {
+               esp->cmd_bytes_left = cmd->cmd_len;
+               esp->cmd_bytes_ptr = &cmd->cmnd[0];
+
+               if (ent->tag[0]) {
+                       for (i = esp->msg_out_len - 1;
+                            i >= 0; i--)
+                               esp->msg_out[i + 2] = esp->msg_out[i];
+                       esp->msg_out[0] = ent->tag[0];
+                       esp->msg_out[1] = ent->tag[1];
+                       esp->msg_out_len += 2;
+               }
+
+               start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
+               esp->select_state = ESP_SELECT_MSGOUT;
+       }
+       val = tgt;
+       if (esp->rev == FASHME)
+               val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
+       esp_write8(val, ESP_BUSID);
+
+       esp_write_tgt_sync(esp, tgt);
+       esp_write_tgt_config3(esp, tgt);
+
+       val = (p - esp->command_block);
+
+       if (esp_debug & ESP_DEBUG_SCSICMD) {
+               printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
+               for (i = 0; i < cmd->cmd_len; i++)
+                       printk("%02x ", cmd->cmnd[i]);
+               printk("]\n");
+       }
+
+       if (esp->rev == FASHME)
+               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+       esp->ops->send_dma_cmd(esp, esp->command_block_dma,
+                              val, 16, 0, start_cmd);
+}
+
+static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
+{
+       struct list_head *head = &esp->esp_cmd_pool;
+       struct esp_cmd_entry *ret;
+
+       if (list_empty(head)) {
+               ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
+       } else {
+               ret = list_entry(head->next, struct esp_cmd_entry, list);
+               list_del(&ret->list);
+               memset(ret, 0, sizeof(*ret));
+       }
+       return ret;
+}
+
+static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
+{
+       list_add(&ent->list, &esp->esp_cmd_pool);
+}
+
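+/* Completion path: release DMA and tag resources, fold any autosense data
+ * into the result, hand the command back to the midlayer, and try to
+ * start the next queued command.
+ */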
+static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
+                           struct scsi_cmnd *cmd, unsigned int result)
+{
+       struct scsi_device *dev = cmd->device;
+       int tgt = dev->id;
+       int lun = dev->lun;
+
+       esp->active_cmd = NULL;
+       esp_unmap_dma(esp, cmd);
+       esp_free_lun_tag(ent, dev->hostdata);
+       cmd->result = result;
+
+       if (ent->eh_done) {
+               complete(ent->eh_done);
+               ent->eh_done = NULL;
+       }
+
+       if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+               esp->ops->unmap_single(esp, ent->sense_dma,
+                                      SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+               ent->sense_ptr = NULL;
+
+               /* Restore the message/status bytes to what we actually
+                * saw originally.  Also, report that we are providing
+                * the sense data.
+                */
+               cmd->result = ((DRIVER_SENSE << 24) |
+                              (DID_OK << 16) |
+                              (COMMAND_COMPLETE << 8) |
+                              (SAM_STAT_CHECK_CONDITION << 0));
+
+               ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
+               if (esp_debug & ESP_DEBUG_AUTOSENSE) {
+                       int i;
+
+                       printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
+                              esp->host->unique_id, tgt, lun);
+                       for (i = 0; i < 18; i++)
+                               printk("%02x ", cmd->sense_buffer[i]);
+                       printk("]\n");
+               }
+       }
+
+       cmd->scsi_done(cmd);
+
+       list_del(&ent->list);
+       esp_put_ent(esp, ent);
+
+       esp_maybe_execute_command(esp);
+}
+
+static unsigned int compose_result(unsigned int status, unsigned int message,
+                                  unsigned int driver_code)
+{
+       return (status | (message << 8) | (driver_code << 16));
+}
+
+static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
+{
+       struct scsi_device *dev = ent->cmd->device;
+       struct esp_lun_data *lp = dev->hostdata;
+
+       scsi_track_queue_full(dev, lp->num_tagged - 1);
+}
+
+static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+       struct scsi_device *dev = cmd->device;
+       struct esp *esp = host_to_esp(dev->host);
+       struct esp_cmd_priv *spriv;
+       struct esp_cmd_entry *ent;
+
+       ent = esp_get_ent(esp);
+       if (!ent)
+               return SCSI_MLQUEUE_HOST_BUSY;
+
+       ent->cmd = cmd;
+
+       cmd->scsi_done = done;
+
+       spriv = ESP_CMD_PRIV(cmd);
+       spriv->u.dma_addr = ~(dma_addr_t)0x0;
+
+       list_add_tail(&ent->list, &esp->queued_cmds);
+
+       esp_maybe_execute_command(esp);
+
+       return 0;
+}
+
+static int esp_check_gross_error(struct esp *esp)
+{
+       if (esp->sreg & ESP_STAT_SPAM) {
+               /* Gross Error, could be one of:
+                * - top of fifo overwritten
+                * - top of command register overwritten
+                * - DMA programmed with wrong direction
+                * - improper phase change
+                */
+               printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
+                      esp->host->unique_id, esp->sreg);
+               /* XXX Reset the chip. XXX */
+               return 1;
+       }
+       return 0;
+}
+
+static int esp_check_spur_intr(struct esp *esp)
+{
+       switch (esp->rev) {
+       case ESP100:
+       case ESP100A:
+               /* The interrupt pending bit of the status register cannot
+                * be trusted on these revisions.
+                */
+               esp->sreg &= ~ESP_STAT_INTR;
+               break;
+
+       default:
+               if (!(esp->sreg & ESP_STAT_INTR)) {
+                       esp->ireg = esp_read8(ESP_INTRPT);
+                       if (esp->ireg & ESP_INTR_SR)
+                               return 1;
+
+                       /* If the DMA is indicating interrupt pending and the
+                        * ESP is not, the only possibility is a DMA error.
+                        */
+                       if (!esp->ops->dma_error(esp)) {
+                               printk(KERN_ERR PFX "esp%d: Spurious irq, "
+                                      "sreg=%x.\n",
+                                      esp->host->unique_id, esp->sreg);
+                               return -1;
+                       }
+
+                       printk(KERN_ERR PFX "esp%d: DMA error\n",
+                              esp->host->unique_id);
+
+                       /* XXX Reset the chip. XXX */
+                       return -1;
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static void esp_schedule_reset(struct esp *esp)
+{
+       esp_log_reset("ESP: esp_schedule_reset() from %p\n",
+                     __builtin_return_address(0));
+       esp->flags |= ESP_FLAG_RESETTING;
+       esp_event(esp, ESP_EVENT_RESET);
+}
+
+/* In order to avoid having to add a special half-reconnected state
+ * into the driver we just sit here and poll through the rest of
+ * the reselection process to get the tag message bytes.
+ */
+static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
+                                                   struct esp_lun_data *lp)
+{
+       struct esp_cmd_entry *ent;
+       int i;
+
+       if (!lp->num_tagged) {
+               printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
+                      esp->host->unique_id);
+               return NULL;
+       }
+
+       esp_log_reconnect("ESP: reconnect tag, ");
+
+       for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
+               if (esp->ops->irq_pending(esp))
+                       break;
+       }
+       if (i == ESP_QUICKIRQ_LIMIT) {
+               printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
+                      esp->host->unique_id);
+               return NULL;
+       }
+
+       esp->sreg = esp_read8(ESP_STATUS);
+       esp->ireg = esp_read8(ESP_INTRPT);
+
+       esp_log_reconnect("IRQ(%d:%x:%x), ",
+                         i, esp->ireg, esp->sreg);
+
+       if (esp->ireg & ESP_INTR_DC) {
+               printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
+                      esp->host->unique_id);
+               return NULL;
+       }
+
+       if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
+               printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
+                      esp->host->unique_id, esp->sreg);
+               return NULL;
+       }
+
+       /* DMA in the tag bytes... */
+       esp->command_block[0] = 0xff;
+       esp->command_block[1] = 0xff;
+       esp->ops->send_dma_cmd(esp, esp->command_block_dma,
+                              2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);
+
+       /* ACK the message.  */
+       scsi_esp_cmd(esp, ESP_CMD_MOK);
+
+       for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
+               if (esp->ops->irq_pending(esp)) {
+                       esp->sreg = esp_read8(ESP_STATUS);
+                       esp->ireg = esp_read8(ESP_INTRPT);
+                       if (esp->ireg & ESP_INTR_FDONE)
+                               break;
+               }
+               udelay(1);
+       }
+       if (i == ESP_RESELECT_TAG_LIMIT) {
+               printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
+                      esp->host->unique_id);
+               return NULL;
+       }
+       esp->ops->dma_drain(esp);
+       esp->ops->dma_invalidate(esp);
+
+       esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
+                         i, esp->ireg, esp->sreg,
+                         esp->command_block[0],
+                         esp->command_block[1]);
+
+       if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
+           esp->command_block[0] > ORDERED_QUEUE_TAG) {
+               printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
+                      "type %02x.\n",
+                      esp->host->unique_id, esp->command_block[0]);
+               return NULL;
+       }
+
+       ent = lp->tagged_cmds[esp->command_block[1]];
+       if (!ent) {
+               printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
+                      "tag %02x.\n",
+                      esp->host->unique_id, esp->command_block[1]);
+               return NULL;
+       }
+
+       return ent;
+}
+
+static int esp_reconnect(struct esp *esp)
+{
+       struct esp_cmd_entry *ent;
+       struct esp_target_data *tp;
+       struct esp_lun_data *lp;
+       struct scsi_device *dev;
+       int target, lun;
+
+       BUG_ON(esp->active_cmd);
+       if (esp->rev == FASHME) {
+               /* FASHME puts the target and lun numbers directly
+                * into the fifo.
+                */
+               target = esp->fifo[0];
+               lun = esp->fifo[1] & 0x7;
+       } else {
+               u8 bits = esp_read8(ESP_FDATA);
+
+               /* Older chips put the lun directly into the fifo, but
+                * the target is given as a sample of the arbitration
+                * lines on the bus at reselection time.  So we should
+                * see the ID of the ESP and the one reconnecting target
+                * set in the bitmap.
+                */
+               if (!(bits & esp->scsi_id_mask))
+                       goto do_reset;
+               bits &= ~esp->scsi_id_mask;
+               if (!bits || (bits & (bits - 1)))
+                       goto do_reset;
+
+               target = ffs(bits) - 1;
+               lun = (esp_read8(ESP_FDATA) & 0x7);
+
+               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+               if (esp->rev == ESP100) {
+                       u8 ireg = esp_read8(ESP_INTRPT);
+                       /* This chip has a bug during reselection that can
+                        * cause a spurious illegal-command interrupt, which
+                        * we simply ACK here.  Another possibility is a bus
+                        * reset so we must check for that.
+                        */
+                       if (ireg & ESP_INTR_SR)
+                               goto do_reset;
+               }
+               scsi_esp_cmd(esp, ESP_CMD_NULL);
+       }
+
+       esp_write_tgt_sync(esp, target);
+       esp_write_tgt_config3(esp, target);
+
+       scsi_esp_cmd(esp, ESP_CMD_MOK);
+
+       if (esp->rev == FASHME)
+               esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
+                          ESP_BUSID);
+
+       tp = &esp->target[target];
+       dev = __scsi_device_lookup_by_target(tp->starget, lun);
+       if (!dev) {
+               printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
+                      "tgt[%u] lun[%u]\n",
+                      esp->host->unique_id, target, lun);
+               goto do_reset;
+       }
+       lp = dev->hostdata;
+
+       ent = lp->non_tagged_cmd;
+       if (!ent) {
+               ent = esp_reconnect_with_tag(esp, lp);
+               if (!ent)
+                       goto do_reset;
+       }
+
+       esp->active_cmd = ent;
+
+       if (ent->flags & ESP_CMD_FLAG_ABORT) {
+               esp->msg_out[0] = ABORT_TASK_SET;
+               esp->msg_out_len = 1;
+               scsi_esp_cmd(esp, ESP_CMD_SATN);
+       }
+
+       esp_event(esp, ESP_EVENT_CHECK_PHASE);
+       esp_restore_pointers(esp, ent);
+       esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+       return 1;
+
+do_reset:
+       esp_schedule_reset(esp);
+       return 0;
+}
+
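+/* Handle the interrupt that ends a selection attempt: we may have lost
+ * out to a reselecting target, seen the target fail to respond, or
+ * completed selection and be ready to move to the next bus phase.
+ */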
+static int esp_finish_select(struct esp *esp)
+{
+       struct esp_cmd_entry *ent;
+       struct scsi_cmnd *cmd;
+       u8 orig_select_state;
+
+       orig_select_state = esp->select_state;
+
+       /* No longer selecting.  */
+       esp->select_state = ESP_SELECT_NONE;
+
+       esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
+       ent = esp->active_cmd;
+       cmd = ent->cmd;
+
+       if (esp->ops->dma_error(esp)) {
+               /* If we see a DMA error during or as a result of selection,
+                * all bets are off.
+                */
+               esp_schedule_reset(esp);
+               esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
+               return 0;
+       }
+
+       esp->ops->dma_invalidate(esp);
+
+       if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
+               struct esp_target_data *tp = &esp->target[cmd->device->id];
+
+               /* Carefully back out of the selection attempt.  Release
+                * resources (such as DMA mapping & TAG) and reset state (such
+                * as message out and command delivery variables).
+                */
+               if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
+                       esp_unmap_dma(esp, cmd);
+                       esp_free_lun_tag(ent, cmd->device->hostdata);
+                       tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
+                       esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
+                       esp->cmd_bytes_ptr = NULL;
+                       esp->cmd_bytes_left = 0;
+               } else {
+                       esp->ops->unmap_single(esp, ent->sense_dma,
+                                              SCSI_SENSE_BUFFERSIZE,
+                                              DMA_FROM_DEVICE);
+                       ent->sense_ptr = NULL;
+               }
+
+               /* Now that the state is unwound properly, put back onto
+                * the issue queue.  This command is no longer active.
+                */
+               list_del(&ent->list);
+               list_add(&ent->list, &esp->queued_cmds);
+               esp->active_cmd = NULL;
+
+               /* Return value ignored by caller, it directly invokes
+                * esp_reconnect().
+                */
+               return 0;
+       }
+
+       if (esp->ireg == ESP_INTR_DC) {
+               struct scsi_device *dev = cmd->device;
+
+               /* Disconnect.  Make sure we re-negotiate sync and
+                * wide parameters if this target starts responding
+                * again in the future.
+                */
+               esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
+
+               scsi_esp_cmd(esp, ESP_CMD_ESEL);
+               esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
+               return 1;
+       }
+
+       if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
+               /* Selection successful.  On pre-FAST chips we have
+                * to do a NOP and possibly clean out the FIFO.
+                */
+               if (esp->rev <= ESP236) {
+                       int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+                       scsi_esp_cmd(esp, ESP_CMD_NULL);
+
+                       if (!fcnt &&
+                           (!esp->prev_soff ||
+                            ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
+                               esp_flush_fifo(esp);
+               }
+
+               /* If we are doing a slow command, negotiation, etc.
+                * we'll do the right thing as we transition to the
+                * next phase.
+                */
+               esp_event(esp, ESP_EVENT_CHECK_PHASE);
+               return 0;
+       }
+
+       printk("ESP: Unexpected selection completion ireg[%x].\n",
+              esp->ireg);
+       esp_schedule_reset(esp);
+       return 0;
+}
+
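+/* Work out how many bytes actually moved: start from the programmed DMA
+ * length and subtract the chip's residual transfer count, plus any unsent
+ * bytes still sitting in the fifo during data-out.
+ */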
+static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
+                              struct scsi_cmnd *cmd)
+{
+       int fifo_cnt, ecount, bytes_sent, flush_fifo;
+
+       fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+       if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
+               fifo_cnt <<= 1;
+
+       ecount = 0;
+       if (!(esp->sreg & ESP_STAT_TCNT)) {
+               ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
+                         (((unsigned int)esp_read8(ESP_TCMED)) << 8));
+               if (esp->rev == FASHME)
+                       ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
+       }
+
+       bytes_sent = esp->data_dma_len;
+       bytes_sent -= ecount;
+
+       if (!(ent->flags & ESP_CMD_FLAG_WRITE))
+               bytes_sent -= fifo_cnt;
+
+       flush_fifo = 0;
+       if (!esp->prev_soff) {
+               /* Asynchronous data transfer, always flush fifo. */
+               flush_fifo = 1;
+       } else {
+               if (esp->rev == ESP100) {
+                       u32 fflags, phase;
+
+                       /* ESP100 has a chip bug where in the synchronous data
+                        * phase it can mistake a final long REQ pulse from the
+                        * target as an extra data byte.  Fun.
+                        *
+                        * To detect this case we resample the status register
+                        * and fifo flags.  If we're still in a data phase and
+                        * we see spurious chunks in the fifo, we return error
+                        * to the caller which should reset and set things up
+                        * such that we only try future transfers to this
+                        * target in synchronous mode.
+                        */
+                       esp->sreg = esp_read8(ESP_STATUS);
+                       phase = esp->sreg & ESP_STAT_PMASK;
+                       fflags = esp_read8(ESP_FFLAGS);
+
+                       if ((phase == ESP_DOP &&
+                            (fflags & ESP_FF_ONOTZERO)) ||
+                           (phase == ESP_DIP &&
+                            (fflags & ESP_FF_FBYTES)))
+                               return -1;
+               }
+               if (!(ent->flags & ESP_CMD_FLAG_WRITE))
+                       flush_fifo = 1;
+       }
+
+       if (flush_fifo)
+               esp_flush_fifo(esp);
+
+       return bytes_sent;
+}
+
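+/* Record a negotiated transfer agreement both in the SPI transport
+ * attributes and in the chip's offset/period registers, updating the
+ * per-target config3 fast-SCSI bit as needed.
+ */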
+static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
+                       u8 scsi_period, u8 scsi_offset,
+                       u8 esp_stp, u8 esp_soff)
+{
+       spi_period(tp->starget) = scsi_period;
+       spi_offset(tp->starget) = scsi_offset;
+       spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;
+
+       if (esp_soff) {
+               esp_stp &= 0x1f;
+               esp_soff |= esp->radelay;
+               if (esp->rev >= FAS236) {
+                       u8 bit = ESP_CONFIG3_FSCSI;
+                       if (esp->rev >= FAS100A)
+                               bit = ESP_CONFIG3_FAST;
+
+                       if (scsi_period < 50) {
+                               if (esp->rev == FASHME)
+                                       esp_soff &= ~esp->radelay;
+                               tp->esp_config3 |= bit;
+                       } else {
+                               tp->esp_config3 &= ~bit;
+                       }
+                       esp->prev_cfg3 = tp->esp_config3;
+                       esp_write8(esp->prev_cfg3, ESP_CFG3);
+               }
+       }
+
+       tp->esp_period = esp->prev_stp = esp_stp;
+       tp->esp_offset = esp->prev_soff = esp_soff;
+
+       esp_write8(esp_soff, ESP_SOFF);
+       esp_write8(esp_stp, ESP_STP);
+
+       tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
+
+       spi_display_xfer_agreement(tp->starget);
+}
+
+static void esp_msgin_reject(struct esp *esp)
+{
+       struct esp_cmd_entry *ent = esp->active_cmd;
+       struct scsi_cmnd *cmd = ent->cmd;
+       struct esp_target_data *tp;
+       int tgt;
+
+       tgt = cmd->device->id;
+       tp = &esp->target[tgt];
+
+       if (tp->flags & ESP_TGT_NEGO_WIDE) {
+               tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);
+
+               if (!esp_need_to_nego_sync(tp)) {
+                       tp->flags &= ~ESP_TGT_CHECK_NEGO;
+                       scsi_esp_cmd(esp, ESP_CMD_RATN);
+               } else {
+                       esp->msg_out_len =
+                               spi_populate_sync_msg(&esp->msg_out[0],
+                                                     tp->nego_goal_period,
+                                                     tp->nego_goal_offset);
+                       tp->flags |= ESP_TGT_NEGO_SYNC;
+                       scsi_esp_cmd(esp, ESP_CMD_SATN);
+               }
+               return;
+       }
+
+       if (tp->flags & ESP_TGT_NEGO_SYNC) {
+               tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
+               tp->esp_period = 0;
+               tp->esp_offset = 0;
+               esp_setsync(esp, tp, 0, 0, 0, 0);
+               scsi_esp_cmd(esp, ESP_CMD_RATN);
+               return;
+       }
+
+       esp->msg_out[0] = ABORT_TASK_SET;
+       esp->msg_out_len = 1;
+       scsi_esp_cmd(esp, ESP_CMD_SATN);
+}
+
+static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
+{
+       u8 period = esp->msg_in[3];
+       u8 offset = esp->msg_in[4];
+       u8 stp;
+
+       if (!(tp->flags & ESP_TGT_NEGO_SYNC))
+               goto do_reject;
+
+       if (offset > 15)
+               goto do_reject;
+
+       if (offset) {
+               int rounded_up, one_clock;
+
+               if (period > esp->max_period) {
+                       period = offset = 0;
+                       goto do_sdtr;
+               }
+               if (period < esp->min_period)
+                       goto do_reject;
+
+               one_clock = esp->ccycle / 1000;
+               rounded_up = (period << 2);
+               rounded_up = (rounded_up + one_clock - 1) / one_clock;
+               stp = rounded_up;
+               if (stp && esp->rev >= FAS236) {
+                       if (stp >= 50)
+                               stp--;
+               }
+       } else {
+               stp = 0;
+       }
+
+       esp_setsync(esp, tp, period, offset, stp, offset);
+       return;
+
+do_reject:
+       esp->msg_out[0] = MESSAGE_REJECT;
+       esp->msg_out_len = 1;
+       scsi_esp_cmd(esp, ESP_CMD_SATN);
+       return;
+
+do_sdtr:
+       tp->nego_goal_period = period;
+       tp->nego_goal_offset = offset;
+       esp->msg_out_len =
+               spi_populate_sync_msg(&esp->msg_out[0],
+                                     tp->nego_goal_period,
+                                     tp->nego_goal_offset);
+       scsi_esp_cmd(esp, ESP_CMD_SATN);
+}
+
+static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
+{
+       int size = 8 << esp->msg_in[3];
+       u8 cfg3;
+
+       if (esp->rev != FASHME)
+               goto do_reject;
+
+       if (size != 8 && size != 16)
+               goto do_reject;
+
+       if (!(tp->flags & ESP_TGT_NEGO_WIDE))
+               goto do_reject;
+
+       cfg3 = tp->esp_config3;
+       if (size == 16) {
+               tp->flags |= ESP_TGT_WIDE;
+               cfg3 |= ESP_CONFIG3_EWIDE;
+       } else {
+               tp->flags &= ~ESP_TGT_WIDE;
+               cfg3 &= ~ESP_CONFIG3_EWIDE;
+       }
+       tp->esp_config3 = cfg3;
+       esp->prev_cfg3 = cfg3;
+       esp_write8(cfg3, ESP_CFG3);
+
+       tp->flags &= ~ESP_TGT_NEGO_WIDE;
+
+       spi_period(tp->starget) = 0;
+       spi_offset(tp->starget) = 0;
+       if (!esp_need_to_nego_sync(tp)) {
+               tp->flags &= ~ESP_TGT_CHECK_NEGO;
+               scsi_esp_cmd(esp, ESP_CMD_RATN);
+       } else {
+               esp->msg_out_len =
+                       spi_populate_sync_msg(&esp->msg_out[0],
+                                             tp->nego_goal_period,
+                                             tp->nego_goal_offset);
+               tp->flags |= ESP_TGT_NEGO_SYNC;
+               scsi_esp_cmd(esp, ESP_CMD_SATN);
+       }
+       return;
+
+do_reject:
+       esp->msg_out[0] = MESSAGE_REJECT;
+       esp->msg_out_len = 1;
+       scsi_esp_cmd(esp, ESP_CMD_SATN);
+}
+
+static void esp_msgin_extended(struct esp *esp)
+{
+       struct esp_cmd_entry *ent = esp->active_cmd;
+       struct scsi_cmnd *cmd = ent->cmd;
+       struct esp_target_data *tp;
+       int tgt = cmd->device->id;
+
+       tp = &esp->target[tgt];
+       if (esp->msg_in[2] == EXTENDED_SDTR) {
+               esp_msgin_sdtr(esp, tp);
+               return;
+       }
+       if (esp->msg_in[2] == EXTENDED_WDTR) {
+               esp_msgin_wdtr(esp, tp);
+               return;
+       }
+
+       printk("ESP: Unexpected extended msg type %x\n",
+              esp->msg_in[2]);
+
+       esp->msg_out[0] = ABORT_TASK_SET;
+       esp->msg_out_len = 1;
+       scsi_esp_cmd(esp, ESP_CMD_SATN);
+}
+
+/* Analyze msgin bytes received from target so far.  Return non-zero
+ * if there are more bytes needed to complete the message.
+ */
+static int esp_msgin_process(struct esp *esp)
+{
+       u8 msg0 = esp->msg_in[0];
+       int len = esp->msg_in_len;
+
+       if (msg0 & 0x80) {
+               /* Identify */
+               printk("ESP: Unexpected msgin identify\n");
+               return 0;
+       }
+
+       switch (msg0) {
+       case EXTENDED_MESSAGE:
+               if (len == 1)
+                       return 1;
+               if (len < esp->msg_in[1] + 2)
+                       return 1;
+               esp_msgin_extended(esp);
+               return 0;
+
+       case IGNORE_WIDE_RESIDUE: {
+               struct esp_cmd_entry *ent;
+               struct esp_cmd_priv *spriv;
+               if (len == 1)
+                       return 1;
+
+               if (esp->msg_in[1] != 1)
+                       goto do_reject;
+
+               ent = esp->active_cmd;
+               spriv = ESP_CMD_PRIV(ent->cmd);
+
+               if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
+                       spriv->cur_sg--;
+                       spriv->cur_residue = 1;
+               } else
+                       spriv->cur_residue++;
+               spriv->tot_residue++;
+               return 0;
+       }
+       case NOP:
+               return 0;
+       case RESTORE_POINTERS:
+               esp_restore_pointers(esp, esp->active_cmd);
+               return 0;
+       case SAVE_POINTERS:
+               esp_save_pointers(esp, esp->active_cmd);
+               return 0;
+
+       case COMMAND_COMPLETE:
+       case DISCONNECT: {
+               struct esp_cmd_entry *ent = esp->active_cmd;
+
+               ent->message = msg0;
+               esp_event(esp, ESP_EVENT_FREE_BUS);
+               esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+               return 0;
+       }
+       case MESSAGE_REJECT:
+               esp_msgin_reject(esp);
+               return 0;
+
+       default:
+       do_reject:
+               esp->msg_out[0] = MESSAGE_REJECT;
+               esp->msg_out_len = 1;
+               scsi_esp_cmd(esp, ESP_CMD_SATN);
+               return 0;
+       }
+}
+
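+/* Core of the driver's state machine: dispatch on the current event and
+ * chain directly into the next one via 'goto again' where the bus phase
+ * is already known.
+ */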
+static int esp_process_event(struct esp *esp)
+{
+       int write;
+
+again:
+       write = 0;
+       switch (esp->event) {
+       case ESP_EVENT_CHECK_PHASE:
+               switch (esp->sreg & ESP_STAT_PMASK) {
+               case ESP_DOP:
+                       esp_event(esp, ESP_EVENT_DATA_OUT);
+                       break;
+               case ESP_DIP:
+                       esp_event(esp, ESP_EVENT_DATA_IN);
+                       break;
+               case ESP_STATP:
+                       esp_flush_fifo(esp);
+                       scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
+                       esp_event(esp, ESP_EVENT_STATUS);
+                       esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+                       return 1;
+
+               case ESP_MOP:
+                       esp_event(esp, ESP_EVENT_MSGOUT);
+                       break;
+
+               case ESP_MIP:
+                       esp_event(esp, ESP_EVENT_MSGIN);
+                       break;
+
+               case ESP_CMDP:
+                       esp_event(esp, ESP_EVENT_CMD_START);
+                       break;
+
+               default:
+                       printk("ESP: Unexpected phase, sreg=%02x\n",
+                              esp->sreg);
+                       esp_schedule_reset(esp);
+                       return 0;
+               }
+               goto again;
+               break;
+
+       case ESP_EVENT_DATA_IN:
+               write = 1;
+               /* fallthru */
+
+       case ESP_EVENT_DATA_OUT: {
+               struct esp_cmd_entry *ent = esp->active_cmd;
+               struct scsi_cmnd *cmd = ent->cmd;
+               dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
+               unsigned int dma_len = esp_cur_dma_len(ent, cmd);
+
+               if (esp->rev == ESP100)
+                       scsi_esp_cmd(esp, ESP_CMD_NULL);
+
+               if (write)
+                       ent->flags |= ESP_CMD_FLAG_WRITE;
+               else
+                       ent->flags &= ~ESP_CMD_FLAG_WRITE;
+
+               dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
+               esp->data_dma_len = dma_len;
+
+               if (!dma_len) {
+                       printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
+                              esp->host->unique_id);
+                       printk(KERN_ERR PFX "esp%d: cur adr[%08x] len[%08x]\n",
+                              esp->host->unique_id,
+                              esp_cur_dma_addr(ent, cmd),
+                              esp_cur_dma_len(ent, cmd));
+                       esp_schedule_reset(esp);
+                       return 0;
+               }
+
+               esp_log_datastart("ESP: start data addr[%08x] len[%u] "
+                                 "write(%d)\n",
+                                 dma_addr, dma_len, write);
+
+               esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
+                                      write, ESP_CMD_DMA | ESP_CMD_TI);
+               esp_event(esp, ESP_EVENT_DATA_DONE);
+               break;
+       }
+       case ESP_EVENT_DATA_DONE: {
+               struct esp_cmd_entry *ent = esp->active_cmd;
+               struct scsi_cmnd *cmd = ent->cmd;
+               int bytes_sent;
+
+               if (esp->ops->dma_error(esp)) {
+                       printk("ESP: data done, DMA error, resetting\n");
+                       esp_schedule_reset(esp);
+                       return 0;
+               }
+
+               if (ent->flags & ESP_CMD_FLAG_WRITE) {
+                       /* XXX parity errors, etc. XXX */
+
+                       esp->ops->dma_drain(esp);
+               }
+               esp->ops->dma_invalidate(esp);
+
+               if (esp->ireg != ESP_INTR_BSERV) {
+                       /* We should always see exactly a bus-service
+                        * interrupt at the end of a successful transfer.
+                        */
+                       printk("ESP: data done, not BSERV, resetting\n");
+                       esp_schedule_reset(esp);
+                       return 0;
+               }
+
+               bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
+
+               esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
+                                ent->flags, bytes_sent);
+
+               if (bytes_sent < 0) {
+                       /* XXX force sync mode for this target XXX */
+                       esp_schedule_reset(esp);
+                       return 0;
+               }
+
+               esp_advance_dma(esp, ent, cmd, bytes_sent);
+               esp_event(esp, ESP_EVENT_CHECK_PHASE);
+               goto again;
+               break;
+       }
+
+       case ESP_EVENT_STATUS: {
+               struct esp_cmd_entry *ent = esp->active_cmd;
+
+               if (esp->ireg & ESP_INTR_FDONE) {
+                       ent->status = esp_read8(ESP_FDATA);
+                       ent->message = esp_read8(ESP_FDATA);
+                       scsi_esp_cmd(esp, ESP_CMD_MOK);
+               } else if (esp->ireg == ESP_INTR_BSERV) {
+                       ent->status = esp_read8(ESP_FDATA);
+                       ent->message = 0xff;
+                       esp_event(esp, ESP_EVENT_MSGIN);
+                       return 0;
+               }
+
+               if (ent->message != COMMAND_COMPLETE) {
+                       printk("ESP: Unexpected message %x in status\n",
+                              ent->message);
+                       esp_schedule_reset(esp);
+                       return 0;
+               }
+
+               esp_event(esp, ESP_EVENT_FREE_BUS);
+               esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+               break;
+       }
+       case ESP_EVENT_FREE_BUS: {
+               struct esp_cmd_entry *ent = esp->active_cmd;
+               struct scsi_cmnd *cmd = ent->cmd;
+
+               if (ent->message == COMMAND_COMPLETE ||
+                   ent->message == DISCONNECT)
+                       scsi_esp_cmd(esp, ESP_CMD_ESEL);
+
+               if (ent->message == COMMAND_COMPLETE) {
+                       esp_log_cmddone("ESP: Command done status[%x] "
+                                       "message[%x]\n",
+                                       ent->status, ent->message);
+                       if (ent->status == SAM_STAT_TASK_SET_FULL)
+                               esp_event_queue_full(esp, ent);
+
+                       if (ent->status == SAM_STAT_CHECK_CONDITION &&
+                           !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
+                               ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
+                               esp_autosense(esp, ent);
+                       } else {
+                               esp_cmd_is_done(esp, ent, cmd,
+                                               compose_result(ent->status,
+                                                              ent->message,
+                                                              DID_OK));
+                       }
+               } else if (ent->message == DISCONNECT) {
+                       esp_log_disconnect("ESP: Disconnecting tgt[%d] "
+                                          "tag[%x:%x]\n",
+                                          cmd->device->id,
+                                          ent->tag[0], ent->tag[1]);
+
+                       esp->active_cmd = NULL;
+                       esp_maybe_execute_command(esp);
+               } else {
+                       printk("ESP: Unexpected message %x in freebus\n",
+                              ent->message);
+                       esp_schedule_reset(esp);
+                       return 0;
+               }
+               if (esp->active_cmd)
+                       esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+               break;
+       }
+       case ESP_EVENT_MSGOUT: {
+               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+               if (esp_debug & ESP_DEBUG_MSGOUT) {
+                       int i;
+                       printk("ESP: Sending message [ ");
+                       for (i = 0; i < esp->msg_out_len; i++)
+                               printk("%02x ", esp->msg_out[i]);
+                       printk("]\n");
+               }
+
+               if (esp->rev == FASHME) {
+                       int i;
+
+                       /* Always use the fifo.  */
+                       for (i = 0; i < esp->msg_out_len; i++) {
+                               esp_write8(esp->msg_out[i], ESP_FDATA);
+                               esp_write8(0, ESP_FDATA);
+                       }
+                       scsi_esp_cmd(esp, ESP_CMD_TI);
+               } else {
+                       if (esp->msg_out_len == 1) {
+                               esp_write8(esp->msg_out[0], ESP_FDATA);
+                               scsi_esp_cmd(esp, ESP_CMD_TI);
+                       } else {
+                               /* Use DMA. */
+                               memcpy(esp->command_block,
+                                      esp->msg_out,
+                                      esp->msg_out_len);
+
+                               esp->ops->send_dma_cmd(esp,
+                                                      esp->command_block_dma,
+                                                      esp->msg_out_len,
+                                                      esp->msg_out_len,
+                                                      0,
+                                                      ESP_CMD_DMA|ESP_CMD_TI);
+                       }
+               }
+               esp_event(esp, ESP_EVENT_MSGOUT_DONE);
+               break;
+       }
+       case ESP_EVENT_MSGOUT_DONE:
+               if (esp->rev == FASHME) {
+                       scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+               } else {
+                       if (esp->msg_out_len > 1)
+                               esp->ops->dma_invalidate(esp);
+               }
+
+               if (!(esp->ireg & ESP_INTR_DC)) {
+                       if (esp->rev != FASHME)
+                               scsi_esp_cmd(esp, ESP_CMD_NULL);
+               }
+               esp_event(esp, ESP_EVENT_CHECK_PHASE);
+               goto again;
+       case ESP_EVENT_MSGIN:
+               if (esp->ireg & ESP_INTR_BSERV) {
+                       if (esp->rev == FASHME) {
+                               if (!(esp_read8(ESP_STATUS2) &
+                                     ESP_STAT2_FEMPTY))
+                                       scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+                       } else {
+                               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+                               if (esp->rev == ESP100)
+                                       scsi_esp_cmd(esp, ESP_CMD_NULL);
+                       }
+                       scsi_esp_cmd(esp, ESP_CMD_TI);
+                       esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+                       return 1;
+               }
+               if (esp->ireg & ESP_INTR_FDONE) {
+                       u8 val;
+
+                       if (esp->rev == FASHME)
+                               val = esp->fifo[0];
+                       else
+                               val = esp_read8(ESP_FDATA);
+                       esp->msg_in[esp->msg_in_len++] = val;
+
+                       esp_log_msgin("ESP: Got msgin byte %x\n", val);
+
+                       if (!esp_msgin_process(esp))
+                               esp->msg_in_len = 0;
+
+                       if (esp->rev == FASHME)
+                               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+                       scsi_esp_cmd(esp, ESP_CMD_MOK);
+
+                       if (esp->event != ESP_EVENT_FREE_BUS)
+                               esp_event(esp, ESP_EVENT_CHECK_PHASE);
+               } else {
+                       printk("ESP: MSGIN neither BSERV nor FDONE, resetting\n");
+                       esp_schedule_reset(esp);
+                       return 0;
+               }
+               break;
+       case ESP_EVENT_CMD_START:
+               memcpy(esp->command_block, esp->cmd_bytes_ptr,
+                      esp->cmd_bytes_left);
+               if (esp->rev == FASHME)
+                       scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+               esp->ops->send_dma_cmd(esp, esp->command_block_dma,
+                                      esp->cmd_bytes_left, 16, 0,
+                                      ESP_CMD_DMA | ESP_CMD_TI);
+               esp_event(esp, ESP_EVENT_CMD_DONE);
+               esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
+               break;
+       case ESP_EVENT_CMD_DONE:
+               esp->ops->dma_invalidate(esp);
+               if (esp->ireg & ESP_INTR_BSERV) {
+                       esp_event(esp, ESP_EVENT_CHECK_PHASE);
+                       goto again;
+               }
+               esp_schedule_reset(esp);
+               return 0;
+
+       case ESP_EVENT_RESET:
+               scsi_esp_cmd(esp, ESP_CMD_RS);
+               break;
+
+       default:
+               printk("ESP: Unexpected event %x, resetting\n",
+                      esp->event);
+               esp_schedule_reset(esp);
+               return 0;
+       }
+       return 1;
+}
+
+static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
+{
+       struct scsi_cmnd *cmd = ent->cmd;
+
+       esp_unmap_dma(esp, cmd);
+       esp_free_lun_tag(ent, cmd->device->hostdata);
+       cmd->result = DID_RESET << 16;
+
+       if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
+               esp->ops->unmap_single(esp, ent->sense_dma,
+                                      SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+               ent->sense_ptr = NULL;
+       }
+
+       cmd->scsi_done(cmd);
+       list_del(&ent->list);
+       esp_put_ent(esp, ent);
+}
+
+static void esp_clear_hold(struct scsi_device *dev, void *data)
+{
+       struct esp_lun_data *lp = dev->hostdata;
+
+       BUG_ON(lp->num_tagged);
+       lp->hold = 0;
+}
+
+static void esp_reset_cleanup(struct esp *esp)
+{
+       struct esp_cmd_entry *ent, *tmp;
+       int i;
+
+       list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
+               struct scsi_cmnd *cmd = ent->cmd;
+
+               list_del(&ent->list);
+               cmd->result = DID_RESET << 16;
+               cmd->scsi_done(cmd);
+               esp_put_ent(esp, ent);
+       }
+
+       list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
+               if (ent == esp->active_cmd)
+                       esp->active_cmd = NULL;
+               esp_reset_cleanup_one(esp, ent);
+       }
+
+       BUG_ON(esp->active_cmd != NULL);
+
+       /* Force renegotiation of sync/wide transfers.  */
+       for (i = 0; i < ESP_MAX_TARGET; i++) {
+               struct esp_target_data *tp = &esp->target[i];
+
+               tp->esp_period = 0;
+               tp->esp_offset = 0;
+               tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
+                                    ESP_CONFIG3_FSCSI |
+                                    ESP_CONFIG3_FAST);
+               tp->flags &= ~ESP_TGT_WIDE;
+               tp->flags |= ESP_TGT_CHECK_NEGO;
+
+               if (tp->starget)
+                       starget_for_each_device(tp->starget, NULL,
+                                               esp_clear_hold);
+       }
+}
+
+/* Runs under host->lock */
+static void __esp_interrupt(struct esp *esp)
+{
+       int finish_reset, intr_done;
+       u8 phase;
+
+       esp->sreg = esp_read8(ESP_STATUS);
+
+       if (esp->flags & ESP_FLAG_RESETTING) {
+               finish_reset = 1;
+       } else {
+               if (esp_check_gross_error(esp))
+                       return;
+
+               finish_reset = esp_check_spur_intr(esp);
+               if (finish_reset < 0)
+                       return;
+       }
+
+       esp->ireg = esp_read8(ESP_INTRPT);
+
+       if (esp->ireg & ESP_INTR_SR)
+               finish_reset = 1;
+
+       if (finish_reset) {
+               esp_reset_cleanup(esp);
+               if (esp->eh_reset) {
+                       complete(esp->eh_reset);
+                       esp->eh_reset = NULL;
+               }
+               return;
+       }
+
+       phase = (esp->sreg & ESP_STAT_PMASK);
+       if (esp->rev == FASHME) {
+               if (((phase != ESP_DIP && phase != ESP_DOP) &&
+                    esp->select_state == ESP_SELECT_NONE &&
+                    esp->event != ESP_EVENT_STATUS &&
+                    esp->event != ESP_EVENT_DATA_DONE) ||
+                   (esp->ireg & ESP_INTR_RSEL)) {
+                       esp->sreg2 = esp_read8(ESP_STATUS2);
+                       if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
+                           (esp->sreg2 & ESP_STAT2_F1BYTE))
+                               hme_read_fifo(esp);
+               }
+       }
+
+       esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
+                    "sreg2[%02x] ireg[%02x]\n",
+                    esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
+
+       intr_done = 0;
+
+       if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
+               printk("ESP: unexpected IREG %02x\n", esp->ireg);
+               if (esp->ireg & ESP_INTR_IC)
+                       esp_dump_cmd_log(esp);
+
+               esp_schedule_reset(esp);
+       } else {
+               if (!(esp->ireg & ESP_INTR_RSEL)) {
+                       /* Some combination of FDONE, BSERV, DC.  */
+                       if (esp->select_state != ESP_SELECT_NONE)
+                               intr_done = esp_finish_select(esp);
+               } else if (esp->ireg & ESP_INTR_RSEL) {
+                       if (esp->active_cmd)
+                               (void) esp_finish_select(esp);
+                       intr_done = esp_reconnect(esp);
+               }
+       }
+       while (!intr_done)
+               intr_done = esp_process_event(esp);
+}
+
+irqreturn_t scsi_esp_intr(int irq, void *dev_id)
+{
+       struct esp *esp = dev_id;
+       unsigned long flags;
+       irqreturn_t ret;
+
+       spin_lock_irqsave(esp->host->host_lock, flags);
+       ret = IRQ_NONE;
+       if (esp->ops->irq_pending(esp)) {
+               ret = IRQ_HANDLED;
+               for (;;) {
+                       int i;
+
+                       __esp_interrupt(esp);
+                       if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
+                               break;
+                       esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
+
+                       for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
+                               if (esp->ops->irq_pending(esp))
+                                       break;
+                       }
+                       if (i == ESP_QUICKIRQ_LIMIT)
+                               break;
+               }
+       }
+       spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(scsi_esp_intr);
+
+static void __devinit esp_get_revision(struct esp *esp)
+{
+       u8 val;
+
+       esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
+       esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
+       esp_write8(esp->config2, ESP_CFG2);
+
+       val = esp_read8(ESP_CFG2);
+       val &= ~ESP_CONFIG2_MAGIC;
+       if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
+               /* If what we write to cfg2 does not come back, cfg2 is not
+                * implemented, therefore this must be a plain esp100.
+                */
+               esp->rev = ESP100;
+       } else {
+               esp->config2 = 0;
+               esp_set_all_config3(esp, 5);
+               esp->prev_cfg3 = 5;
+               esp_write8(esp->config2, ESP_CFG2);
+               esp_write8(0, ESP_CFG3);
+               esp_write8(esp->prev_cfg3, ESP_CFG3);
+
+               val = esp_read8(ESP_CFG3);
+               if (val != 5) {
+                       /* The cfg2 register is implemented, however
+                        * cfg3 is not, must be esp100a.
+                        */
+                       esp->rev = ESP100A;
+               } else {
+                       esp_set_all_config3(esp, 0);
+                       esp->prev_cfg3 = 0;
+                       esp_write8(esp->prev_cfg3, ESP_CFG3);
+
+                       /* All of cfg{1,2,3} implemented, must be one of
+                        * the fas variants, figure out which one.
+                        */
+                       if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
+                               esp->rev = FAST;
+                               esp->sync_defp = SYNC_DEFP_FAST;
+                       } else {
+                               esp->rev = ESP236;
+                       }
+                       esp->config2 = 0;
+                       esp_write8(esp->config2, ESP_CFG2);
+               }
+       }
+}
+
+static void __devinit esp_init_swstate(struct esp *esp)
+{
+       int i;
+
+       INIT_LIST_HEAD(&esp->queued_cmds);
+       INIT_LIST_HEAD(&esp->active_cmds);
+       INIT_LIST_HEAD(&esp->esp_cmd_pool);
+
+       /* Start with a clear state, domain validation (via ->slave_configure,
+        * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
+        * commands.
+        */
+       for (i = 0 ; i < ESP_MAX_TARGET; i++) {
+               esp->target[i].flags = 0;
+               esp->target[i].nego_goal_period = 0;
+               esp->target[i].nego_goal_offset = 0;
+               esp->target[i].nego_goal_width = 0;
+               esp->target[i].nego_goal_tags = 0;
+       }
+}
+
+/* This places the ESP into a known state at boot time. */
+static void __devinit esp_bootup_reset(struct esp *esp)
+{
+       u8 val;
+
+       /* Reset the DMA */
+       esp->ops->reset_dma(esp);
+
+       /* Reset the ESP */
+       esp_reset_esp(esp);
+
+       /* Reset the SCSI bus, but tell ESP not to generate an irq */
+       val = esp_read8(ESP_CFG1);
+       val |= ESP_CONFIG1_SRRDISAB;
+       esp_write8(val, ESP_CFG1);
+
+       scsi_esp_cmd(esp, ESP_CMD_RS);
+       udelay(400);
+
+       esp_write8(esp->config1, ESP_CFG1);
+
+       /* Eat any bitrot in the chip and we are done... */
+       esp_read8(ESP_INTRPT);
+}
+
+static void __devinit esp_set_clock_params(struct esp *esp)
+{
+       int fmhz;
+       u8 ccf;
+
+       /* This is getting messy but it has to be done correctly or else
+        * you get weird behavior all over the place.  We are trying to
+        * basically figure out three pieces of information.
+        *
+        * a) Clock Conversion Factor
+        *
+        *    This is a representation of the input crystal clock frequency
+        *    going into the ESP on this machine.  Any operation whose timing
+        *    is longer than 400ns depends on this value being correct.  For
+        *    example, you'll get blips for arbitration/selection during high
+        *    load or with multiple targets if this is not set correctly.
+        *
+        * b) Selection Time-Out
+        *
+        *    The ESP isn't very bright and will arbitrate for the bus and try
+        *    to select a target forever if you let it.  This value tells the
+        *    ESP when it has taken too long to negotiate and that it should
+        *    interrupt the CPU so we can see what happened.  The value is
+        *    computed as follows (from NCR/Symbios chip docs).
+        *
+        *          (Time Out Period) *  (Input Clock)
+        *    STO = ----------------------------------
+        *          (8192) * (Clock Conversion Factor)
+        *
+        *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
+        *
+        * c) Empirical constants for synchronous offset and transfer period
+        *    register values
+        *
+        *    This entails the smallest and largest sync period we could ever
+        *    handle on this ESP.
+        */
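+       /* Worked example (illustrative only, assuming a 25MHz input
+        * crystal):
+        *
+        *    ccf = ((25000000 / 1000000) + 4) / 5 = 5
+        *    STO = (250ms * 25MHz) / (8192 * 5)
+        *        = (250 * 25000) / 40960 ~= 152
+        *
+        * which is what ESP_NEG_DEFP(25000000, 5) evaluates to, and is
+        * the sort of value that ends up in the selection time-out
+        * register.
+        */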
+       fmhz = esp->cfreq;
+
+       ccf = ((fmhz / 1000000) + 4) / 5;
+       if (ccf == 1)
+               ccf = 2;
+
+       /* If we can't find anything reasonable, just assume 20MHz.
+        * This is the clock frequency of the older sun4c's where I've
+        * been unable to find the clock-frequency PROM property.  All
+        * other machines provide useful values it seems.
+        */
+       if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
+               fmhz = 20000000;
+               ccf = 4;
+       }
+
+       esp->cfact = (ccf == 8 ? 0 : ccf);
+       esp->cfreq = fmhz;
+       esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
+       esp->ctick = ESP_TICK(ccf, esp->ccycle);
+       esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
+       esp->sync_defp = SYNC_DEFP_SLOW;
+}
+
+static const char *esp_chip_names[] = {
+       "ESP100",
+       "ESP100A",
+       "ESP236",
+       "FAS236",
+       "FAS100A",
+       "FAST",
+       "FASHME",
+};
+
+static struct scsi_transport_template *esp_transport_template;
+
+int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
+{
+       static int instance;
+       int err;
+
+       esp->host->transportt = esp_transport_template;
+       esp->host->max_lun = ESP_MAX_LUN;
+       esp->host->cmd_per_lun = 2;
+       esp->host->unique_id = instance;
+
+       esp_set_clock_params(esp);
+
+       esp_get_revision(esp);
+
+       esp_init_swstate(esp);
+
+       esp_bootup_reset(esp);
+
+       printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
+              esp->host->unique_id, esp->regs, esp->dma_regs,
+              esp->host->irq);
+       printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
+              esp->host->unique_id, esp_chip_names[esp->rev],
+              esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
+
+       /* Let the SCSI bus reset settle. */
+       ssleep(esp_bus_reset_settle);
+
+       err = scsi_add_host(esp->host, dev);
+       if (err)
+               return err;
+
+       instance++;
+
+       scsi_scan_host(esp->host);
+
+       return 0;
+}
+EXPORT_SYMBOL(scsi_esp_register);
+
+void __devexit scsi_esp_unregister(struct esp *esp)
+{
+       scsi_remove_host(esp->host);
+}
+EXPORT_SYMBOL(scsi_esp_unregister);
+
+static int esp_slave_alloc(struct scsi_device *dev)
+{
+       struct esp *esp = host_to_esp(dev->host);
+       struct esp_target_data *tp = &esp->target[dev->id];
+       struct esp_lun_data *lp;
+
+       lp = kzalloc(sizeof(*lp), GFP_KERNEL);
+       if (!lp)
+               return -ENOMEM;
+       dev->hostdata = lp;
+
+       tp->starget = dev->sdev_target;
+
+       spi_min_period(tp->starget) = esp->min_period;
+       spi_max_offset(tp->starget) = 15;
+
+       if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
+               spi_max_width(tp->starget) = 1;
+       else
+               spi_max_width(tp->starget) = 0;
+
+       return 0;
+}
+
+static int esp_slave_configure(struct scsi_device *dev)
+{
+       struct esp *esp = host_to_esp(dev->host);
+       struct esp_target_data *tp = &esp->target[dev->id];
+       int goal_tags, queue_depth;
+
+       goal_tags = 0;
+
+       if (dev->tagged_supported) {
+               /* XXX make this configurable somehow XXX */
+               goal_tags = ESP_DEFAULT_TAGS;
+
+               if (goal_tags > ESP_MAX_TAG)
+                       goal_tags = ESP_MAX_TAG;
+       }
+
+       queue_depth = goal_tags;
+       if (queue_depth < dev->host->cmd_per_lun)
+               queue_depth = dev->host->cmd_per_lun;
+
+       if (goal_tags) {
+               scsi_set_tag_type(dev, MSG_ORDERED_TAG);
+               scsi_activate_tcq(dev, queue_depth);
+       } else {
+               scsi_deactivate_tcq(dev, queue_depth);
+       }
+       tp->flags |= ESP_TGT_DISCONNECT;
+
+       if (!spi_initial_dv(dev->sdev_target))
+               spi_dv_device(dev);
+
+       return 0;
+}
+
+static void esp_slave_destroy(struct scsi_device *dev)
+{
+       struct esp_lun_data *lp = dev->hostdata;
+
+       kfree(lp);
+       dev->hostdata = NULL;
+}
+
+static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+       struct esp *esp = host_to_esp(cmd->device->host);
+       struct esp_cmd_entry *ent, *tmp;
+       struct completion eh_done;
+       unsigned long flags;
+
+       /* XXX This helps a lot with debugging but might be a bit
+        * XXX much for the final driver.
+        */
+       spin_lock_irqsave(esp->host->host_lock, flags);
+       printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
+              esp->host->unique_id, cmd, cmd->cmnd[0]);
+       ent = esp->active_cmd;
+       if (ent)
+               printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
+                      esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
+       list_for_each_entry(ent, &esp->queued_cmds, list) {
+               printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
+                      esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
+       }
+       list_for_each_entry(ent, &esp->active_cmds, list) {
+               printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
+                      esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
+       }
+       esp_dump_cmd_log(esp);
+       spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+       spin_lock_irqsave(esp->host->host_lock, flags);
+
+       ent = NULL;
+       list_for_each_entry(tmp, &esp->queued_cmds, list) {
+               if (tmp->cmd == cmd) {
+                       ent = tmp;
+                       break;
+               }
+       }
+
+       if (ent) {
+               /* Easiest case, we didn't even issue the command
+                * yet so it is trivial to abort.
+                */
+               list_del(&ent->list);
+
+               cmd->result = DID_ABORT << 16;
+               cmd->scsi_done(cmd);
+
+               esp_put_ent(esp, ent);
+
+               goto out_success;
+       }
+
+       init_completion(&eh_done);
+
+       ent = esp->active_cmd;
+       if (ent && ent->cmd == cmd) {
+               /* Command is the currently active command on
+                * the bus.  If we already have an output message
+                * pending, no dice.
+                */
+               if (esp->msg_out_len)
+                       goto out_failure;
+
+               /* Send out an abort, encouraging the target to
+                * go to MSGOUT phase by asserting ATN.
+                */
+               esp->msg_out[0] = ABORT_TASK_SET;
+               esp->msg_out_len = 1;
+               ent->eh_done = &eh_done;
+
+               scsi_esp_cmd(esp, ESP_CMD_SATN);
+       } else {
+               /* The command is disconnected.  This is not easy to
+                * abort.  For now we fail and let the scsi error
+                * handling layer go try a scsi bus reset or host
+                * reset.
+                *
+                * What we could do is put together a scsi command
+                * solely for the purpose of sending an abort message
+                * to the target.  Coming up with all the code to
+                * cook up scsi commands, special case them everywhere,
+                * etc. is for questionable gain and it would be better
+                * if the generic scsi error handling layer could do at
+                * least some of that for us.
+                *
+                * Anyways this is an area for potential future improvement
+                * in this driver.
+                */
+               goto out_failure;
+       }
+
+       spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+       if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
+               spin_lock_irqsave(esp->host->host_lock, flags);
+               ent->eh_done = NULL;
+               spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+               return FAILED;
+       }
+
+       return SUCCESS;
+
+out_success:
+       spin_unlock_irqrestore(esp->host->host_lock, flags);
+       return SUCCESS;
+
+out_failure:
+       /* XXX This might be a good location to set ESP_TGT_BROKEN
+        * XXX since we know which target/lun in particular is
+        * XXX causing trouble.
+        */
+       spin_unlock_irqrestore(esp->host->host_lock, flags);
+       return FAILED;
+}
+
+static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+       struct esp *esp = host_to_esp(cmd->device->host);
+       struct completion eh_reset;
+       unsigned long flags;
+
+       init_completion(&eh_reset);
+
+       spin_lock_irqsave(esp->host->host_lock, flags);
+
+       esp->eh_reset = &eh_reset;
+
+       /* XXX This is too simple... We should add lots of
+        * XXX checks here so that if we find that the chip is
+        * XXX very wedged we return failure immediately so
+        * XXX that we can perform a full chip reset.
+        */
+       esp->flags |= ESP_FLAG_RESETTING;
+       scsi_esp_cmd(esp, ESP_CMD_RS);
+
+       spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+       ssleep(esp_bus_reset_settle);
+
+       if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
+               spin_lock_irqsave(esp->host->host_lock, flags);
+               esp->eh_reset = NULL;
+               spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+               return FAILED;
+       }
+
+       return SUCCESS;
+}
+
+/* All bets are off, reset the entire device.  */
+static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+       struct esp *esp = host_to_esp(cmd->device->host);
+       unsigned long flags;
+
+       spin_lock_irqsave(esp->host->host_lock, flags);
+       esp_bootup_reset(esp);
+       esp_reset_cleanup(esp);
+       spin_unlock_irqrestore(esp->host->host_lock, flags);
+
+       ssleep(esp_bus_reset_settle);
+
+       return SUCCESS;
+}
+
+static const char *esp_info(struct Scsi_Host *host)
+{
+       return "esp";
+}
+
+struct scsi_host_template scsi_esp_template = {
+       .module                 = THIS_MODULE,
+       .name                   = "esp",
+       .info                   = esp_info,
+       .queuecommand           = esp_queuecommand,
+       .slave_alloc            = esp_slave_alloc,
+       .slave_configure        = esp_slave_configure,
+       .slave_destroy          = esp_slave_destroy,
+       .eh_abort_handler       = esp_eh_abort_handler,
+       .eh_bus_reset_handler   = esp_eh_bus_reset_handler,
+       .eh_host_reset_handler  = esp_eh_host_reset_handler,
+       .can_queue              = 7,
+       .this_id                = 7,
+       .sg_tablesize           = SG_ALL,
+       .use_clustering         = ENABLE_CLUSTERING,
+       .max_sectors            = 0xffff,
+       .skip_settle_delay      = 1,
+};
+EXPORT_SYMBOL(scsi_esp_template);
+
+static void esp_get_signalling(struct Scsi_Host *host)
+{
+       struct esp *esp = host_to_esp(host);
+       enum spi_signal_type type;
+
+       if (esp->flags & ESP_FLAG_DIFFERENTIAL)
+               type = SPI_SIGNAL_HVD;
+       else
+               type = SPI_SIGNAL_SE;
+
+       spi_signalling(host) = type;
+}
+
+static void esp_set_offset(struct scsi_target *target, int offset)
+{
+       struct Scsi_Host *host = dev_to_shost(target->dev.parent);
+       struct esp *esp = host_to_esp(host);
+       struct esp_target_data *tp = &esp->target[target->id];
+
+       tp->nego_goal_offset = offset;
+       tp->flags |= ESP_TGT_CHECK_NEGO;
+}
+
+static void esp_set_period(struct scsi_target *target, int period)
+{
+       struct Scsi_Host *host = dev_to_shost(target->dev.parent);
+       struct esp *esp = host_to_esp(host);
+       struct esp_target_data *tp = &esp->target[target->id];
+
+       tp->nego_goal_period = period;
+       tp->flags |= ESP_TGT_CHECK_NEGO;
+}
+
+static void esp_set_width(struct scsi_target *target, int width)
+{
+       struct Scsi_Host *host = dev_to_shost(target->dev.parent);
+       struct esp *esp = host_to_esp(host);
+       struct esp_target_data *tp = &esp->target[target->id];
+
+       tp->nego_goal_width = (width ? 1 : 0);
+       tp->flags |= ESP_TGT_CHECK_NEGO;
+}
+
+static struct spi_function_template esp_transport_ops = {
+       .set_offset             = esp_set_offset,
+       .show_offset            = 1,
+       .set_period             = esp_set_period,
+       .show_period            = 1,
+       .set_width              = esp_set_width,
+       .show_width             = 1,
+       .get_signalling         = esp_get_signalling,
+};
+
+static int __init esp_init(void)
+{
+       BUILD_BUG_ON(sizeof(struct scsi_pointer) <
+                    sizeof(struct esp_cmd_priv));
+
+       esp_transport_template = spi_attach_transport(&esp_transport_ops);
+       if (!esp_transport_template)
+               return -ENODEV;
+
+       return 0;
+}
+
+static void __exit esp_exit(void)
+{
+       spi_release_transport(esp_transport_template);
+}
+
+MODULE_DESCRIPTION("ESP SCSI driver core");
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(esp_bus_reset_settle, int, 0);
+MODULE_PARM_DESC(esp_bus_reset_settle,
+                "ESP scsi bus reset delay in seconds");
+
+module_param(esp_debug, int, 0);
+MODULE_PARM_DESC(esp_debug,
+"ESP bitmapped debugging message enable value:\n"
+"      0x00000001      Log interrupt events\n"
+"      0x00000002      Log scsi commands\n"
+"      0x00000004      Log resets\n"
+"      0x00000008      Log message in events\n"
+"      0x00000010      Log message out events\n"
+"      0x00000020      Log command completion\n"
+"      0x00000040      Log disconnects\n"
+"      0x00000080      Log data start\n"
+"      0x00000100      Log data done\n"
+"      0x00000200      Log reconnects\n"
+"      0x00000400      Log auto-sense data\n"
+);
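+/* The above bits may be OR'd together; for example esp_debug=0x03
+ * enables both interrupt and scsi command logging.
+ */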
+
+module_init(esp_init);
+module_exit(esp_exit);
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
new file mode 100644 (file)
index 0000000..8d4a669
--- /dev/null
@@ -0,0 +1,560 @@
+/* esp_scsi.h: Defines and structures for the ESP driver.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#ifndef _ESP_SCSI_H
+#define _ESP_SCSI_H
+
+                                       /* Access    Description      Offset */
+#define ESP_TCLOW      0x00UL          /* rw  Low bits transfer count 0x00  */
+#define ESP_TCMED      0x01UL          /* rw  Mid bits transfer count 0x04  */
+#define ESP_FDATA      0x02UL          /* rw  FIFO data bits          0x08  */
+#define ESP_CMD                0x03UL          /* rw  SCSI command bits       0x0c  */
+#define ESP_STATUS     0x04UL          /* ro  ESP status register     0x10  */
+#define ESP_BUSID      ESP_STATUS      /* wo  BusID for sel/resel     0x10  */
+#define ESP_INTRPT     0x05UL          /* ro  Kind of interrupt       0x14  */
+#define ESP_TIMEO      ESP_INTRPT      /* wo  Timeout for sel/resel   0x14  */
+#define ESP_SSTEP      0x06UL          /* ro  Sequence step register  0x18  */
+#define ESP_STP                ESP_SSTEP       /* wo  Transfer period/sync    0x18  */
+#define ESP_FFLAGS     0x07UL          /* ro  Bits current FIFO info  0x1c  */
+#define ESP_SOFF       ESP_FFLAGS      /* wo  Sync offset             0x1c  */
+#define ESP_CFG1       0x08UL          /* rw  First cfg register      0x20  */
+#define ESP_CFACT      0x09UL          /* wo  Clock conv factor       0x24  */
+#define ESP_STATUS2    ESP_CFACT       /* ro  HME status2 register    0x24  */
+#define ESP_CTEST      0x0aUL          /* wo  Chip test register      0x28  */
+#define ESP_CFG2       0x0bUL          /* rw  Second cfg register     0x2c  */
+#define ESP_CFG3       0x0cUL          /* rw  Third cfg register      0x30  */
+#define ESP_TCHI       0x0eUL          /* rw  High bits transf count  0x38  */
+#define ESP_UID                ESP_TCHI        /* ro  Unique ID code          0x38  */
+#define FAS_RLO                ESP_TCHI        /* rw  HME extended counter    0x38  */
+#define ESP_FGRND      0x0fUL          /* rw  Data base for fifo      0x3c  */
+#define FAS_RHI                ESP_FGRND       /* rw  HME extended counter    0x3c  */
+
+#define SBUS_ESP_REG_SIZE      0x40UL
+
+/* Bitfield meanings for the above registers. */
+
+/* ESP config reg 1, read-write, found on all ESP chips */
+#define ESP_CONFIG1_ID        0x07      /* My BUS ID bits */
+#define ESP_CONFIG1_CHTEST    0x08      /* Enable ESP chip tests */
+#define ESP_CONFIG1_PENABLE   0x10      /* Enable parity checks */
+#define ESP_CONFIG1_PARTEST   0x20      /* Parity test mode enabled? */
+#define ESP_CONFIG1_SRRDISAB  0x40      /* Disable SCSI reset reports */
+#define ESP_CONFIG1_SLCABLE   0x80      /* Enable slow cable mode */
+
+/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */
+#define ESP_CONFIG2_DMAPARITY 0x01      /* enable DMA Parity (200,236) */
+#define ESP_CONFIG2_REGPARITY 0x02      /* enable reg Parity (200,236) */
+#define ESP_CONFIG2_BADPARITY 0x04      /* Bad parity target abort  */
+#define ESP_CONFIG2_SCSI2ENAB 0x08      /* Enable SCSI-2 features (tgtmode) */
+#define ESP_CONFIG2_HI        0x10      /* High Impedance DREQ ???  */
+#define ESP_CONFIG2_HMEFENAB  0x10      /* HME features enable */
+#define ESP_CONFIG2_BCM       0x20      /* Enable byte-ctrl (236)   */
+#define ESP_CONFIG2_DISPINT   0x20      /* Disable pause irq (hme) */
+#define ESP_CONFIG2_FENAB     0x40      /* Enable features (fas100,216) */
+#define ESP_CONFIG2_SPL       0x40      /* Enable status-phase latch (236) */
+#define ESP_CONFIG2_MKDONE    0x40      /* HME magic feature */
+#define ESP_CONFIG2_HME32     0x80      /* HME 32 extended */
+#define ESP_CONFIG2_MAGIC     0xe0      /* Invalid bits... */
+
+/* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */
+#define ESP_CONFIG3_FCLOCK    0x01     /* FAST SCSI clock rate (esp100a/hme) */
+#define ESP_CONFIG3_TEM       0x01     /* Enable thresh-8 mode (esp/fas236)  */
+#define ESP_CONFIG3_FAST      0x02     /* Enable FAST SCSI     (esp100a/hme) */
+#define ESP_CONFIG3_ADMA      0x02     /* Enable alternate-dma (esp/fas236)  */
+#define ESP_CONFIG3_TENB      0x04     /* group2 SCSI2 support (esp100a/hme) */
+#define ESP_CONFIG3_SRB       0x04     /* Save residual byte   (esp/fas236)  */
+#define ESP_CONFIG3_TMS       0x08     /* Three-byte msg's ok  (esp100a/hme) */
+#define ESP_CONFIG3_FCLK      0x08     /* Fast SCSI clock rate (esp/fas236)  */
+#define ESP_CONFIG3_IDMSG     0x10     /* ID message checking  (esp100a/hme) */
+#define ESP_CONFIG3_FSCSI     0x10     /* Enable FAST SCSI     (esp/fas236)  */
+#define ESP_CONFIG3_GTM       0x20     /* group2 SCSI2 support (esp/fas236)  */
+#define ESP_CONFIG3_IDBIT3    0x20     /* Bit 3 of HME SCSI-ID (hme)         */
+#define ESP_CONFIG3_TBMS      0x40     /* Three-byte msg's ok  (esp/fas236)  */
+#define ESP_CONFIG3_EWIDE     0x40     /* Enable Wide-SCSI     (hme)         */
+#define ESP_CONFIG3_IMS       0x80     /* ID msg chk'ng        (esp/fas236)  */
+#define ESP_CONFIG3_OBPUSH    0x80     /* Push odd-byte to dma (hme)         */
+
+/* ESP command register read-write */
+/* Group 1 commands:  These may be sent at any point in time to the ESP
+ *                    chip.  None of them can generate interrupts 'cept
+ *                    the "SCSI bus reset" command if you have not disabled
+ *                    SCSI reset interrupts in the config1 ESP register.
+ */
+#define ESP_CMD_NULL          0x00     /* Null command, ie. a nop */
+#define ESP_CMD_FLUSH         0x01     /* FIFO Flush */
+#define ESP_CMD_RC            0x02     /* Chip reset */
+#define ESP_CMD_RS            0x03     /* SCSI bus reset */
+
+/* Group 2 commands:  ESP must be an initiator and connected to a target
+ *                    for these commands to work.
+ */
+#define ESP_CMD_TI            0x10     /* Transfer Information */
+#define ESP_CMD_ICCSEQ        0x11     /* Initiator cmd complete sequence */
+#define ESP_CMD_MOK           0x12     /* Message okie-dokie */
+#define ESP_CMD_TPAD          0x18     /* Transfer Pad */
+#define ESP_CMD_SATN          0x1a     /* Set ATN */
+#define ESP_CMD_RATN          0x1b     /* De-assert ATN */
+
+/* Group 3 commands:  ESP must be in the MSGOUT or MSGIN state and be connected
+ *                    to a target as the initiator for these commands to work.
+ */
+#define ESP_CMD_SMSG          0x20     /* Send message */
+#define ESP_CMD_SSTAT         0x21     /* Send status */
+#define ESP_CMD_SDATA         0x22     /* Send data */
+#define ESP_CMD_DSEQ          0x23     /* Discontinue Sequence */
+#define ESP_CMD_TSEQ          0x24     /* Terminate Sequence */
+#define ESP_CMD_TCCSEQ        0x25     /* Target cmd cmplt sequence */
+#define ESP_CMD_DCNCT         0x27     /* Disconnect */
+#define ESP_CMD_RMSG          0x28     /* Receive Message */
+#define ESP_CMD_RCMD          0x29     /* Receive Command */
+#define ESP_CMD_RDATA         0x2a     /* Receive Data */
+#define ESP_CMD_RCSEQ         0x2b     /* Receive cmd sequence */
+
+/* Group 4 commands:  The ESP must be in the disconnected state and must
+ *                    not be connected to any targets as initiator for
+ *                    these commands to work.
+ */
+#define ESP_CMD_RSEL          0x40     /* Reselect */
+#define ESP_CMD_SEL           0x41     /* Select w/o ATN */
+#define ESP_CMD_SELA          0x42     /* Select w/ATN */
+#define ESP_CMD_SELAS         0x43     /* Select w/ATN & STOP */
+#define ESP_CMD_ESEL          0x44     /* Enable selection */
+#define ESP_CMD_DSEL          0x45     /* Disable selections */
+#define ESP_CMD_SA3           0x46     /* Select w/ATN3 */
+#define ESP_CMD_RSEL3         0x47     /* Reselect3 */
+
+/* This bit enables the ESP's DMA on the SBus */
+#define ESP_CMD_DMA           0x80     /* Do DMA? */
+
+/* ESP status register read-only */
+#define ESP_STAT_PIO          0x01     /* IO phase bit */
+#define ESP_STAT_PCD          0x02     /* CD phase bit */
+#define ESP_STAT_PMSG         0x04     /* MSG phase bit */
+#define ESP_STAT_PMASK        0x07     /* Mask of phase bits */
+#define ESP_STAT_TDONE        0x08     /* Transfer Completed */
+#define ESP_STAT_TCNT         0x10     /* Transfer Counter Is Zero */
+#define ESP_STAT_PERR         0x20     /* Parity error */
+#define ESP_STAT_SPAM         0x40     /* Real bad error */
+/* This indicates the 'interrupt pending' condition on esp236; it is a
+ * reserved bit on other revs of the ESP.
+ */
+#define ESP_STAT_INTR         0x80             /* Interrupt */
+
+/* The status register can be masked with ESP_STAT_PMASK and compared
+ * with the following values to determine the current phase the ESP
+ * (at least thinks it) is in.  For our purposes we also add our own
+ * software 'done' bit for our phase management engine.
+ */
+#define ESP_DOP   (0)                                       /* Data Out  */
+#define ESP_DIP   (ESP_STAT_PIO)                            /* Data In   */
+#define ESP_CMDP  (ESP_STAT_PCD)                            /* Command   */
+#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO)               /* Status    */
+#define ESP_MOP   (ESP_STAT_PMSG|ESP_STAT_PCD)              /* Message Out */
+#define ESP_MIP   (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
+
+/* HME only: status 2 register */
+#define ESP_STAT2_SCHBIT      0x01 /* Upper bits 3-7 of sstep enabled */
+#define ESP_STAT2_FFLAGS      0x02 /* The fifo flags are now latched */
+#define ESP_STAT2_XCNT        0x04 /* The transfer counter is latched */
+#define ESP_STAT2_CREGA       0x08 /* The command reg is active now */
+#define ESP_STAT2_WIDE        0x10 /* Interface on this adapter is wide */
+#define ESP_STAT2_F1BYTE      0x20 /* There is one byte at top of fifo */
+#define ESP_STAT2_FMSB        0x40 /* Next byte in fifo is most significant */
+#define ESP_STAT2_FEMPTY      0x80 /* FIFO is empty */
+
+/* ESP interrupt register read-only */
+#define ESP_INTR_S            0x01     /* Select w/o ATN */
+#define ESP_INTR_SATN         0x02     /* Select w/ATN */
+#define ESP_INTR_RSEL         0x04     /* Reselected */
+#define ESP_INTR_FDONE        0x08     /* Function done */
+#define ESP_INTR_BSERV        0x10     /* Bus service */
+#define ESP_INTR_DC           0x20     /* Disconnect */
+#define ESP_INTR_IC           0x40     /* Illegal command given */
+#define ESP_INTR_SR           0x80     /* SCSI bus reset detected */
+
+/* ESP sequence step register read-only */
+#define ESP_STEP_VBITS        0x07     /* Valid bits */
+#define ESP_STEP_ASEL         0x00     /* Selection&Arbitrate cmplt */
+#define ESP_STEP_SID          0x01     /* One msg byte sent */
+#define ESP_STEP_NCMD         0x02     /* Was not in command phase */
+#define ESP_STEP_PPC          0x03     /* Early phase chg caused cmnd
+                                        * bytes to be lost
+                                        */
+#define ESP_STEP_FINI4        0x04     /* Command was sent ok */
+
+/* Ho hum, some ESP's set the step register to this as well... */
+#define ESP_STEP_FINI5        0x05
+#define ESP_STEP_FINI6        0x06
+#define ESP_STEP_FINI7        0x07
+
+/* ESP chip-test register read-write */
+#define ESP_TEST_TARG         0x01     /* Target test mode */
+#define ESP_TEST_INI          0x02     /* Initiator test mode */
+#define ESP_TEST_TS           0x04     /* Tristate test mode */
+
+/* ESP unique ID register read-only, found on fas236+fas100a only */
+#define ESP_UID_F100A         0x00     /* ESP FAS100A  */
+#define ESP_UID_F236          0x02     /* ESP FAS236   */
+#define ESP_UID_REV           0x07     /* ESP revision */
+#define ESP_UID_FAM           0xf8     /* ESP family   */
+
+/* ESP fifo flags register read-only */
+/* Note that the following implies a 16 byte FIFO on the ESP. */
+#define ESP_FF_FBYTES         0x1f     /* Num bytes in FIFO */
+#define ESP_FF_ONOTZERO       0x20     /* offset ctr not zero (esp100) */
+#define ESP_FF_SSTEP          0xe0     /* Sequence step */
+
+/* ESP clock conversion factor register write-only */
+#define ESP_CCF_F0            0x00     /* 35.01MHz - 40MHz */
+#define ESP_CCF_NEVER         0x01     /* Set it to this and die */
+#define ESP_CCF_F2            0x02     /* 10MHz */
+#define ESP_CCF_F3            0x03     /* 10.01MHz - 15MHz */
+#define ESP_CCF_F4            0x04     /* 15.01MHz - 20MHz */
+#define ESP_CCF_F5            0x05     /* 20.01MHz - 25MHz */
+#define ESP_CCF_F6            0x06     /* 25.01MHz - 30MHz */
+#define ESP_CCF_F7            0x07     /* 30.01MHz - 35MHz */
+
+/* HME only... */
+#define ESP_BUSID_RESELID     0x10
+#define ESP_BUSID_CTR32BIT    0x40
+
+#define ESP_BUS_TIMEOUT        250     /* In milli-seconds */
+#define ESP_TIMEO_CONST       8192
+#define ESP_NEG_DEFP(mhz, cfact) \
+        ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
+#define ESP_MHZ_TO_CYCLE(mhertz)  ((1000000000) / ((mhertz) / 1000))
+#define ESP_TICK(ccf, cycle)  ((7682 * (ccf) * (cycle) / 1000))
+
+/* For slow to medium speed input clock rates we shoot for 5MB/s, but for high
+ * input clock rates we try for 10MB/s, although I don't think a transfer can
+ * even run that fast with an ESP even with DMA2 scatter-gather pipelining.
+ */
+#define SYNC_DEFP_SLOW            0x32   /* 5MB/s  */
+#define SYNC_DEFP_FAST            0x19   /* 10MB/s */
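+/* These are the standard SCSI synchronous transfer period factors
+ * (period = factor * 4ns): 0x32 = 50 -> 200ns, roughly 5MB/s on a
+ * narrow bus; 0x19 = 25 -> 100ns, roughly 10MB/s.
+ */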
+
+struct esp_cmd_priv {
+       union {
+               dma_addr_t      dma_addr;
+               int             num_sg;
+       } u;
+
+       unsigned int            cur_residue;
+       struct scatterlist      *cur_sg;
+       unsigned int            tot_residue;
+};
+#define ESP_CMD_PRIV(CMD)      ((struct esp_cmd_priv *)(&(CMD)->SCp))
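+/* For instance (a hypothetical snippet, not taken from the driver
+ * itself), a command's private state is reached through the midlayer
+ * scsi_pointer:
+ *
+ *     struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
+ *     spriv->tot_residue = 0;
+ */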
+
+enum esp_rev {
+       ESP100     = 0x00,  /* NCR53C90 - very broken */
+       ESP100A    = 0x01,  /* NCR53C90A */
+       ESP236     = 0x02,
+       FAS236     = 0x03,
+       FAS100A    = 0x04,
+       FAST       = 0x05,
+       FASHME     = 0x06,
+};
+
+struct esp_cmd_entry {
+       struct list_head        list;
+
+       struct scsi_cmnd        *cmd;
+
+       unsigned int            saved_cur_residue;
+       struct scatterlist      *saved_cur_sg;
+       unsigned int            saved_tot_residue;
+
+       u8                      flags;
+#define ESP_CMD_FLAG_WRITE     0x01 /* DMA is a write */
+#define ESP_CMD_FLAG_ABORT     0x02 /* being aborted */
+#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
+
+       u8                      tag[2];
+
+       u8                      status;
+       u8                      message;
+
+       unsigned char           *sense_ptr;
+       unsigned char           *saved_sense_ptr;
+       dma_addr_t              sense_dma;
+
+       struct completion       *eh_done;
+};
+
+/* XXX make this configurable somehow XXX */
+#define ESP_DEFAULT_TAGS       16
+
+#define ESP_MAX_TARGET         16
+#define ESP_MAX_LUN            8
+#define ESP_MAX_TAG            256
+
+struct esp_lun_data {
+       struct esp_cmd_entry    *non_tagged_cmd;
+       int                     num_tagged;
+       int                     hold;
+       struct esp_cmd_entry    *tagged_cmds[ESP_MAX_TAG];
+};
+
+struct esp_target_data {
+       /* These are the ESP_STP, ESP_SOFF, and ESP_CFG3 register values which
+        * match the currently negotiated settings for this target.  The SCSI
+        * protocol values are maintained in spi_{offset,period,wide}(starget).
+        */
+       u8                      esp_period;
+       u8                      esp_offset;
+       u8                      esp_config3;
+
+       u8                      flags;
+#define ESP_TGT_WIDE           0x01
+#define ESP_TGT_DISCONNECT     0x02
+#define ESP_TGT_NEGO_WIDE      0x04
+#define ESP_TGT_NEGO_SYNC      0x08
+#define ESP_TGT_CHECK_NEGO     0x40
+#define ESP_TGT_BROKEN         0x80
+
+       /* When ESP_TGT_CHECK_NEGO is set, on the next scsi command to this
+        * device we will try to negotiate the following parameters.
+        */
+       u8                      nego_goal_period;
+       u8                      nego_goal_offset;
+       u8                      nego_goal_width;
+       u8                      nego_goal_tags;
+
+       struct scsi_target      *starget;
+};
+
+struct esp_event_ent {
+       u8                      type;
+#define ESP_EVENT_TYPE_EVENT   0x01
+#define ESP_EVENT_TYPE_CMD     0x02
+       u8                      val;
+
+       u8                      sreg;
+       u8                      seqreg;
+       u8                      sreg2;
+       u8                      ireg;
+       u8                      select_state;
+       u8                      event;
+       u8                      __pad;
+};
+
+struct esp;
+struct esp_driver_ops {
+       /* Read and write the ESP 8-bit registers.  On some
+        * applications of the ESP chip the registers are at 4-byte
+        * instead of 1-byte intervals.
+        */
+       void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
+       u8 (*esp_read8)(struct esp *esp, unsigned long reg);
+
+       /* Map and unmap DMA memory.  The driver will be converted to
+        * the generic DMA API as soon as SBUS is able to cope with
+        * that, at which point these hooks can be removed.
+        */
+       dma_addr_t (*map_single)(struct esp *esp, void *buf,
+                                size_t sz, int dir);
+       int (*map_sg)(struct esp *esp, struct scatterlist *sg,
+                     int num_sg, int dir);
+       void (*unmap_single)(struct esp *esp, dma_addr_t addr,
+                            size_t sz, int dir);
+       void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
+                        int num_sg, int dir);
+
+       /* Return non-zero if there is an IRQ pending.  Usually this
+        * status bit lives in the DMA controller sitting in front of
+        * the ESP.  This has to be accurate or else the ESP interrupt
+        * handler will not run.
+        */
+       int (*irq_pending)(struct esp *esp);
+
+       /* Reset the DMA engine entirely.  On return, ESP interrupts
+        * should be enabled.  Often the interrupt enabling is
+        * controlled in the DMA engine.
+        */
+       void (*reset_dma)(struct esp *esp);
+
+       /* Drain any pending DMA in the DMA engine after a transfer.
+        * This is for writes to memory.
+        */
+       void (*dma_drain)(struct esp *esp);
+
+       /* Invalidate the DMA engine after a DMA transfer.  */
+       void (*dma_invalidate)(struct esp *esp);
+
+       /* Setup an ESP command that will use a DMA transfer.
+        * The 'esp_count' specifies what transfer length should be
+        * programmed into the ESP transfer counter registers, whereas
+        * the 'dma_count' is the length that should be programmed into
+        * the DMA controller.  Usually they are the same.  If 'write'
+        * is non-zero, this transfer is a write into memory.  'cmd'
+        * holds the ESP command that should be issued by calling
+        * scsi_esp_cmd() at the appropriate time while programming
+        * the DMA hardware.
+        */
+       void (*send_dma_cmd)(struct esp *esp, u32 dma_addr, u32 esp_count,
+                            u32 dma_count, int write, u8 cmd);
+
+       /* Return non-zero if the DMA engine is reporting an error
+        * currently.
+        */
+       int (*dma_error)(struct esp *esp);
+};
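+/* A minimal sketch of the two register accessors for a hypothetical
+ * front-end whose registers sit at 4-byte intervals (the names below
+ * are invented; only the esp_driver_ops contract above is
+ * authoritative):
+ *
+ *     static void example_esp_write8(struct esp *esp, u8 val,
+ *                                    unsigned long reg)
+ *     {
+ *             writeb(val, esp->regs + (reg * 4UL));
+ *     }
+ *
+ *     static u8 example_esp_read8(struct esp *esp, unsigned long reg)
+ *     {
+ *             return readb(esp->regs + (reg * 4UL));
+ *     }
+ */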
+
+#define ESP_MAX_MSG_SZ         8
+#define ESP_EVENT_LOG_SZ       32
+
+#define ESP_QUICKIRQ_LIMIT     100
+#define ESP_RESELECT_TAG_LIMIT 2500
+
+struct esp {
+       void __iomem            *regs;
+       void __iomem            *dma_regs;
+
+       const struct esp_driver_ops *ops;
+
+       struct Scsi_Host        *host;
+       void                    *dev;
+
+       struct esp_cmd_entry    *active_cmd;
+
+       struct list_head        queued_cmds;
+       struct list_head        active_cmds;
+
+       u8                      *command_block;
+       dma_addr_t              command_block_dma;
+
+       unsigned int            data_dma_len;
+
+       /* The following are used to determine the cause of an IRQ. Upon every
+        * IRQ entry we synchronize these with the hardware registers.
+        */
+       u8                      sreg;
+       u8                      seqreg;
+       u8                      sreg2;
+       u8                      ireg;
+
+       u32                     prev_hme_dmacsr;
+       u8                      prev_soff;
+       u8                      prev_stp;
+       u8                      prev_cfg3;
+       u8                      __pad;
+
+       struct list_head        esp_cmd_pool;
+
+       struct esp_target_data  target[ESP_MAX_TARGET];
+
+       int                     fifo_cnt;
+       u8                      fifo[16];
+
+       struct esp_event_ent    esp_event_log[ESP_EVENT_LOG_SZ];
+       int                     esp_event_cur;
+
+       u8                      msg_out[ESP_MAX_MSG_SZ];
+       int                     msg_out_len;
+
+       u8                      msg_in[ESP_MAX_MSG_SZ];
+       int                     msg_in_len;
+
+       u8                      bursts;
+       u8                      config1;
+       u8                      config2;
+
+       u8                      scsi_id;
+       u32                     scsi_id_mask;
+
+       enum esp_rev            rev;
+
+       u32                     flags;
+#define ESP_FLAG_DIFFERENTIAL  0x00000001
+#define ESP_FLAG_RESETTING     0x00000002
+#define ESP_FLAG_DOING_SLOWCMD 0x00000004
+#define ESP_FLAG_WIDE_CAPABLE  0x00000008
+#define ESP_FLAG_QUICKIRQ_CHECK        0x00000010
+
+       u8                      select_state;
+#define ESP_SELECT_NONE                0x00 /* Not selecting */
+#define ESP_SELECT_BASIC       0x01 /* Select w/o MSGOUT phase */
+#define ESP_SELECT_MSGOUT      0x02 /* Select with MSGOUT */
+
+       /* When we are not selecting, we are expecting an event.  */
+       u8                      event;
+#define ESP_EVENT_NONE         0x00
+#define ESP_EVENT_CMD_START    0x01
+#define ESP_EVENT_CMD_DONE     0x02
+#define ESP_EVENT_DATA_IN      0x03
+#define ESP_EVENT_DATA_OUT     0x04
+#define ESP_EVENT_DATA_DONE    0x05
+#define ESP_EVENT_MSGIN                0x06
+#define ESP_EVENT_MSGIN_MORE   0x07
+#define ESP_EVENT_MSGIN_DONE   0x08
+#define ESP_EVENT_MSGOUT       0x09
+#define ESP_EVENT_MSGOUT_DONE  0x0a
+#define ESP_EVENT_STATUS       0x0b
+#define ESP_EVENT_FREE_BUS     0x0c
+#define ESP_EVENT_CHECK_PHASE  0x0d
+#define ESP_EVENT_RESET                0x10
+
+       /* Probed in esp_set_clock_params() */
+       u32                     cfact;
+       u32                     cfreq;
+       u32                     ccycle;
+       u32                     ctick;
+       u32                     neg_defp;
+       u32                     sync_defp;
+
+       /* Computed in esp_reset_esp() */
+       u32                     max_period;
+       u32                     min_period;
+       u32                     radelay;
+
+       /* Slow command state.  */
+       u8                      *cmd_bytes_ptr;
+       int                     cmd_bytes_left;
+
+       struct completion       *eh_reset;
+
+       struct sbus_dma         *dma;
+};
+
+#define host_to_esp(host)      ((struct esp *)(host)->hostdata)
+
+/* A front-end driver for the ESP chip should do the following in
+ * its device probe routine (a condensed sketch follows this list):
+ * 1) Allocate the host and private area using scsi_host_alloc()
+ *    with size 'sizeof(struct esp)'.  The first argument to
+ *    scsi_host_alloc() should be &scsi_esp_template.
+ * 2) Set host->max_id as appropriate.
+ * 3) Set esp->host to the scsi_host itself, and esp->dev
+ *    to the device object pointer.
+ * 4) Hook up esp->ops to the front-end implementation.
+ * 5) If the ESP chip supports wide transfers, set ESP_FLAG_WIDE_CAPABLE
+ *    in esp->flags.
+ * 6) Map the DMA and ESP chip registers.
+ * 7) DMA map the ESP command block, store the DMA address
+ *    in esp->command_block_dma.
+ * 8) Register the scsi_esp_intr() interrupt handler.
+ * 9) Probe for and provide the following chip properties:
+ *    esp->scsi_id (assign to esp->host->this_id too)
+ *    esp->scsi_id_mask
+ *    If ESP bus is differential, set ESP_FLAG_DIFFERENTIAL
+ *    esp->cfreq
+ *    DMA burst bit mask in esp->bursts, if necessary
+ * 10) Perform any actions necessary before the ESP device can
+ *     be programmed for the first time.  On some configs, for
+ *     example, the DMA engine has to be reset before ESP can
+ *     be programmed.
+ * 11) Call dev_set_drvdata() if needed.
+ * 12) Call scsi_esp_register() with prepared 'esp' structure
+ *     and a device pointer if possible.
+ * 13) Check scsi_esp_register() return value, release all resources
+ *     if an error was returned.
+ */
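+/* A condensed sketch of such a probe routine (hypothetical names,
+ * error handling and resource cleanup omitted; only the scsi_esp_*
+ * entry points declared below are real):
+ *
+ *     struct Scsi_Host *host = scsi_host_alloc(&scsi_esp_template,
+ *                                              sizeof(struct esp));
+ *     struct esp *esp = host_to_esp(host);
+ *
+ *     host->max_id = 16;
+ *     esp->host = host;
+ *     esp->dev = my_platform_handle;
+ *     esp->ops = &my_esp_ops;
+ *     esp->regs = my_mapped_esp_regs;
+ *     esp->dma_regs = my_mapped_dma_regs;
+ *     esp->command_block = my_command_block;
+ *     esp->command_block_dma = my_command_block_dma;
+ *     request_irq(my_irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
+ *     esp->scsi_id = my_probed_scsi_id;
+ *     host->this_id = esp->scsi_id;
+ *     esp->scsi_id_mask = (1 << esp->scsi_id);
+ *     esp->cfreq = my_probed_clock_frequency;
+ *     err = scsi_esp_register(esp, my_generic_device);
+ */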
+extern struct scsi_host_template scsi_esp_template;
+extern int scsi_esp_register(struct esp *, struct device *);
+
+extern void scsi_esp_unregister(struct esp *);
+extern irqreturn_t scsi_esp_intr(int, void *);
+extern void scsi_esp_cmd(struct esp *, u8);
+
+#endif /* !(_ESP_SCSI_H) */
index 9f10689..c4195ea 100644 (file)
@@ -1403,7 +1403,7 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi
        struct scsi_host_template *tpnt = match->data;
        struct Scsi_Host *host;
        struct qlogicpti *qpti;
-       char *fcode;
+       const char *fcode;
 
        /* Sometimes Antares cards come up not completely
         * setup, and we get a report of a zero IRQ.
index 1b59b27..4bf9aa5 100644 (file)
@@ -50,7 +50,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
        while (skb->len >= NLMSG_SPACE(0)) {
                err = 0;
 
-               nlh = (struct nlmsghdr *) skb->data;
+               nlh = nlmsg_hdr(skb);
                if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
                    (skb->len < nlh->nlmsg_len)) {
                        printk(KERN_WARNING "%s: discarding partial skb\n",
@@ -168,7 +168,8 @@ scsi_netlink_init(void)
        }
 
        scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT,
-                               SCSI_NL_GRP_CNT, scsi_nl_rcv, THIS_MODULE);
+                               SCSI_NL_GRP_CNT, scsi_nl_rcv, NULL,
+                               THIS_MODULE);
        if (!scsi_nl_sock) {
                printk(KERN_ERR "%s: register of recieve handler failed\n",
                                __FUNCTION__);
index ce0d14a..aabaa05 100644 (file)
@@ -1081,7 +1081,7 @@ iscsi_if_rx(struct sock *sk, int len)
                        struct nlmsghdr *nlh;
                        struct iscsi_uevent *ev;
 
-                       nlh = (struct nlmsghdr *)skb->data;
+                       nlh = nlmsg_hdr(skb);
                        if (nlh->nlmsg_len < sizeof(*nlh) ||
                            skb->len < nlh->nlmsg_len) {
                                break;
@@ -1435,7 +1435,7 @@ static __init int iscsi_transport_init(void)
        if (err)
                goto unregister_conn_class;
 
-       nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx,
+       nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
                        THIS_MODULE);
        if (!nls) {
                err = -ENOBUFS;
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
new file mode 100644 (file)
index 0000000..8c766bc
--- /dev/null
@@ -0,0 +1,634 @@
+/* sun_esp.c: ESP front-end for Sparc SBUS systems.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <asm/sbus.h>
+
+#include <scsi/scsi_host.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME                "sun_esp"
+#define PFX DRV_MODULE_NAME    ": "
+#define DRV_VERSION            "1.000"
+#define DRV_MODULE_RELDATE     "April 19, 2007"
+
+#define dma_read32(REG) \
+       sbus_readl(esp->dma_regs + (REG))
+#define dma_write32(VAL, REG) \
+       sbus_writel((VAL), esp->dma_regs + (REG))
+
+static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev)
+{
+       struct sbus_dev *sdev = esp->dev;
+       struct sbus_dma *dma;
+
+       if (dma_sdev != NULL) {
+               for_each_dvma(dma) {
+                       if (dma->sdev == dma_sdev)
+                               break;
+               }
+       } else {
+               for_each_dvma(dma) {
+                       if (dma->sdev == NULL)
+                               break;
+
+                       /* If bus + slot are the same and it has the
+                        * correct OBP name, it's ours.
+                        */
+                       if (sdev->bus == dma->sdev->bus &&
+                           sdev->slot == dma->sdev->slot &&
+                           (!strcmp(dma->sdev->prom_name, "dma") ||
+                            !strcmp(dma->sdev->prom_name, "espdma")))
+                               break;
+               }
+       }
+
+       if (dma == NULL) {
+               printk(KERN_ERR PFX "[%s] Cannot find dma.\n",
+                      sdev->ofdev.node->full_name);
+               return -ENODEV;
+       }
+       esp->dma = dma;
+       esp->dma_regs = dma->regs;
+
+       return 0;
+
+}
+
+static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
+{
+       struct sbus_dev *sdev = esp->dev;
+       struct resource *res;
+
+       /* On HME, two reg sets exist, first is DVMA,
+        * second is ESP registers.
+        */
+       if (hme)
+               res = &sdev->resource[1];
+       else
+               res = &sdev->resource[0];
+
+       esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
+       if (!esp->regs)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int __devinit esp_sbus_map_command_block(struct esp *esp)
+{
+       struct sbus_dev *sdev = esp->dev;
+
+       esp->command_block = sbus_alloc_consistent(sdev, 16,
+                                                  &esp->command_block_dma);
+       if (!esp->command_block)
+               return -ENOMEM;
+       return 0;
+}
+
+static int __devinit esp_sbus_register_irq(struct esp *esp)
+{
+       struct Scsi_Host *host = esp->host;
+       struct sbus_dev *sdev = esp->dev;
+
+       host->irq = sdev->irqs[0];
+       return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
+}
+
+static void __devinit esp_get_scsi_id(struct esp *esp)
+{
+       struct sbus_dev *sdev = esp->dev;
+       struct device_node *dp = sdev->ofdev.node;
+
+       esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
+       if (esp->scsi_id != 0xff)
+               goto done;
+
+       esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
+       if (esp->scsi_id != 0xff)
+               goto done;
+
+       if (!sdev->bus) {
+               /* SUN4 */
+               esp->scsi_id = 7;
+               goto done;
+       }
+
+       esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
+                                            "scsi-initiator-id", 7);
+
+done:
+       esp->host->this_id = esp->scsi_id;
+       esp->scsi_id_mask = (1 << esp->scsi_id);
+}
+
+static void __devinit esp_get_differential(struct esp *esp)
+{
+       struct sbus_dev *sdev = esp->dev;
+       struct device_node *dp = sdev->ofdev.node;
+
+       if (of_find_property(dp, "differential", NULL))
+               esp->flags |= ESP_FLAG_DIFFERENTIAL;
+       else
+               esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
+}
+
+static void __devinit esp_get_clock_params(struct esp *esp)
+{
+       struct sbus_dev *sdev = esp->dev;
+       struct device_node *dp = sdev->ofdev.node;
+       struct device_node *bus_dp;
+       int fmhz;
+
+       bus_dp = NULL;
+       if (sdev != NULL && sdev->bus != NULL)
+               bus_dp = sdev->bus->ofdev.node;
+
+       fmhz = of_getintprop_default(dp, "clock-frequency", 0);
+       if (fmhz == 0)
+               fmhz = (!bus_dp) ? 0 :
+                       of_getintprop_default(bus_dp, "clock-frequency", 0);
+
+       esp->cfreq = fmhz;
+}
+
+static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
+{
+       struct sbus_dev *sdev = esp->dev;
+       struct device_node *dp = sdev->ofdev.node;
+       u8 bursts;
+
+       bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
+       if (dma) {
+               struct device_node *dma_dp = dma->ofdev.node;
+               u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
+               if (val != 0xff)
+                       bursts &= val;
+       }
+
+       if (sdev->bus) {
+               u8 val = of_getintprop_default(sdev->bus->ofdev.node,
+                                              "burst-sizes", 0xff);
+               if (val != 0xff)
+                       bursts &= val;
+       }
+
+       if (bursts == 0xff ||
+           (bursts & DMA_BURST16) == 0 ||
+           (bursts & DMA_BURST32) == 0)
+               bursts = (DMA_BURST32 - 1);
+
+       esp->bursts = bursts;
+}
+
+static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma)
+{
+       esp_get_scsi_id(esp);
+       esp_get_differential(esp);
+       esp_get_clock_params(esp);
+       esp_get_bursts(esp, espdma);
+}
+
+static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+       sbus_writeb(val, esp->regs + (reg * 4UL));
+}
+
+static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
+{
+       return sbus_readb(esp->regs + (reg * 4UL));
+}
+
+static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
+                                     size_t sz, int dir)
+{
+       return sbus_map_single(esp->dev, buf, sz, dir);
+}
+
+static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+                                 int num_sg, int dir)
+{
+       return sbus_map_sg(esp->dev, sg, num_sg, dir);
+}
+
+static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+                                 size_t sz, int dir)
+{
+       sbus_unmap_single(esp->dev, addr, sz, dir);
+}
+
+static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+                             int num_sg, int dir)
+{
+       sbus_unmap_sg(esp->dev, sg, num_sg, dir);
+}
+
+static int sbus_esp_irq_pending(struct esp *esp)
+{
+       if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
+               return 1;
+       return 0;
+}
+
+static void sbus_esp_reset_dma(struct esp *esp)
+{
+       int can_do_burst16, can_do_burst32, can_do_burst64;
+       int can_do_sbus64, lim;
+       u32 val;
+
+       can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
+       can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
+       can_do_burst64 = 0;
+       can_do_sbus64 = 0;
+       if (sbus_can_dma_64bit(esp->dev))
+               can_do_sbus64 = 1;
+       if (sbus_can_burst64(esp->sdev))
+               can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
+
+       /* Put the DVMA into a known state. */
+       if (esp->dma->revision != dvmahme) {
+               val = dma_read32(DMA_CSR);
+               dma_write32(val | DMA_RST_SCSI, DMA_CSR);
+               dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
+       }
+       switch (esp->dma->revision) {
+       case dvmahme:
+               dma_write32(DMA_RESET_FAS366, DMA_CSR);
+               dma_write32(DMA_RST_SCSI, DMA_CSR);
+
+               esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
+                                       DMA_SCSI_DISAB | DMA_INT_ENAB);
+
+               esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
+                                         DMA_BRST_SZ);
+
+               if (can_do_burst64)
+                       esp->prev_hme_dmacsr |= DMA_BRST64;
+               else if (can_do_burst32)
+                       esp->prev_hme_dmacsr |= DMA_BRST32;
+
+               if (can_do_sbus64) {
+                       esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
+                       sbus_set_sbus64(esp->dev, esp->bursts);
+               }
+
+               lim = 1000;
+               while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
+                       if (--lim == 0) {
+                               printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
+                                      "will not clear!\n",
+                                      esp->host->unique_id);
+                               break;
+                       }
+                       udelay(1);
+               }
+
+               dma_write32(0, DMA_CSR);
+               dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
+
+               dma_write32(0, DMA_ADDR);
+               break;
+
+       case dvmarev2:
+               if (esp->rev != ESP100) {
+                       val = dma_read32(DMA_CSR);
+                       dma_write32(val | DMA_3CLKS, DMA_CSR);
+               }
+               break;
+
+       case dvmarev3:
+               val = dma_read32(DMA_CSR);
+               val &= ~DMA_3CLKS;
+               val |= DMA_2CLKS;
+               if (can_do_burst32) {
+                       val &= ~DMA_BRST_SZ;
+                       val |= DMA_BRST32;
+               }
+               dma_write32(val, DMA_CSR);
+               break;
+
+       case dvmaesc1:
+               val = dma_read32(DMA_CSR);
+               val |= DMA_ADD_ENABLE;
+               val &= ~DMA_BCNT_ENAB;
+               if (!can_do_burst32 && can_do_burst16) {
+                       val |= DMA_ESC_BURST;
+               } else {
+                       val &= ~(DMA_ESC_BURST);
+               }
+               dma_write32(val, DMA_CSR);
+               break;
+
+       default:
+               break;
+       }
+
+       /* Enable interrupts.  */
+       val = dma_read32(DMA_CSR);
+       dma_write32(val | DMA_INT_ENAB, DMA_CSR);
+}
+
+static void sbus_esp_dma_drain(struct esp *esp)
+{
+       u32 csr;
+       int lim;
+
+       if (esp->dma->revision == dvmahme)
+               return;
+
+       csr = dma_read32(DMA_CSR);
+       if (!(csr & DMA_FIFO_ISDRAIN))
+               return;
+
+       if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1)
+               dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
+
+       lim = 1000;
+       while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
+               if (--lim == 0) {
+                       printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
+                              esp->host->unique_id);
+                       break;
+               }
+               udelay(1);
+       }
+}
+
+static void sbus_esp_dma_invalidate(struct esp *esp)
+{
+       if (esp->dma->revision == dvmahme) {
+               dma_write32(DMA_RST_SCSI, DMA_CSR);
+
+               esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
+                                        (DMA_PARITY_OFF | DMA_2CLKS |
+                                         DMA_SCSI_DISAB | DMA_INT_ENAB)) &
+                                       ~(DMA_ST_WRITE | DMA_ENABLE));
+
+               dma_write32(0, DMA_CSR);
+               dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
+
+               /* This is necessary to avoid having the SCSI channel
+                * engine lock up on us.
+                */
+               dma_write32(0, DMA_ADDR);
+       } else {
+               u32 val;
+               int lim;
+
+               lim = 1000;
+               while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
+                       if (--lim == 0) {
+                               printk(KERN_ALERT PFX "esp%d: DMA will not "
+                                      "invalidate!\n", esp->host->unique_id);
+                               break;
+                       }
+                       udelay(1);
+               }
+
+               val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
+               val |= DMA_FIFO_INV;
+               dma_write32(val, DMA_CSR);
+               val &= ~DMA_FIFO_INV;
+               dma_write32(val, DMA_CSR);
+       }
+}
+
+static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+                                 u32 dma_count, int write, u8 cmd)
+{
+       u32 csr;
+
+       BUG_ON(!(cmd & ESP_CMD_DMA));
+
+       sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+       sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+       if (esp->rev == FASHME) {
+               sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
+               sbus_esp_write8(esp, 0, FAS_RHI);
+
+               scsi_esp_cmd(esp, cmd);
+
+               csr = esp->prev_hme_dmacsr;
+               csr |= DMA_SCSI_DISAB | DMA_ENABLE;
+               if (write)
+                       csr |= DMA_ST_WRITE;
+               else
+                       csr &= ~DMA_ST_WRITE;
+               esp->prev_hme_dmacsr = csr;
+
+               dma_write32(dma_count, DMA_COUNT);
+               dma_write32(addr, DMA_ADDR);
+               dma_write32(csr, DMA_CSR);
+       } else {
+               csr = dma_read32(DMA_CSR);
+               csr |= DMA_ENABLE;
+               if (write)
+                       csr |= DMA_ST_WRITE;
+               else
+                       csr &= ~DMA_ST_WRITE;
+               dma_write32(csr, DMA_CSR);
+               if (esp->dma->revision == dvmaesc1) {
+                       u32 end = PAGE_ALIGN(addr + dma_count + 16U);
+                       dma_write32(end - addr, DMA_COUNT);
+               }
+               dma_write32(addr, DMA_ADDR);
+
+               scsi_esp_cmd(esp, cmd);
+       }
+
+}
+
+static int sbus_esp_dma_error(struct esp *esp)
+{
+       u32 csr = dma_read32(DMA_CSR);
+
+       if (csr & DMA_HNDL_ERROR)
+               return 1;
+
+       return 0;
+}
+
+static const struct esp_driver_ops sbus_esp_ops = {
+       .esp_write8     =       sbus_esp_write8,
+       .esp_read8      =       sbus_esp_read8,
+       .map_single     =       sbus_esp_map_single,
+       .map_sg         =       sbus_esp_map_sg,
+       .unmap_single   =       sbus_esp_unmap_single,
+       .unmap_sg       =       sbus_esp_unmap_sg,
+       .irq_pending    =       sbus_esp_irq_pending,
+       .reset_dma      =       sbus_esp_reset_dma,
+       .dma_drain      =       sbus_esp_dma_drain,
+       .dma_invalidate =       sbus_esp_dma_invalidate,
+       .send_dma_cmd   =       sbus_esp_send_dma_cmd,
+       .dma_error      =       sbus_esp_dma_error,
+};
+
+static int __devinit esp_sbus_probe_one(struct device *dev,
+                                       struct sbus_dev *esp_dev,
+                                       struct sbus_dev *espdma,
+                                       struct sbus_bus *sbus,
+                                       int hme)
+{
+       struct scsi_host_template *tpnt = &scsi_esp_template;
+       struct Scsi_Host *host;
+       struct esp *esp;
+       int err;
+
+       host = scsi_host_alloc(tpnt, sizeof(struct esp));
+
+       err = -ENOMEM;
+       if (!host)
+               goto fail;
+
+       host->max_id = (hme ? 16 : 8);
+       esp = host_to_esp(host);
+
+       esp->host = host;
+       esp->dev = esp_dev;
+       esp->ops = &sbus_esp_ops;
+
+       if (hme)
+               esp->flags |= ESP_FLAG_WIDE_CAPABLE;
+
+       err = esp_sbus_find_dma(esp, espdma);
+       if (err < 0)
+               goto fail_unlink;
+
+       err = esp_sbus_map_regs(esp, hme);
+       if (err < 0)
+               goto fail_unlink;
+
+       err = esp_sbus_map_command_block(esp);
+       if (err < 0)
+               goto fail_unmap_regs;
+
+       err = esp_sbus_register_irq(esp);
+       if (err < 0)
+               goto fail_unmap_command_block;
+
+       esp_sbus_get_props(esp, espdma);
+
+       /* Before we try to touch the ESP chip, ESC1 dma can
+        * come up with the reset bit set, so make sure that
+        * is clear first.
+        */
+       if (esp->dma->revision == dvmaesc1) {
+               u32 val = dma_read32(DMA_CSR);
+
+               dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
+       }
+
+       dev_set_drvdata(&esp_dev->ofdev.dev, esp);
+
+       err = scsi_esp_register(esp, dev);
+       if (err)
+               goto fail_free_irq;
+
+       return 0;
+
+fail_free_irq:
+       free_irq(host->irq, esp);
+fail_unmap_command_block:
+       sbus_free_consistent(esp->dev, 16,
+                            esp->command_block,
+                            esp->command_block_dma);
+fail_unmap_regs:
+       sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
+fail_unlink:
+       scsi_host_put(host);
+fail:
+       return err;
+}
+
+static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
+{
+       struct sbus_dev *sdev = to_sbus_device(&dev->dev);
+       struct device_node *dp = dev->node;
+       struct sbus_dev *dma_sdev = NULL;
+       int hme = 0;
+
+       if (dp->parent &&
+           (!strcmp(dp->parent->name, "espdma") ||
+            !strcmp(dp->parent->name, "dma")))
+               dma_sdev = sdev->parent;
+       else if (!strcmp(dp->name, "SUNW,fas")) {
+               dma_sdev = sdev;
+               hme = 1;
+       }
+
+       return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev,
+                                 sdev->bus, hme);
+}
+
+static int __devexit esp_sbus_remove(struct of_device *dev)
+{
+       struct esp *esp = dev_get_drvdata(&dev->dev);
+       unsigned int irq = esp->host->irq;
+       u32 val;
+
+       scsi_esp_unregister(esp);
+
+       /* Disable interrupts.  */
+       val = dma_read32(DMA_CSR);
+       dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
+
+       free_irq(irq, esp);
+       sbus_free_consistent(esp->dev, 16,
+                            esp->command_block,
+                            esp->command_block_dma);
+       sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
+
+       scsi_host_put(esp->host);
+
+       return 0;
+}
+
+static struct of_device_id esp_match[] = {
+       {
+               .name = "SUNW,esp",
+       },
+       {
+               .name = "SUNW,fas",
+       },
+       {
+               .name = "esp",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, esp_match);
+
+static struct of_platform_driver esp_sbus_driver = {
+       .name           = "esp",
+       .match_table    = esp_match,
+       .probe          = esp_sbus_probe,
+       .remove         = __devexit_p(esp_sbus_remove),
+};
+
+static int __init sunesp_init(void)
+{
+       return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
+}
+
+static void __exit sunesp_exit(void)
+{
+       of_unregister_driver(&esp_sbus_driver);
+}
+
+MODULE_DESCRIPTION("Sun ESP SCSI driver");
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(sunesp_init);
+module_exit(sunesp_exit);
index c129a0e..90621c3 100644 (file)
@@ -1310,7 +1310,8 @@ static unsigned int check_modem_status(struct uart_8250_port *up)
 {
        unsigned int status = serial_in(up, UART_MSR);
 
-       if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI) {
+       if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
+           up->port.info != NULL) {
                if (status & UART_MSR_TERI)
                        up->port.icount.rng++;
                if (status & UART_MSR_DDSR)
@@ -1333,8 +1334,9 @@ static inline void
 serial8250_handle_port(struct uart_8250_port *up)
 {
        unsigned int status;
+       unsigned long flags;
 
-       spin_lock(&up->port.lock);
+       spin_lock_irqsave(&up->port.lock, flags);
 
        status = serial_inp(up, UART_LSR);
 
@@ -1346,7 +1348,7 @@ serial8250_handle_port(struct uart_8250_port *up)
        if (status & UART_LSR_THRE)
                transmit_chars(up);
 
-       spin_unlock(&up->port.lock);
+       spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
 /*
index 41431d0..246c557 100644 (file)
@@ -164,7 +164,7 @@ static void free_port_memory(struct icom_port *icom_port)
        }
 }
 
-static int __init get_port_memory(struct icom_port *icom_port)
+static int __devinit get_port_memory(struct icom_port *icom_port)
 {
        int index;
        unsigned long stgAddr;
@@ -1380,7 +1380,7 @@ static void icom_port_active(struct icom_port *icom_port, struct icom_adapter *i
                            0x8024 + 2 - 2 * (icom_port->port - 2);
        }
 }
-static int __init icom_load_ports(struct icom_adapter *icom_adapter)
+static int __devinit icom_load_ports(struct icom_adapter *icom_adapter)
 {
        struct icom_port *icom_port;
        int port_num;
@@ -1473,7 +1473,7 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
                }
        }
 
-       free_irq(icom_adapter->irq_number, (void *) icom_adapter);
+       free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
        iounmap(icom_adapter->base_addr);
        icom_free_adapter(icom_adapter);
        pci_release_regions(icom_adapter->pci_dev);
@@ -1539,7 +1539,6 @@ static int __devinit icom_probe(struct pci_dev *dev,
        }
 
         icom_adapter->base_addr_pci = pci_resource_start(dev, 0);
-        icom_adapter->irq_number = dev->irq;
         icom_adapter->pci_dev = dev;
         icom_adapter->version = ent->driver_data;
         icom_adapter->subsystem_id = ent->subdevice;
@@ -1570,7 +1569,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
                icom_port = &icom_adapter->port_info[index];
 
                if (icom_port->status == ICOM_PORT_ACTIVE) {
-                       icom_port->uart_port.irq = icom_port->adapter->irq_number;
+                       icom_port->uart_port.irq = icom_port->adapter->pci_dev->irq;
                        icom_port->uart_port.type = PORT_ICOM;
                        icom_port->uart_port.iotype = UPIO_MEM;
                        icom_port->uart_port.membase =
index 798f1ef..e8578d8 100644 (file)
@@ -258,7 +258,6 @@ struct icom_port {
 struct icom_adapter {
        void __iomem * base_addr;
        unsigned long base_addr_pci;
-       unsigned char irq_number;
        struct pci_dev *pci_dev;
        struct icom_port port_info[4];
        int index;
index 96a852a..bfd4417 100644 (file)
@@ -1387,8 +1387,8 @@ static enum su_type __devinit su_get_type(struct device_node *dp)
        struct device_node *ap = of_find_node_by_path("/aliases");
 
        if (ap) {
-               char *keyb = of_get_property(ap, "keyboard", NULL);
-               char *ms = of_get_property(ap, "mouse", NULL);
+               const char *keyb = of_get_property(ap, "keyboard", NULL);
+               const char *ms = of_get_property(ap, "mouse", NULL);
 
                if (keyb) {
                        if (dp == of_find_node_by_path(keyb))
index ec63b0e..d3e2c5f 100644 (file)
@@ -343,7 +343,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
                UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end);
        }
 
-       memcpy(sarb->tail, source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD);
+       memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD);
        __skb_put(sarb, ATM_CELL_PAYLOAD);
 
        if (pti & 1) {
@@ -370,7 +370,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
                        goto out;
                }
 
-               if (crc32_be(~0, sarb->tail - pdu_length, pdu_length) != 0xc704dd7b) {
+               if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
                        atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
                                  __func__, vcc);
                        atomic_inc(&vcc->stats->rx_err);
@@ -396,7 +396,9 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
                        goto out;       /* atm_charge increments rx_drop */
                }
 
-               memcpy(skb->data, sarb->tail - pdu_length, length);
+               skb_copy_to_linear_data(skb,
+                                       skb_tail_pointer(sarb) - pdu_length,
+                                       length);
                __skb_put(skb, length);
 
                vdbg("%s: sending skb 0x%p, skb->len %u, skb->truesize %u",
@@ -484,7 +486,7 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance,
                ptr[4] = 0xec;
                ptr += ATM_CELL_HEADER;
 
-               memcpy(ptr, skb->data, data_len);
+               skb_copy_from_linear_data(skb, ptr, data_len);
                ptr += data_len;
                __skb_pull(skb, data_len);
 
index 04e6b85..8f9f217 100644 (file)
@@ -1766,7 +1766,6 @@ static void rx_complete (struct usb_ep *ep, struct usb_request *req)
                        break;
                }
 
-               skb->dev = dev->net;
                skb->protocol = eth_type_trans (skb, dev->net);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += skb->len;
index 5808ea0..d5ef97b 100644 (file)
@@ -298,7 +298,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                if (ax_skb) {
                        ax_skb->len = size;
                        ax_skb->data = packet;
-                       ax_skb->tail = packet + size;
+                       skb_set_tail_pointer(ax_skb, size);
                        usbnet_skb_return(dev, ax_skb);
                } else {
                        return 0;
@@ -338,7 +338,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
            && ((headroom + tailroom) >= (4 + padlen))) {
                if ((headroom < 4) || (tailroom < padlen)) {
                        skb->data = memmove(skb->head + 4, skb->data, skb->len);
-                       skb->tail = skb->data + skb->len;
+                       skb_set_tail_pointer(skb, skb->len);
                }
        } else {
                struct sk_buff *skb2;
@@ -352,11 +352,11 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
        skb_push(skb, 4);
        packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
        cpu_to_le32s(&packet_len);
-       memcpy(skb->data, &packet_len, sizeof(packet_len));
+       skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
 
        if ((skb->len % 512) == 0) {
                cpu_to_le32s(&padbytes);
-               memcpy( skb->tail, &padbytes, sizeof(padbytes));
+               memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
                skb_put(skb, sizeof(padbytes));
        }
        return skb;
index 4852012..ffec2e0 100644 (file)
@@ -255,7 +255,6 @@ static void catc_rx_done(struct urb *urb)
                if (!(skb = dev_alloc_skb(pkt_len)))
                        return;
 
-               skb->dev = catc->netdev;
                eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0);
                skb_put(skb, pkt_len);
 
@@ -419,7 +418,7 @@ static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6;
        tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr;
        *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len);
-       memcpy(tx_buf + 2, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, tx_buf + 2, skb->len);
        catc->tx_ptr += skb->len + 2;
 
        if (!test_and_set_bit(TX_RUNNING, &catc->flags))
index d257a8e..031cf5c 100644 (file)
@@ -157,7 +157,7 @@ genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
                if ((headroom < (4 + 4*1)) || (tailroom < padlen)) {
                        skb->data = memmove(skb->head + (4 + 4*1),
                                             skb->data, skb->len);
-                       skb->tail = skb->data + skb->len;
+                       skb_set_tail_pointer(skb, skb->len);
                }
        } else {
                struct sk_buff  *skb2;
index de95268..a0cc05d 100644 (file)
@@ -636,8 +636,6 @@ static void kaweth_usb_receive(struct urb *urb)
 
                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
 
-               skb->dev = net;
-
                eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0);
 
                skb_put(skb, pkt_len);
index ccebfde..19bf8da 100644 (file)
@@ -520,7 +520,7 @@ net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
                        skb->data = memmove(skb->head
                                                + sizeof (struct nc_header),
                                            skb->data, skb->len);
-                       skb->tail = skb->data + len;
+                       skb_set_tail_pointer(skb, len);
                        goto encapsulate;
                }
        }
index d48c024..1ad4ee5 100644 (file)
@@ -316,6 +316,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
        return ret;
 }
 
+/* Returns 0 on success, error on failure */
 static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
 {
        int i;
@@ -574,7 +575,6 @@ static void fill_skb_pool(pegasus_t * pegasus)
                 */
                if (pegasus->rx_pool[i] == NULL)
                        return;
-               pegasus->rx_pool[i]->dev = pegasus->net;
                skb_reserve(pegasus->rx_pool[i], 2);
        }
 }
@@ -847,10 +847,16 @@ static void intr_callback(struct urb *urb)
                 * d[0].NO_CARRIER kicks in only with failed TX.
                 * ... so monitoring with MII may be safest.
                 */
-               if (d[0] & NO_CARRIER)
-                       netif_carrier_off(net); 
-               else
-                       netif_carrier_on(net);
+               if (pegasus->features & TRUST_LINK_STATUS) {
+                       if (d[5] & LINK_STATUS)
+                               netif_carrier_on(net);
+                       else
+                               netif_carrier_off(net);
+               } else {
+                       /* Never set carrier _on_ based on ! NO_CARRIER */
+                       if (d[0] & NO_CARRIER)
+                               netif_carrier_off(net); 
+               }
 
                /* bytes 3-4 == rx_lostpkt, reg 2E/2F */
                pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
@@ -883,7 +889,7 @@ static int pegasus_start_xmit(struct sk_buff *skb, struct net_device *net)
        netif_stop_queue(net);
 
        ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16);
-       memcpy(pegasus->tx_buff + 2, skb->data, skb->len);
+       skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len);
        usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb,
                          usb_sndbulkpipe(pegasus->usb, 2),
                          pegasus->tx_buff, count,
@@ -950,7 +956,7 @@ static void set_carrier(struct net_device *net)
        pegasus_t *pegasus = netdev_priv(net);
        u16 tmp;
 
-       if (!read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
+       if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
                return;
 
        if (tmp & BMSR_LSTATUS)
@@ -1408,8 +1414,10 @@ static void pegasus_disconnect(struct usb_interface *intf)
        unlink_all_urbs(pegasus);
        free_all_urbs(pegasus);
        free_skb_pool(pegasus);
-       if (pegasus->rx_skb)
+       if (pegasus->rx_skb != NULL) {
                dev_kfree_skb(pegasus->rx_skb);
+               pegasus->rx_skb = NULL;
+       }
        free_netdev(pegasus->net);
 }
 
index c746782..c7aadb4 100644 (file)
@@ -11,6 +11,7 @@
 
 #define        PEGASUS_II              0x80000000
 #define        HAS_HOME_PNA            0x40000000
+#define        TRUST_LINK_STATUS       0x20000000
 
 #define        PEGASUS_MTU             1536
 #define        RX_SKBS                 4
@@ -203,7 +204,7 @@ PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
 PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
                DEFAULT_GPIO_RESET | PEGASUS_II )
 PEGASUS_DEV( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121,
-               DEFAULT_GPIO_RESET | PEGASUS_II )
+               DEFAULT_GPIO_RESET | PEGASUS_II | TRUST_LINK_STATUS )
 PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
                DEFAULT_GPIO_RESET )
 PEGASUS_DEV( "Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987,
index 39a21c7..1d36772 100644 (file)
@@ -588,7 +588,7 @@ rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
                if (likely((sizeof *hdr) <= room)) {
                        skb->data = memmove(skb->head + sizeof *hdr,
                                            skb->data, len);
-                       skb->tail = skb->data + len;
+                       skb_set_tail_pointer(skb, len);
                        goto fill;
                }
        }
index ea153dc..fa598f0 100644 (file)
@@ -646,7 +646,6 @@ static void fill_skb_pool(rtl8150_t *dev)
                if (!skb) {
                        return;
                }
-               skb->dev = dev->netdev;
                skb_reserve(skb, 2);
                dev->rx_skb_pool[i] = skb;
        }
index de69b18..0c5465a 100644 (file)
@@ -203,7 +203,6 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
 {
        int     status;
 
-       skb->dev = dev->net;
        skb->protocol = eth_type_trans (skb, dev->net);
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
index e4f0dd0..8372ace 100644 (file)
@@ -139,7 +139,7 @@ config FB_TILEBLITTING
         This is particularly important to one driver, matroxfb.  If
         unsure, say N.
 
-comment "Frambuffer hardware drivers"
+comment "Frambuffer hardware drivers"
        depends on FB
 
 config FB_CIRRUS
index d7627fc..8514f2a 100644 (file)
@@ -2899,7 +2899,7 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
                        struct fb_info *info, unsigned long addr)
 {
        struct atyfb_par *par = info->par;
-       struct pcidev_cookie *pcp;
+       struct device_node *dp;
        char prop[128];
        int node, len, i, j, ret;
        u32 mem, chip_id;
@@ -3037,8 +3037,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
                        node = 0;
        }
 
-       pcp = pdev->sysdata;
-       if (node == pcp->prom_node->node) {
+       dp = pci_device_to_OF_node(pdev);
+       if (node == dp->node) {
                struct fb_var_screeninfo *var = &default_var;
                unsigned int N, P, Q, M, T, R;
                u32 v_total, h_total;
index 1bf6f42..a4b3fd1 100644 (file)
@@ -410,7 +410,7 @@ static int  __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo)
 }
 #endif
 
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 /*
  * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
  * tree. Hopefully, ATI OF driver is kind enough to fill these
@@ -440,7 +440,7 @@ static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo)
 
                return 0;
 }
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 
 /*
  * Read PLL infos from chip registers
@@ -645,7 +645,7 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo)
        rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK;
 
 
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
        /*
         * Retrieve PLL infos from Open Firmware first
         */
@@ -653,7 +653,7 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo)
                        printk(KERN_INFO "radeonfb: Retrieved PLL infos from Open Firmware\n");
                goto found;
        }
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 
        /*
         * Check out if we have an X86 which gave us some PLL informations
@@ -2231,7 +2231,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
            rinfo->family == CHIP_FAMILY_RS200)
                rinfo->errata |= CHIP_ERRATA_PLL_DELAY;
 
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
        /* On PPC, we obtain the OF device-node pointer to the firmware
         * data for this chip
         */
@@ -2240,6 +2240,8 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
                printk(KERN_WARNING "radeonfb (%s): Cannot match card to OF node !\n",
                       pci_name(rinfo->pdev));
 
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
+#ifdef CONFIG_PPC_OF
        /* On PPC, the firmware sets up a memory mapping that tends
         * to cause lockups when enabling the engine. We reconfigure
         * the card internal memory mappings properly
index 38c7dbf..737b5c0 100644 (file)
@@ -52,7 +52,7 @@ static char *radeon_get_mon_name(int type)
 }
 
 
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 /*
  * Try to find monitor informations & EDID data out of the Open Firmware
  * device-tree. This also contains some "hacks" to work around a few machine
@@ -156,7 +156,7 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_
        }
         return MT_NONE;
 }
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 
 
 static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
@@ -495,11 +495,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
                 * Old single head cards
                 */
                if (!rinfo->has_CRTC2) {
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
                        if (rinfo->mon1_type == MT_NONE)
                                rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0,
                                                                        &rinfo->mon1_EDID);
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 #ifdef CONFIG_FB_RADEON_I2C
                        if (rinfo->mon1_type == MT_NONE)
                                rinfo->mon1_type =
@@ -544,11 +544,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
                /*
                 * Probe primary head (DVI or laptop internal panel)
                 */
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
                if (rinfo->mon1_type == MT_NONE)
                        rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0,
                                                                &rinfo->mon1_EDID);
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 #ifdef CONFIG_FB_RADEON_I2C
                if (rinfo->mon1_type == MT_NONE)
                        rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_dvi,
@@ -572,11 +572,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
                /*
                 * Probe secondary head (mostly VGA, can be DVI)
                 */
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
                if (rinfo->mon2_type == MT_NONE)
                        rinfo->mon2_type = radeon_probe_OF_head(rinfo, 1,
                                                                &rinfo->mon2_EDID);
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 #ifdef CONFIG_FB_RADEON_I2C
                if (rinfo->mon2_type == MT_NONE)
                        rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_vga,
index d5ff224..3190003 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <asm/io.h>
 
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 #include <asm/prom.h>
 #endif
 
@@ -292,7 +292,7 @@ struct radeonfb_info {
        unsigned long           fb_local_base;
 
        struct pci_dev          *pdev;
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
        struct device_node      *of_node;
 #endif
 
index 767c850..f042428 100644 (file)
@@ -266,7 +266,7 @@ static void __devinit cg3_init_fix(struct fb_info *info, int linebytes,
 static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var,
                                              struct device_node *dp)
 {
-       char *params;
+       const char *params;
        char *p;
        int ww, hh;
 
index 90592fb..eb1a481 100644 (file)
@@ -44,8 +44,8 @@
 
 #include <asm/io.h>
 
-#ifdef __sparc__
-#include <asm/pbm.h>
+#ifdef CONFIG_SPARC
+#include <asm/prom.h>
 #include <asm/pcic.h>
 #endif
 
@@ -96,7 +96,7 @@ struct fb_var_screeninfo default_var = {
        .vmode          = FB_VMODE_NONINTERLACED
 };
 
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
 struct fb_var_screeninfo default_var_1024x768 __initdata = {
        /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */
        .xres           = 1024,
@@ -188,7 +188,7 @@ static inline void iga_outb(struct iga_par *par, unsigned char val,
         pci_outb(par, val, reg+1);
 }
 
-#endif /* __sparc__ */
+#endif /* CONFIG_SPARC */
 
 /*
  *  Very important functionality for the JavaEngine1 computer:
@@ -217,7 +217,7 @@ static void iga_blank_border(struct iga_par *par)
                iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i);
 }
 
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
 static int igafb_mmap(struct fb_info *info,
                      struct vm_area_struct *vma)
 {
@@ -271,7 +271,7 @@ static int igafb_mmap(struct fb_info *info,
        vma->vm_flags |= VM_IO;
        return 0;
 }
-#endif /* __sparc__ */
+#endif /* CONFIG_SPARC */
 
 static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green,
                            unsigned blue, unsigned transp,
@@ -323,7 +323,7 @@ static struct fb_ops igafb_ops = {
        .fb_fillrect    = cfb_fillrect,
        .fb_copyarea    = cfb_copyarea,
        .fb_imageblit   = cfb_imageblit,
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
        .fb_mmap        = igafb_mmap,
 #endif
 };
@@ -424,7 +424,7 @@ int __init igafb_init(void)
 
        par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK;
 
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
        /*
         * The following is sparc specific and this is why:
         *
@@ -477,8 +477,8 @@ int __init igafb_init(void)
         * Set default vmode and cmode from PROM properties.
         */
        {
-                struct pcidev_cookie *cookie = pdev->sysdata;
-                int node = cookie->prom_node;
+               struct device_node *dp = pci_device_to_OF_node(pdev);
+                int node = dp->node;
                 int width = prom_getintdefault(node, "width", 1024);
                 int height = prom_getintdefault(node, "height", 768);
                 int depth = prom_getintdefault(node, "depth", 8);
@@ -534,7 +534,7 @@ int __init igafb_init(void)
                kfree(info);
         }
 
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
            /*
             * Add /dev/fb mmap values.
             */
@@ -552,7 +552,7 @@ int __init igafb_init(void)
            par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */
            par->mmap_map[1].prot_mask = SRMMU_CACHE;
            par->mmap_map[1].prot_flag = SRMMU_WRITE;
-#endif /* __sparc__ */
+#endif /* CONFIG_SPARC */
 
        return 0;
 }
index 124a085..b01b0a4 100644 (file)
@@ -415,7 +415,7 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir)
        file_inode = file->d_inode;
        sb = file_inode->i_sb;
        v9ses = v9fs_inode2v9ses(file_inode);
-       v9fid = v9fs_fid_lookup(file);
+       v9fid = v9fs_fid_clone(file);
        if(IS_ERR(v9fid))
                return PTR_ERR(v9fid);
 
index 3c4886b..e33c089 100644 (file)
@@ -2019,7 +2019,7 @@ config CODA_FS_OLD_API
 config AFS_FS
        tristate "Andrew File System support (AFS) (EXPERIMENTAL)"
        depends on INET && EXPERIMENTAL
-       select RXRPC
+       select AF_RXRPC
        help
          If you say Y here, you will get an experimental Andrew File System
          driver. It currently only supports unsecured read-only AFS access.
@@ -2028,8 +2028,15 @@ config AFS_FS
 
          If unsure, say N.
 
-config RXRPC
-       tristate
+config AFS_DEBUG
+       bool "AFS dynamic debugging"
+       depends on AFS_FS
+       help
+         Say Y here to make runtime controllable debugging messages appear.
+
+         See <file:Documentation/filesystems/afs.txt> for more information.
+
+         If unsure, say N.
 
 config 9P_FS
        tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)"
index 4029c9d..01545eb 100644 (file)
@@ -2,8 +2,6 @@
 # Makefile for Red Hat Linux AFS client.
 #
 
-#CFLAGS += -finstrument-functions
-
 kafs-objs := \
        callback.o \
        cell.o \
@@ -12,14 +10,15 @@ kafs-objs := \
        file.o \
        fsclient.o \
        inode.o \
-       kafsasyncd.o \
-       kafstimod.o \
        main.o \
        misc.o \
        mntpt.o \
        proc.o \
+       rxrpc.o \
+       security.o \
        server.o \
        super.o \
+       use-rtnetlink.o \
        vlclient.o \
        vlocation.o \
        vnode.o \
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
new file mode 100644 (file)
index 0000000..52d0752
--- /dev/null
@@ -0,0 +1,146 @@
+/* AFS common types
+ *
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AFS_H
+#define AFS_H
+
+#include <linux/in.h>
+
+#define AFS_MAXCELLNAME        64              /* maximum length of a cell name */
+#define AFS_MAXVOLNAME 64              /* maximum length of a volume name */
+
+typedef unsigned                       afs_volid_t;
+typedef unsigned                       afs_vnodeid_t;
+typedef unsigned long long             afs_dataversion_t;
+
+typedef enum {
+       AFSVL_RWVOL,                    /* read/write volume */
+       AFSVL_ROVOL,                    /* read-only volume */
+       AFSVL_BACKVOL,                  /* backup volume */
+} __attribute__((packed)) afs_voltype_t;
+
+typedef enum {
+       AFS_FTYPE_INVALID       = 0,
+       AFS_FTYPE_FILE          = 1,
+       AFS_FTYPE_DIR           = 2,
+       AFS_FTYPE_SYMLINK       = 3,
+} afs_file_type_t;
+
+/*
+ * AFS file identifier
+ */
+struct afs_fid {
+       afs_volid_t     vid;            /* volume ID */
+       afs_vnodeid_t   vnode;          /* file index within volume */
+       unsigned        unique;         /* unique ID number (file index version) */
+};
+
+/*
+ * AFS callback notification
+ */
+typedef enum {
+       AFSCM_CB_UNTYPED        = 0,    /* no type set on CB break */
+       AFSCM_CB_EXCLUSIVE      = 1,    /* CB exclusive to CM [not implemented] */
+       AFSCM_CB_SHARED         = 2,    /* CB shared by other CM's */
+       AFSCM_CB_DROPPED        = 3,    /* CB promise cancelled by file server */
+} afs_callback_type_t;
+
+struct afs_callback {
+       struct afs_fid          fid;            /* file identifier */
+       unsigned                version;        /* callback version */
+       unsigned                expiry;         /* time at which expires */
+       afs_callback_type_t     type;           /* type of callback */
+};
+
+#define AFSCBMAX 50    /* maximum callbacks transferred per bulk op */
+
+/*
+ * AFS volume information
+ */
+struct afs_volume_info {
+       afs_volid_t             vid;            /* volume ID */
+       afs_voltype_t           type;           /* type of this volume */
+       afs_volid_t             type_vids[5];   /* volume ID's for possible types for this vol */
+
+       /* list of fileservers serving this volume */
+       size_t                  nservers;       /* number of entries used in servers[] */
+       struct {
+               struct in_addr  addr;           /* fileserver address */
+       } servers[8];
+};
+
+/*
+ * AFS security ACE access mask
+ */
+typedef u32 afs_access_t;
+#define AFS_ACE_READ           0x00000001U     /* - permission to read a file/dir */
+#define AFS_ACE_WRITE          0x00000002U     /* - permission to write/chmod a file */
+#define AFS_ACE_INSERT         0x00000004U     /* - permission to create dirent in a dir */
+#define AFS_ACE_LOOKUP         0x00000008U     /* - permission to lookup a file/dir in a dir */
+#define AFS_ACE_DELETE         0x00000010U     /* - permission to delete a dirent from a dir */
+#define AFS_ACE_LOCK           0x00000020U     /* - permission to lock a file */
+#define AFS_ACE_ADMINISTER     0x00000040U     /* - permission to change ACL */
+#define AFS_ACE_USER_A         0x01000000U     /* - 'A' user-defined permission */
+#define AFS_ACE_USER_B         0x02000000U     /* - 'B' user-defined permission */
+#define AFS_ACE_USER_C         0x04000000U     /* - 'C' user-defined permission */
+#define AFS_ACE_USER_D         0x08000000U     /* - 'D' user-defined permission */
+#define AFS_ACE_USER_E         0x10000000U     /* - 'E' user-defined permission */
+#define AFS_ACE_USER_F         0x20000000U     /* - 'F' user-defined permission */
+#define AFS_ACE_USER_G         0x40000000U     /* - 'G' user-defined permission */
+#define AFS_ACE_USER_H         0x80000000U     /* - 'H' user-defined permission */
+
+/*
+ * AFS file status information
+ */
+struct afs_file_status {
+       unsigned                if_version;     /* interface version */
+#define AFS_FSTATUS_VERSION    1
+
+       afs_file_type_t         type;           /* file type */
+       unsigned                nlink;          /* link count */
+       u64                     size;           /* file size */
+       afs_dataversion_t       data_version;   /* current data version */
+       u32                     author;         /* author ID */
+       u32                     owner;          /* owner ID */
+       u32                     group;          /* group ID */
+       afs_access_t            caller_access;  /* access rights for authenticated caller */
+       afs_access_t            anon_access;    /* access rights for unauthenticated caller */
+       umode_t                 mode;           /* UNIX mode */
+       struct afs_fid          parent;         /* parent dir ID for non-dirs only */
+       time_t                  mtime_client;   /* last time client changed data */
+       time_t                  mtime_server;   /* last time server changed data */
+};
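+
+As a small illustration of how the ACE access bits above are meant to be consumed, a permission test against caller_access could look like the following hypothetical helper (not part of this patch):
+
+	/* Hypothetical helper, not part of this patch: test the ACE bits
+	 * that the file server granted to the authenticated caller.
+	 */
+	static inline bool afs_caller_may_write(const struct afs_file_status *status)
+	{
+		return (status->caller_access &
+			(AFS_ACE_WRITE | AFS_ACE_INSERT)) != 0;
+	}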
+
+/*
+ * AFS file status change request
+ */
+struct afs_store_status {
+       u32                     mask;           /* which bits of the struct are set */
+       u32                     mtime_client;   /* last time client changed data */
+       u32                     owner;          /* owner ID */
+       u32                     group;          /* group ID */
+       umode_t                 mode;           /* UNIX mode */
+};
+
+#define AFS_SET_MTIME          0x01            /* set the mtime */
+#define AFS_SET_OWNER          0x02            /* set the owner ID */
+#define AFS_SET_GROUP          0x04            /* set the group ID (unsupported?) */
+#define AFS_SET_MODE           0x08            /* set the UNIX mode */
+#define AFS_SET_SEG_SIZE       0x10            /* set the segment size (unsupported) */
+
+/*
+ * AFS volume synchronisation information
+ */
+struct afs_volsync {
+       time_t                  creation;       /* volume creation time */
+};
+
+#endif /* AFS_H */
diff --git a/fs/afs/afs_cm.h b/fs/afs/afs_cm.h
new file mode 100644 (file)
index 0000000..7b4d4fa
--- /dev/null
@@ -0,0 +1,32 @@
+/* AFS Cache Manager definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AFS_CM_H
+#define AFS_CM_H
+
+#define AFS_CM_PORT            7001    /* AFS cache manager port */
+#define CM_SERVICE             1       /* AFS Cache Manager Service ID */
+
+enum AFS_CM_Operations {
+       CBCallBack              = 204,  /* break callback promises */
+       CBInitCallBackState     = 205,  /* initialise callback state */
+       CBProbe                 = 206,  /* probe client */
+       CBGetLock               = 207,  /* get contents of CM lock table */
+       CBGetCE                 = 208,  /* get cache file description */
+       CBGetXStatsVersion      = 209,  /* get version of extended statistics */
+       CBGetXStats             = 210,  /* get contents of extended statistics data */
+       CBInitCallBackState3    = 213,  /* initialise callback state, version 3 */
+       CBGetCapabilities       = 65538, /* get client capabilities */
+};
+
+#define AFS_CAP_ERROR_TRANSLATION      0x1
+
+#endif /* AFS_CM_H */
diff --git a/fs/afs/afs_fs.h b/fs/afs/afs_fs.h
new file mode 100644 (file)
index 0000000..89e0d16
--- /dev/null
@@ -0,0 +1,48 @@
+/* AFS File Service definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AFS_FS_H
+#define AFS_FS_H
+
+#define AFS_FS_PORT            7000    /* AFS file server port */
+#define FS_SERVICE             1       /* AFS File Service ID */
+
+enum AFS_FS_Operations {
+       FSFETCHDATA             = 130,  /* AFS Fetch file data */
+       FSFETCHSTATUS           = 132,  /* AFS Fetch file status */
+       FSREMOVEFILE            = 136,  /* AFS Remove a file */
+       FSCREATEFILE            = 137,  /* AFS Create a file */
+       FSRENAME                = 138,  /* AFS Rename or move a file or directory */
+       FSSYMLINK               = 139,  /* AFS Create a symbolic link */
+       FSLINK                  = 140,  /* AFS Create a hard link */
+       FSMAKEDIR               = 141,  /* AFS Create a directory */
+       FSREMOVEDIR             = 142,  /* AFS Remove a directory */
+       FSGIVEUPCALLBACKS       = 147,  /* AFS Discard callback promises */
+       FSGETVOLUMEINFO         = 148,  /* AFS Get root volume information */
+       FSGETROOTVOLUME         = 151,  /* AFS Get root volume name */
+       FSLOOKUP                = 161,  /* AFS lookup file in directory */
+};
+
+enum AFS_FS_Errors {
+       VSALVAGE        = 101,  /* volume needs salvaging */
+       VNOVNODE        = 102,  /* no such file/dir (vnode) */
+       VNOVOL          = 103,  /* no such volume or volume unavailable */
+       VVOLEXISTS      = 104,  /* volume name already exists */
+       VNOSERVICE      = 105,  /* volume not currently in service */
+       VOFFLINE        = 106,  /* volume is currently offline (more info available [VVL-spec]) */
+       VONLINE         = 107,  /* volume is already online */
+       VDISKFULL       = 108,  /* disk partition is full */
+       VOVERQUOTA      = 109,  /* volume's maximum quota exceeded */
+       VBUSY           = 110,  /* volume is temporarily unavailable */
+       VMOVED          = 111,  /* volume moved to new server - ask this FS where */
+};
+
+#endif /* AFS_FS_H */
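As a quick aid to reading the abort codes above, here is a minimal, purely
illustrative sketch (the helper name is hypothetical and not part of this
change) of how an AFS_FS_Errors value might be decoded into a readable name
when logging a failed file-server call:

	/* illustrative only: decode an AFS file-service abort code */
	static const char *afs_fs_error_name(int abort_code)
	{
		switch (abort_code) {
		case 101: return "VSALVAGE (volume needs salvaging)";
		case 102: return "VNOVNODE (no such vnode)";
		case 103: return "VNOVOL (no such volume)";
		case 110: return "VBUSY (volume temporarily unavailable)";
		case 111: return "VMOVED (volume moved to another server)";
		default:  return "unrecognised abort code";
		}
	}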
similarity index 74%
rename from fs/afs/vlclient.h
rename to fs/afs/afs_vl.h
index e3d6011..8bbefe0 100644 (file)
@@ -1,6 +1,6 @@
-/* vlclient.h: Volume Location Service client interface
+/* AFS Volume Location Service client interface
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -9,10 +9,19 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _LINUX_AFS_VLCLIENT_H
-#define _LINUX_AFS_VLCLIENT_H
+#ifndef AFS_VL_H
+#define AFS_VL_H
 
-#include "types.h"
+#include "afs.h"
+
+#define AFS_VL_PORT            7003    /* volume location service port */
+#define VL_SERVICE             52      /* RxRPC service ID for the Volume Location service */
+
+enum AFSVL_Operations {
+       VLGETENTRYBYID          = 503,  /* AFS Get Cache Entry By ID operation ID */
+       VLGETENTRYBYNAME        = 504,  /* AFS Get Cache Entry By Name operation ID */
+       VLPROBE                 = 514,  /* AFS Probe Volume Location Service operation ID */
+};
 
 enum AFSVL_Errors {
        AFSVL_IDEXIST           = 363520,       /* Volume Id entry exists in vl database */
@@ -40,14 +49,16 @@ enum AFSVL_Errors {
        AFSVL_BADVOLOPER        = 363542,       /* Bad volume operation code */
        AFSVL_BADRELLOCKTYPE    = 363543,       /* Bad release lock type */
        AFSVL_RERELEASE         = 363544,       /* Status report: last release was aborted */
-       AFSVL_BADSERVERFLAG     = 363545,       /* Invalid replication site server °ag */
+       AFSVL_BADSERVERFLAG     = 363545,       /* Invalid replication site server flag */
        AFSVL_PERM              = 363546,       /* No permission access */
        AFSVL_NOMEM             = 363547,       /* malloc/realloc failed to alloc enough memory */
 };
 
-/* maps to "struct vldbentry" in vvl-spec.pdf */
+/*
+ * maps to "struct vldbentry" in vvl-spec.pdf
+ */
 struct afs_vldbentry {
-       char            name[65];               /* name of volume (including NUL char) */
+       char            name[65];               /* name of volume (with NUL char) */
        afs_voltype_t   type;                   /* volume type */
        unsigned        num_servers;            /* num servers that hold instances of this vol */
        unsigned        clone_id;               /* cloning ID */
@@ -68,26 +79,6 @@ struct afs_vldbentry {
 #define AFS_VLSF_RWVOL         0x0004  /* this server holds a R/W instance of the volume */
 #define AFS_VLSF_BACKVOL       0x0008  /* this server holds a backup instance of the volume */
        } servers[8];
-
 };
 
-/* look up a volume location database entry by name */
-extern int afs_rxvl_get_entry_by_name(struct afs_server *server,
-                                     const char *volname,
-                                     unsigned volnamesz,
-                                     struct afs_cache_vlocation *entry);
-
-/* look up a volume location database entry by ID */
-extern int afs_rxvl_get_entry_by_id(struct afs_server *server,
-                                   afs_volid_t volid,
-                                   afs_voltype_t voltype,
-                                   struct afs_cache_vlocation *entry);
-
-extern int afs_rxvl_get_entry_by_id_async(struct afs_async_op *op,
-                                         afs_volid_t volid,
-                                         afs_voltype_t voltype);
-
-extern int afs_rxvl_get_entry_by_id_async2(struct afs_async_op *op,
-                                          struct afs_cache_vlocation *entry);
-
-#endif /* _LINUX_AFS_VLCLIENT_H */
+#endif /* AFS_VL_H */
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
new file mode 100644 (file)
index 0000000..de0d7de
--- /dev/null
@@ -0,0 +1,256 @@
+/* AFS caching stuff
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_cell_cache_match(void *target,
+                                               const void *entry);
+static void afs_cell_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_cache_cell_index_def = {
+       .name                   = "cell_ix",
+       .data_size              = sizeof(struct afs_cache_cell),
+       .keys[0]                = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
+       .match                  = afs_cell_cache_match,
+       .update                 = afs_cell_cache_update,
+};
+#endif
+
+/*
+ * match a cell record obtained from the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_cell_cache_match(void *target,
+                                               const void *entry)
+{
+       const struct afs_cache_cell *ccell = entry;
+       struct afs_cell *cell = target;
+
+       _enter("{%s},{%s}", ccell->name, cell->name);
+
+       if (strncmp(ccell->name, cell->name, sizeof(ccell->name)) == 0) {
+               _leave(" = SUCCESS");
+               return CACHEFS_MATCH_SUCCESS;
+       }
+
+       _leave(" = FAILED");
+       return CACHEFS_MATCH_FAILED;
+}
+#endif
+
+/*
+ * update a cell record in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_cell_cache_update(void *source, void *entry)
+{
+       struct afs_cache_cell *ccell = entry;
+       struct afs_cell *cell = source;
+
+       _enter("%p,%p", source, entry);
+
+       strncpy(ccell->name, cell->name, sizeof(ccell->name));
+
+       memcpy(ccell->vl_servers,
+              cell->vl_addrs,
+              min(sizeof(ccell->vl_servers), sizeof(cell->vl_addrs)));
+
+}
+#endif
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vlocation_cache_match(void *target,
+                                                    const void *entry);
+static void afs_vlocation_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_vlocation_cache_index_def = {
+       .name           = "vldb",
+       .data_size      = sizeof(struct afs_cache_vlocation),
+       .keys[0]        = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
+       .match          = afs_vlocation_cache_match,
+       .update         = afs_vlocation_cache_update,
+};
+#endif
+
+/*
+ * match a VLDB record stored in the cache
+ * - may also load target from entry
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vlocation_cache_match(void *target,
+                                                    const void *entry)
+{
+       const struct afs_cache_vlocation *vldb = entry;
+       struct afs_vlocation *vlocation = target;
+
+       _enter("{%s},{%s}", vlocation->vldb.name, vldb->name);
+
+       if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0
+           ) {
+               if (!vlocation->valid ||
+                   vlocation->vldb.rtime == vldb->rtime
+                   ) {
+                       vlocation->vldb = *vldb;
+                       vlocation->valid = 1;
+                       _leave(" = SUCCESS [c->m]");
+                       return CACHEFS_MATCH_SUCCESS;
+               } else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) {
+                       /* delete if VIDs for this name differ */
+                       if (memcmp(&vlocation->vldb.vid,
+                                  &vldb->vid,
+                                  sizeof(vldb->vid)) != 0) {
+                               _leave(" = DELETE");
+                               return CACHEFS_MATCH_SUCCESS_DELETE;
+                       }
+
+                       _leave(" = UPDATE");
+                       return CACHEFS_MATCH_SUCCESS_UPDATE;
+               } else {
+                       _leave(" = SUCCESS");
+                       return CACHEFS_MATCH_SUCCESS;
+               }
+       }
+
+       _leave(" = FAILED");
+       return CACHEFS_MATCH_FAILED;
+}
+#endif
+
+/*
+ * update a VLDB record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_vlocation_cache_update(void *source, void *entry)
+{
+       struct afs_cache_vlocation *vldb = entry;
+       struct afs_vlocation *vlocation = source;
+
+       _enter("");
+
+       *vldb = vlocation->vldb;
+}
+#endif
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_volume_cache_match(void *target,
+                                                 const void *entry);
+static void afs_volume_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_volume_cache_index_def = {
+       .name           = "volume",
+       .data_size      = sizeof(struct afs_cache_vhash),
+       .keys[0]        = { CACHEFS_INDEX_KEYS_BIN, 1 },
+       .keys[1]        = { CACHEFS_INDEX_KEYS_BIN, 1 },
+       .match          = afs_volume_cache_match,
+       .update         = afs_volume_cache_update,
+};
+#endif
+
+/*
+ * match a volume hash record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_volume_cache_match(void *target,
+                                                 const void *entry)
+{
+       const struct afs_cache_vhash *vhash = entry;
+       struct afs_volume *volume = target;
+
+       _enter("{%u},{%u}", volume->type, vhash->vtype);
+
+       if (volume->type == vhash->vtype) {
+               _leave(" = SUCCESS");
+               return CACHEFS_MATCH_SUCCESS;
+       }
+
+       _leave(" = FAILED");
+       return CACHEFS_MATCH_FAILED;
+}
+#endif
+
+/*
+ * update a volume hash record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_volume_cache_update(void *source, void *entry)
+{
+       struct afs_cache_vhash *vhash = entry;
+       struct afs_volume *volume = source;
+
+       _enter("");
+
+       vhash->vtype = volume->type;
+}
+#endif
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vnode_cache_match(void *target,
+                                                const void *entry);
+static void afs_vnode_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_vnode_cache_index_def = {
+       .name           = "vnode",
+       .data_size      = sizeof(struct afs_cache_vnode),
+       .keys[0]        = { CACHEFS_INDEX_KEYS_BIN, 4 },
+       .match          = afs_vnode_cache_match,
+       .update         = afs_vnode_cache_update,
+};
+#endif
+
+/*
+ * match a vnode record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vnode_cache_match(void *target,
+                                                const void *entry)
+{
+       const struct afs_cache_vnode *cvnode = entry;
+       struct afs_vnode *vnode = target;
+
+       _enter("{%x,%x,%Lx},{%x,%x,%Lx}",
+              vnode->fid.vnode,
+              vnode->fid.unique,
+              vnode->status.version,
+              cvnode->vnode_id,
+              cvnode->vnode_unique,
+              cvnode->data_version);
+
+       if (vnode->fid.vnode != cvnode->vnode_id) {
+               _leave(" = FAILED");
+               return CACHEFS_MATCH_FAILED;
+       }
+
+       if (vnode->fid.unique != cvnode->vnode_unique ||
+           vnode->status.version != cvnode->data_version) {
+               _leave(" = DELETE");
+               return CACHEFS_MATCH_SUCCESS_DELETE;
+       }
+
+       _leave(" = SUCCESS");
+       return CACHEFS_MATCH_SUCCESS;
+}
+#endif
+
+/*
+ * update a vnode record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_vnode_cache_update(void *source, void *entry)
+{
+       struct afs_cache_vnode *cvnode = entry;
+       struct afs_vnode *vnode = source;
+
+       _enter("");
+
+       cvnode->vnode_id        = vnode->fid.vnode;
+       cvnode->vnode_unique    = vnode->fid.unique;
+       cvnode->data_version    = vnode->status.version;
+}
+#endif
index 9eb7722..36a3642 100644 (file)
@@ -1,4 +1,4 @@
-/* cache.h: AFS local cache management interface
+/* AFS local cache management interface
  *
  * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -9,8 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _LINUX_AFS_CACHE_H
-#define _LINUX_AFS_CACHE_H
+#ifndef AFS_CACHE_H
+#define AFS_CACHE_H
 
 #undef AFS_CACHING_SUPPORT
 
@@ -20,8 +20,4 @@
 #endif
 #include "types.h"
 
-#ifdef __KERNEL__
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_AFS_CACHE_H */
+#endif /* AFS_CACHE_H */
index 9cb206e..639399f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
  *
  * This software may be freely redistributed under the terms of the
  * GNU General Public License.
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include "server.h"
-#include "vnode.h"
+#include <linux/circ_buf.h>
 #include "internal.h"
-#include "cmservice.h"
 
-/*****************************************************************************/
+unsigned afs_vnode_update_timeout = 10;
+
+#define afs_breakring_space(server) \
+       CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail,    \
+                  ARRAY_SIZE((server)->cb_break))
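+/*
+ * Note on afs_breakring_space(): CIRC_SPACE(head, tail, size) from
+ * <linux/circ_buf.h> evaluates to ((tail) - ((head) + 1)) & ((size) - 1),
+ * so with a power-of-two sized cb_break[] ring one slot is always kept
+ * unused to distinguish a full ring from an empty one (e.g. with size 64,
+ * head 10 and tail 5 there are 58 free slots).
+ */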
+
+//static void afs_callback_updater(struct work_struct *);
+
+static struct workqueue_struct *afs_callback_update_worker;
+
 /*
  * allow the fileserver to request callback state (re-)initialisation
  */
-int SRXAFSCM_InitCallBackState(struct afs_server *server)
+void afs_init_callback_state(struct afs_server *server)
 {
-       struct list_head callbacks;
+       struct afs_vnode *vnode;
 
-       _enter("%p", server);
+       _enter("{%p}", server);
 
-       INIT_LIST_HEAD(&callbacks);
-
-       /* transfer the callback list from the server to a temp holding area */
        spin_lock(&server->cb_lock);
 
-       list_add(&callbacks, &server->cb_promises);
-       list_del_init(&server->cb_promises);
+       /* kill all the promises on record from this server */
+       while (!RB_EMPTY_ROOT(&server->cb_promises)) {
+               vnode = rb_entry(server->cb_promises.rb_node,
+                                struct afs_vnode, cb_promise);
+               _debug("UNPROMISE { vid=%x vn=%u uq=%u}",
+                      vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
+               rb_erase(&vnode->cb_promise, &server->cb_promises);
+               vnode->cb_promised = false;
+       }
 
-       /* munch our way through the list, grabbing the inode, dropping all the
-        * locks and regetting them in the right order
-        */
-       while (!list_empty(&callbacks)) {
-               struct afs_vnode *vnode;
-               struct inode *inode;
+       spin_unlock(&server->cb_lock);
+       _leave("");
+}
 
-               vnode = list_entry(callbacks.next, struct afs_vnode, cb_link);
-               list_del_init(&vnode->cb_link);
+/*
+ * handle the data invalidation side of a callback being broken
+ */
+void afs_broken_callback_work(struct work_struct *work)
+{
+       struct afs_vnode *vnode =
+               container_of(work, struct afs_vnode, cb_broken_work);
 
-               /* try and grab the inode - may fail */
-               inode = igrab(AFS_VNODE_TO_I(vnode));
-               if (inode) {
-                       int release = 0;
+       _enter("");
 
-                       spin_unlock(&server->cb_lock);
-                       spin_lock(&vnode->lock);
+       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+               return;
 
-                       if (vnode->cb_server == server) {
-                               vnode->cb_server = NULL;
-                               afs_kafstimod_del_timer(&vnode->cb_timeout);
-                               spin_lock(&afs_cb_hash_lock);
-                               list_del_init(&vnode->cb_hash_link);
-                               spin_unlock(&afs_cb_hash_lock);
-                               release = 1;
-                       }
+       /* we're only interested in dealing with a broken callback on *this*
+        * vnode and only if no-one else has dealt with it yet */
+       if (!mutex_trylock(&vnode->validate_lock))
+               return; /* someone else is dealing with it */
 
-                       spin_unlock(&vnode->lock);
+       if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
+               if (S_ISDIR(vnode->vfs_inode.i_mode))
+                       afs_clear_permits(vnode);
 
-                       iput(inode);
-                       afs_put_server(server);
+               if (afs_vnode_fetch_status(vnode, NULL, NULL) < 0)
+                       goto out;
 
-                       spin_lock(&server->cb_lock);
+               if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+                       goto out;
+
+               /* if the vnode's data version number changed then its contents
+                * are different */
+               if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+                       _debug("zap data {%x:%u}",
+                              vnode->fid.vid, vnode->fid.vnode);
+                       invalidate_remote_inode(&vnode->vfs_inode);
                }
        }
 
-       spin_unlock(&server->cb_lock);
+out:
+       mutex_unlock(&vnode->validate_lock);
 
-       _leave(" = 0");
-       return 0;
-} /* end SRXAFSCM_InitCallBackState() */
+       /* avoid the potential race whereby the mutex_trylock() in this
+        * function happens again between the clear_bit() and the
+        * mutex_unlock() */
+       if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
+               _debug("requeue");
+               queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
+       }
+       _leave("");
+}
+
+/*
+ * actually break a callback
+ */
+static void afs_break_callback(struct afs_server *server,
+                              struct afs_vnode *vnode)
+{
+       _enter("");
+
+       set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
+
+       if (vnode->cb_promised) {
+               spin_lock(&vnode->lock);
+
+               _debug("break callback");
+
+               spin_lock(&server->cb_lock);
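+               /* re-check cb_promised now that cb_lock is held: the promise
+                * may already have been removed between the lockless test
+                * above and taking the lock */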
+               if (vnode->cb_promised) {
+                       rb_erase(&vnode->cb_promise, &server->cb_promises);
+                       vnode->cb_promised = false;
+               }
+               spin_unlock(&server->cb_lock);
+
+               queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
+               spin_unlock(&vnode->lock);
+       }
+}
+
+/*
+ * allow the fileserver to explicitly break one callback
+ * - happens when
+ *   - the backing file is changed
+ *   - a lock is released
+ */
+static void afs_break_one_callback(struct afs_server *server,
+                                  struct afs_fid *fid)
+{
+       struct afs_vnode *vnode;
+       struct rb_node *p;
+
+       _debug("find");
+       spin_lock(&server->fs_lock);
+       p = server->fs_vnodes.rb_node;
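+       /* walk the server's vnode rbtree, which is keyed on the fid as a
+        * (vid, vnode, unique) triple compared in that order */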
+       while (p) {
+               vnode = rb_entry(p, struct afs_vnode, server_rb);
+               if (fid->vid < vnode->fid.vid)
+                       p = p->rb_left;
+               else if (fid->vid > vnode->fid.vid)
+                       p = p->rb_right;
+               else if (fid->vnode < vnode->fid.vnode)
+                       p = p->rb_left;
+               else if (fid->vnode > vnode->fid.vnode)
+                       p = p->rb_right;
+               else if (fid->unique < vnode->fid.unique)
+                       p = p->rb_left;
+               else if (fid->unique > vnode->fid.unique)
+                       p = p->rb_right;
+               else
+                       goto found;
+       }
+
+       /* not found so we just ignore it (it may have moved to another
+        * server) */
+not_available:
+       _debug("not avail");
+       spin_unlock(&server->fs_lock);
+       _leave("");
+       return;
+
+found:
+       _debug("found");
+       ASSERTCMP(server, ==, vnode->server);
+
+       if (!igrab(AFS_VNODE_TO_I(vnode)))
+               goto not_available;
+       spin_unlock(&server->fs_lock);
+
+       afs_break_callback(server, vnode);
+       iput(&vnode->vfs_inode);
+       _leave("");
+}
 
-/*****************************************************************************/
 /*
  * allow the fileserver to break callback promises
  */
-int SRXAFSCM_CallBack(struct afs_server *server, size_t count,
-                     struct afs_callback callbacks[])
+void afs_break_callbacks(struct afs_server *server, size_t count,
+                        struct afs_callback callbacks[])
 {
-       _enter("%p,%u,", server, count);
+       _enter("%p,%zu,", server, count);
 
-       for (; count > 0; callbacks++, count--) {
-               struct afs_vnode *vnode = NULL;
-               struct inode *inode = NULL;
-               int valid = 0;
+       ASSERT(server != NULL);
+       ASSERTCMP(count, <=, AFSCBMAX);
 
+       for (; count > 0; callbacks++, count--) {
                _debug("- Fid { vl=%08x n=%u u=%u }  CB { v=%u x=%u t=%u }",
                       callbacks->fid.vid,
                       callbacks->fid.vnode,
@@ -103,67 +205,270 @@ int SRXAFSCM_CallBack(struct afs_server *server, size_t count,
                       callbacks->expiry,
                       callbacks->type
                       );
+               afs_break_one_callback(server, &callbacks->fid);
+       }
 
-               /* find the inode for this fid */
-               spin_lock(&afs_cb_hash_lock);
+       _leave("");
+       return;
+}
 
-               list_for_each_entry(vnode,
-                                   &afs_cb_hash(server, &callbacks->fid),
-                                   cb_hash_link) {
-                       if (memcmp(&vnode->fid, &callbacks->fid,
-                                  sizeof(struct afs_fid)) != 0)
-                               continue;
+/*
+ * record the callback for breaking
+ * - the caller must hold server->cb_lock
+ */
+static void afs_do_give_up_callback(struct afs_server *server,
+                                   struct afs_vnode *vnode)
+{
+       struct afs_callback *cb;
 
-                       /* right vnode, but is it same server? */
-                       if (vnode->cb_server != server)
-                               break; /* no */
+       _enter("%p,%p", server, vnode);
 
-                       /* try and nail the inode down */
-                       inode = igrab(AFS_VNODE_TO_I(vnode));
-                       break;
+       cb = &server->cb_break[server->cb_break_head];
+       cb->fid         = vnode->fid;
+       cb->version     = vnode->cb_version;
+       cb->expiry      = vnode->cb_expiry;
+       cb->type        = vnode->cb_type;
+       smp_wmb();
+       server->cb_break_head =
+               (server->cb_break_head + 1) &
+               (ARRAY_SIZE(server->cb_break) - 1);
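+       /* the smp_wmb() above orders the stores filling in the record before
+        * the store publishing the new head index; the mask relies on the
+        * break ring having a power-of-two number of slots */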
+
+       /* defer the breaking of callbacks to try and collect as many as
+        * possible to ship in one operation */
+       switch (atomic_inc_return(&server->cb_break_n)) {
+       case 1 ... AFSCBMAX - 1:
+               queue_delayed_work(afs_callback_update_worker,
+                                  &server->cb_break_work, HZ * 2);
+               break;
+       case AFSCBMAX:
+               afs_flush_callback_breaks(server);
+               break;
+       default:
+               break;
+       }
+
+       ASSERT(server->cb_promises.rb_node != NULL);
+       rb_erase(&vnode->cb_promise, &server->cb_promises);
+       vnode->cb_promised = false;
+       _leave("");
+}
+
+/*
+ * discard the callback on a deleted item
+ */
+void afs_discard_callback_on_delete(struct afs_vnode *vnode)
+{
+       struct afs_server *server = vnode->server;
+
+       _enter("%d", vnode->cb_promised);
+
+       if (!vnode->cb_promised) {
+               _leave(" [not promised]");
+               return;
+       }
+
+       ASSERT(server != NULL);
+
+       spin_lock(&server->cb_lock);
+       if (vnode->cb_promised) {
+               ASSERT(server->cb_promises.rb_node != NULL);
+               rb_erase(&vnode->cb_promise, &server->cb_promises);
+               vnode->cb_promised = false;
+       }
+       spin_unlock(&server->cb_lock);
+       _leave("");
+}
+
+/*
+ * give up the callback registered for a vnode on the file server when the
+ * inode is being cleared
+ */
+void afs_give_up_callback(struct afs_vnode *vnode)
+{
+       struct afs_server *server = vnode->server;
+
+       DECLARE_WAITQUEUE(myself, current);
+
+       _enter("%d", vnode->cb_promised);
+
+       _debug("GIVE UP INODE %p", &vnode->vfs_inode);
+
+       if (!vnode->cb_promised) {
+               _leave(" [not promised]");
+               return;
+       }
+
+       ASSERT(server != NULL);
+
+       spin_lock(&server->cb_lock);
+       if (vnode->cb_promised && afs_breakring_space(server) == 0) {
+               add_wait_queue(&server->cb_break_waitq, &myself);
+               for (;;) {
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       if (!vnode->cb_promised ||
+                           afs_breakring_space(server) != 0)
+                               break;
+                       spin_unlock(&server->cb_lock);
+                       schedule();
+                       spin_lock(&server->cb_lock);
                }
+               remove_wait_queue(&server->cb_break_waitq, &myself);
+               __set_current_state(TASK_RUNNING);
+       }
+
+       /* of course, it's always possible for the server to break this vnode's
+        * callback first... */
+       if (vnode->cb_promised)
+               afs_do_give_up_callback(server, vnode);
+
+       spin_unlock(&server->cb_lock);
+       _leave("");
+}
+
+/*
+ * dispatch a deferred give up callbacks operation
+ */
+void afs_dispatch_give_up_callbacks(struct work_struct *work)
+{
+       struct afs_server *server =
+               container_of(work, struct afs_server, cb_break_work.work);
+
+       _enter("");
+
+       /* tell the fileserver to discard the callback promises it has
+        * - in the event of ENOMEM or some other error, we just forget that we
+        *   had callbacks entirely, and the server will call us later to break
+        *   them
+        */
+       afs_fs_give_up_callbacks(server, &afs_async_call);
+}
+
+/*
+ * flush the outstanding callback breaks on a server
+ */
+void afs_flush_callback_breaks(struct afs_server *server)
+{
+       cancel_delayed_work(&server->cb_break_work);
+       queue_delayed_work(afs_callback_update_worker,
+                          &server->cb_break_work, 0);
+}
 
-               spin_unlock(&afs_cb_hash_lock);
-
-               if (inode) {
-                       /* we've found the record for this vnode */
-                       spin_lock(&vnode->lock);
-                       if (vnode->cb_server == server) {
-                               /* the callback _is_ on the calling server */
-                               vnode->cb_server = NULL;
-                               valid = 1;
-
-                               afs_kafstimod_del_timer(&vnode->cb_timeout);
-                               vnode->flags |= AFS_VNODE_CHANGED;
-
-                               spin_lock(&server->cb_lock);
-                               list_del_init(&vnode->cb_link);
-                               spin_unlock(&server->cb_lock);
-
-                               spin_lock(&afs_cb_hash_lock);
-                               list_del_init(&vnode->cb_hash_link);
-                               spin_unlock(&afs_cb_hash_lock);
-                       }
-                       spin_unlock(&vnode->lock);
-
-                       if (valid) {
-                               invalidate_remote_inode(inode);
-                               afs_put_server(server);
-                       }
-                       iput(inode);
+#if 0
+/*
+ * update a bunch of callbacks
+ */
+static void afs_callback_updater(struct work_struct *work)
+{
+       struct afs_server *server;
+       struct afs_vnode *vnode, *xvnode;
+       time_t now;
+       long timeout;
+       int ret;
+
+       server = container_of(work, struct afs_server, updater);
+
+       _enter("");
+
+       now = get_seconds();
+
+       /* find the first vnode to update */
+       spin_lock(&server->cb_lock);
+       for (;;) {
+               if (RB_EMPTY_ROOT(&server->cb_promises)) {
+                       spin_unlock(&server->cb_lock);
+                       _leave(" [nothing]");
+                       return;
                }
+
+               vnode = rb_entry(rb_first(&server->cb_promises),
+                                struct afs_vnode, cb_promise);
+               if (atomic_read(&vnode->usage) > 0)
+                       break;
+               rb_erase(&vnode->cb_promise, &server->cb_promises);
+               vnode->cb_promised = false;
        }
 
-       _leave(" = 0");
-       return 0;
-} /* end SRXAFSCM_CallBack() */
+       timeout = vnode->update_at - now;
+       if (timeout > 0) {
+               queue_delayed_work(afs_vnode_update_worker,
+                                  &afs_vnode_update, timeout * HZ);
+               spin_unlock(&server->cb_lock);
+               _leave(" [nothing]");
+               return;
+       }
+
+       list_del_init(&vnode->update);
+       atomic_inc(&vnode->usage);
+       spin_unlock(&server->cb_lock);
+
+       /* we can now perform the update */
+       _debug("update %s", vnode->vldb.name);
+       vnode->state = AFS_VL_UPDATING;
+       vnode->upd_rej_cnt = 0;
+       vnode->upd_busy_cnt = 0;
+
+       ret = afs_vnode_update_record(vl, &vldb);
+       switch (ret) {
+       case 0:
+               afs_vnode_apply_update(vl, &vldb);
+               vnode->state = AFS_VL_UPDATING;
+               break;
+       case -ENOMEDIUM:
+               vnode->state = AFS_VL_VOLUME_DELETED;
+               break;
+       default:
+               vnode->state = AFS_VL_UNCERTAIN;
+               break;
+       }
+
+       /* and then reschedule */
+       _debug("reschedule");
+       vnode->update_at = get_seconds() + afs_vnode_update_timeout;
+
+       spin_lock(&server->cb_lock);
+
+       if (!list_empty(&server->cb_promises)) {
+               /* next update in 10 minutes, but wait at least 1 second more
+                * than the newest record already queued so that we don't spam
+                * the VL server suddenly with lots of requests
+                */
+               xvnode = list_entry(server->cb_promises.prev,
+                                   struct afs_vnode, update);
+               if (vnode->update_at <= xvnode->update_at)
+                       vnode->update_at = xvnode->update_at + 1;
+               xvnode = list_entry(server->cb_promises.next,
+                                   struct afs_vnode, update);
+               timeout = xvnode->update_at - now;
+               if (timeout < 0)
+                       timeout = 0;
+       } else {
+               timeout = afs_vnode_update_timeout;
+       }
+
+       list_add_tail(&vnode->update, &server->cb_promises);
+
+       _debug("timeout %ld", timeout);
+       queue_delayed_work(afs_vnode_update_worker,
+                          &afs_vnode_update, timeout * HZ);
+       spin_unlock(&server->cb_lock);
+       afs_put_vnode(vl);
+}
+#endif
+
+/*
+ * initialise the callback update process
+ */
+int __init afs_callback_update_init(void)
+{
+       afs_callback_update_worker =
+               create_singlethread_workqueue("kafs_callbackd");
+       return afs_callback_update_worker ? 0 : -ENOMEM;
+}
 
-/*****************************************************************************/
 /*
- * allow the fileserver to see if the cache manager is still alive
+ * shut down the callback update process
  */
-int SRXAFSCM_Probe(struct afs_server *server)
+void __exit afs_callback_update_kill(void)
 {
-       _debug("SRXAFSCM_Probe(%p)\n", server);
-       return 0;
-} /* end SRXAFSCM_Probe() */
+       destroy_workqueue(afs_callback_update_worker);
+}
index 1fc5783..9b1311a 100644 (file)
@@ -1,4 +1,4 @@
-/* cell.c: AFS cell and server record management
+/* AFS cell and server record management
  *
  * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
 
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/connection.h>
-#include "volume.h"
-#include "cell.h"
-#include "server.h"
-#include "transport.h"
-#include "vlclient.h"
-#include "kafstimod.h"
-#include "super.h"
+#include <linux/key.h>
+#include <linux/ctype.h>
+#include <keys/rxrpc-type.h>
 #include "internal.h"
 
 DECLARE_RWSEM(afs_proc_cells_sem);
@@ -28,66 +22,47 @@ LIST_HEAD(afs_proc_cells);
 static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells);
 static DEFINE_RWLOCK(afs_cells_lock);
 static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */
+static DECLARE_WAIT_QUEUE_HEAD(afs_cells_freeable_wq);
 static struct afs_cell *afs_cell_root;
 
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_cell_cache_match(void *target,
-                                               const void *entry);
-static void afs_cell_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_cache_cell_index_def = {
-       .name                   = "cell_ix",
-       .data_size              = sizeof(struct afs_cache_cell),
-       .keys[0]                = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
-       .match                  = afs_cell_cache_match,
-       .update                 = afs_cell_cache_update,
-};
-#endif
-
-/*****************************************************************************/
 /*
- * create a cell record
- * - "name" is the name of the cell
- * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
+ * allocate a cell record and fill in its name, VL server address list and
+ * allocate an anonymous key
  */
-int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell)
+static struct afs_cell *afs_cell_alloc(const char *name, char *vllist)
 {
        struct afs_cell *cell;
-       char *next;
+       size_t namelen;
+       char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp, *next;
        int ret;
 
-       _enter("%s", name);
+       _enter("%s,%s", name, vllist);
 
        BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */
 
+       namelen = strlen(name);
+       if (namelen > AFS_MAXCELLNAME)
+               return ERR_PTR(-ENAMETOOLONG);
+
        /* allocate and initialise a cell record */
-       cell = kmalloc(sizeof(struct afs_cell) + strlen(name) + 1, GFP_KERNEL);
+       cell = kzalloc(sizeof(struct afs_cell) + namelen + 1, GFP_KERNEL);
        if (!cell) {
                _leave(" = -ENOMEM");
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        }
 
-       down_write(&afs_cells_sem);
-
-       memset(cell, 0, sizeof(struct afs_cell));
-       atomic_set(&cell->usage, 0);
+       memcpy(cell->name, name, namelen);
+       cell->name[namelen] = 0;
 
+       atomic_set(&cell->usage, 1);
        INIT_LIST_HEAD(&cell->link);
-
-       rwlock_init(&cell->sv_lock);
-       INIT_LIST_HEAD(&cell->sv_list);
-       INIT_LIST_HEAD(&cell->sv_graveyard);
-       spin_lock_init(&cell->sv_gylock);
-
+       rwlock_init(&cell->servers_lock);
+       INIT_LIST_HEAD(&cell->servers);
        init_rwsem(&cell->vl_sem);
        INIT_LIST_HEAD(&cell->vl_list);
-       INIT_LIST_HEAD(&cell->vl_graveyard);
-       spin_lock_init(&cell->vl_gylock);
-
-       strcpy(cell->name,name);
+       spin_lock_init(&cell->vl_lock);
 
        /* fill in the VL server list from the rest of the string */
-       ret = -EINVAL;
        do {
                unsigned a, b, c, d;
 
@@ -96,20 +71,75 @@ int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell)
                        *next++ = 0;
 
                if (sscanf(vllist, "%u.%u.%u.%u", &a, &b, &c, &d) != 4)
-                       goto badaddr;
+                       goto bad_address;
 
                if (a > 255 || b > 255 || c > 255 || d > 255)
-                       goto badaddr;
+                       goto bad_address;
 
                cell->vl_addrs[cell->vl_naddrs++].s_addr =
                        htonl((a << 24) | (b << 16) | (c << 8) | d);
 
-               if (cell->vl_naddrs >= AFS_CELL_MAX_ADDRS)
-                       break;
+       } while (cell->vl_naddrs < AFS_CELL_MAX_ADDRS && (vllist = next));
+
+       /* create a key to represent an anonymous user */
+       memcpy(keyname, "afs@", 4);
+       dp = keyname + 4;
+       cp = cell->name;
+       do {
+               *dp++ = toupper(*cp);
+       } while (*cp++);
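+       /* the loop above upper-cases the cell name into the key description
+        * and copies the terminating NUL too, e.g. "afs@EXAMPLE.COM" */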
+       cell->anonymous_key = key_alloc(&key_type_rxrpc, keyname, 0, 0, current,
+                                       KEY_POS_SEARCH, KEY_ALLOC_NOT_IN_QUOTA);
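+       /* the anonymous key is allocated outside any user's key quota as it
+        * belongs to the cell rather than to a particular user */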
+       if (IS_ERR(cell->anonymous_key)) {
+               _debug("no key");
+               ret = PTR_ERR(cell->anonymous_key);
+               goto error;
+       }
+
+       ret = key_instantiate_and_link(cell->anonymous_key, NULL, 0,
+                                      NULL, NULL);
+       if (ret < 0) {
+               _debug("instantiate failed");
+               goto error;
+       }
+
+       _debug("anon key %p{%x}",
+              cell->anonymous_key, key_serial(cell->anonymous_key));
+
+       _leave(" = %p", cell);
+       return cell;
+
+bad_address:
+       printk(KERN_ERR "kAFS: bad VL server IP address\n");
+       ret = -EINVAL;
+error:
+       key_put(cell->anonymous_key);
+       kfree(cell);
+       _leave(" = %d", ret);
+       return ERR_PTR(ret);
+}
+
+/*
+ * create a cell record
+ * - "name" is the name of the cell
+ * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
+ */
+struct afs_cell *afs_cell_create(const char *name, char *vllist)
+{
+       struct afs_cell *cell;
+       int ret;
+
+       _enter("%s,%s", name, vllist);
 
-       } while(vllist = next, vllist);
+       cell = afs_cell_alloc(name, vllist);
+       if (IS_ERR(cell)) {
+               _leave(" = %ld", PTR_ERR(cell));
+               return cell;
+       }
+
+       down_write(&afs_cells_sem);
 
-       /* add a proc dir for this cell */
+       /* add a proc directory for this cell */
        ret = afs_proc_cell_setup(cell);
        if (ret < 0)
                goto error;
@@ -130,31 +160,28 @@ int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell)
        down_write(&afs_proc_cells_sem);
        list_add_tail(&cell->proc_link, &afs_proc_cells);
        up_write(&afs_proc_cells_sem);
-
-       *_cell = cell;
        up_write(&afs_cells_sem);
 
-       _leave(" = 0 (%p)", cell);
-       return 0;
+       _leave(" = %p", cell);
+       return cell;
 
- badaddr:
-       printk(KERN_ERR "kAFS: bad VL server IP address: '%s'\n", vllist);
- error:
+error:
        up_write(&afs_cells_sem);
+       key_put(cell->anonymous_key);
        kfree(cell);
        _leave(" = %d", ret);
-       return ret;
-} /* end afs_cell_create() */
+       return ERR_PTR(ret);
+}
 
-/*****************************************************************************/
 /*
- * initialise the cell database from module parameters
+ * set the root cell information
+ * - can be called with a module parameter string
+ * - can be called from a write to /proc/fs/afs/rootcell
  */
 int afs_cell_init(char *rootcell)
 {
        struct afs_cell *old_root, *new_root;
        char *cp;
-       int ret;
 
        _enter("");
 
@@ -162,82 +189,60 @@ int afs_cell_init(char *rootcell)
                /* module is loaded with no parameters, or built statically.
                 * - in the future we might initialize cell DB here.
                 */
-               _leave(" = 0 (but no root)");
+               _leave(" = 0 [no root]");
                return 0;
        }
 
        cp = strchr(rootcell, ':');
        if (!cp) {
                printk(KERN_ERR "kAFS: no VL server IP addresses specified\n");
-               _leave(" = %d (no colon)", -EINVAL);
+               _leave(" = -EINVAL");
                return -EINVAL;
        }
 
        /* allocate a cell record for the root cell */
        *cp++ = 0;
-       ret = afs_cell_create(rootcell, cp, &new_root);
-       if (ret < 0) {
-               _leave(" = %d", ret);
-               return ret;
+       new_root = afs_cell_create(rootcell, cp);
+       if (IS_ERR(new_root)) {
+               _leave(" = %ld", PTR_ERR(new_root));
+               return PTR_ERR(new_root);
        }
 
-       /* as afs_put_cell() takes locks by itself, we have to do
-        * a little gymnastics to be race-free.
-        */
-       afs_get_cell(new_root);
-
+       /* install the new cell */
        write_lock(&afs_cells_lock);
-       while (afs_cell_root) {
-               old_root = afs_cell_root;
-               afs_cell_root = NULL;
-               write_unlock(&afs_cells_lock);
-               afs_put_cell(old_root);
-               write_lock(&afs_cells_lock);
-       }
+       old_root = afs_cell_root;
        afs_cell_root = new_root;
        write_unlock(&afs_cells_lock);
+       afs_put_cell(old_root);
 
-       _leave(" = %d", ret);
-       return ret;
-
-} /* end afs_cell_init() */
+       _leave(" = 0");
+       return 0;
+}
 
-/*****************************************************************************/
 /*
  * lookup a cell record
  */
-int afs_cell_lookup(const char *name, unsigned namesz, struct afs_cell **_cell)
+struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
 {
        struct afs_cell *cell;
-       int ret;
 
        _enter("\"%*.*s\",", namesz, namesz, name ? name : "");
 
-       *_cell = NULL;
+       down_read(&afs_cells_sem);
+       read_lock(&afs_cells_lock);
 
        if (name) {
                /* if the cell was named, look for it in the cell record list */
-               ret = -ENOENT;
-               cell = NULL;
-               read_lock(&afs_cells_lock);
-
                list_for_each_entry(cell, &afs_cells, link) {
                        if (strncmp(cell->name, name, namesz) == 0) {
                                afs_get_cell(cell);
                                goto found;
                        }
                }
-               cell = NULL;
+               cell = ERR_PTR(-ENOENT);
        found:
-
-               read_unlock(&afs_cells_lock);
-
-               if (cell)
-                       ret = 0;
-       }
-       else {
-               read_lock(&afs_cells_lock);
-
+               ;
+       } else {
                cell = afs_cell_root;
                if (!cell) {
                        /* this should not happen unless user tries to mount
@@ -246,44 +251,35 @@ int afs_cell_lookup(const char *name, unsigned namesz, struct afs_cell **_cell)
                         * ENOENT might be "more appropriate" but they happen
                         * for other reasons.
                         */
-                       ret = -EDESTADDRREQ;
-               }
-               else {
+                       cell = ERR_PTR(-EDESTADDRREQ);
+               } else {
                        afs_get_cell(cell);
-                       ret = 0;
                }
 
-               read_unlock(&afs_cells_lock);
        }
 
-       *_cell = cell;
-       _leave(" = %d (%p)", ret, cell);
-       return ret;
-
-} /* end afs_cell_lookup() */
+       read_unlock(&afs_cells_lock);
+       up_read(&afs_cells_sem);
+       _leave(" = %p", cell);
+       return cell;
+}
 
-/*****************************************************************************/
 /*
  * try and get a cell record
  */
-struct afs_cell *afs_get_cell_maybe(struct afs_cell **_cell)
+struct afs_cell *afs_get_cell_maybe(struct afs_cell *cell)
 {
-       struct afs_cell *cell;
-
        write_lock(&afs_cells_lock);
 
-       cell = *_cell;
        if (cell && !list_empty(&cell->link))
                afs_get_cell(cell);
        else
                cell = NULL;
 
        write_unlock(&afs_cells_lock);
-
        return cell;
-} /* end afs_get_cell_maybe() */
+}
 
-/*****************************************************************************/
 /*
  * destroy a cell record
  */
@@ -294,8 +290,7 @@ void afs_put_cell(struct afs_cell *cell)
 
        _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
 
-       /* sanity check */
-       BUG_ON(atomic_read(&cell->usage) <= 0);
+       ASSERTCMP(atomic_read(&cell->usage), >, 0);
 
        /* to prevent a race, the decrement and the dequeue must be effectively
         * atomic */
@@ -307,36 +302,49 @@ void afs_put_cell(struct afs_cell *cell)
                return;
        }
 
+       ASSERT(list_empty(&cell->servers));
+       ASSERT(list_empty(&cell->vl_list));
+
        write_unlock(&afs_cells_lock);
 
-       BUG_ON(!list_empty(&cell->sv_list));
-       BUG_ON(!list_empty(&cell->sv_graveyard));
-       BUG_ON(!list_empty(&cell->vl_list));
-       BUG_ON(!list_empty(&cell->vl_graveyard));
+       wake_up(&afs_cells_freeable_wq);
 
        _leave(" [unused]");
-} /* end afs_put_cell() */
+}
 
-/*****************************************************************************/
 /*
  * destroy a cell record
+ * - must be called with the afs_cells_sem write-locked
+ * - cell->link should have been broken by the caller
  */
 static void afs_cell_destroy(struct afs_cell *cell)
 {
        _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
 
-       /* to prevent a race, the decrement and the dequeue must be effectively
-        * atomic */
-       write_lock(&afs_cells_lock);
+       ASSERTCMP(atomic_read(&cell->usage), >=, 0);
+       ASSERT(list_empty(&cell->link));
 
-       /* sanity check */
-       BUG_ON(atomic_read(&cell->usage) != 0);
+       /* wait for everyone to stop using the cell */
+       if (atomic_read(&cell->usage) > 0) {
+               DECLARE_WAITQUEUE(myself, current);
 
-       list_del_init(&cell->link);
+               _debug("wait for cell %s", cell->name);
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&afs_cells_freeable_wq, &myself);
 
-       write_unlock(&afs_cells_lock);
+               while (atomic_read(&cell->usage) > 0) {
+                       schedule();
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+               }
 
-       down_write(&afs_cells_sem);
+               remove_wait_queue(&afs_cells_freeable_wq, &myself);
+               set_current_state(TASK_RUNNING);
+       }
+
+       _debug("cell dead");
+       ASSERTCMP(atomic_read(&cell->usage), ==, 0);
+       ASSERT(list_empty(&cell->servers));
+       ASSERT(list_empty(&cell->vl_list));
 
        afs_proc_cell_remove(cell);
 
@@ -348,104 +356,26 @@ static void afs_cell_destroy(struct afs_cell *cell)
        cachefs_relinquish_cookie(cell->cache, 0);
 #endif
 
-       up_write(&afs_cells_sem);
-
-       BUG_ON(!list_empty(&cell->sv_list));
-       BUG_ON(!list_empty(&cell->sv_graveyard));
-       BUG_ON(!list_empty(&cell->vl_list));
-       BUG_ON(!list_empty(&cell->vl_graveyard));
-
-       /* finish cleaning up the cell */
+       key_put(cell->anonymous_key);
        kfree(cell);
 
        _leave(" [destroyed]");
-} /* end afs_cell_destroy() */
-
-/*****************************************************************************/
-/*
- * lookup the server record corresponding to an Rx RPC peer
- */
-int afs_server_find_by_peer(const struct rxrpc_peer *peer,
-                           struct afs_server **_server)
-{
-       struct afs_server *server;
-       struct afs_cell *cell;
-
-       _enter("%p{a=%08x},", peer, ntohl(peer->addr.s_addr));
-
-       /* search the cell list */
-       read_lock(&afs_cells_lock);
-
-       list_for_each_entry(cell, &afs_cells, link) {
-
-               _debug("? cell %s",cell->name);
-
-               write_lock(&cell->sv_lock);
-
-               /* check the active list */
-               list_for_each_entry(server, &cell->sv_list, link) {
-                       _debug("?? server %08x", ntohl(server->addr.s_addr));
-
-                       if (memcmp(&server->addr, &peer->addr,
-                                  sizeof(struct in_addr)) == 0)
-                               goto found_server;
-               }
+}
 
-               /* check the inactive list */
-               spin_lock(&cell->sv_gylock);
-               list_for_each_entry(server, &cell->sv_graveyard, link) {
-                       _debug("?? dead server %08x",
-                              ntohl(server->addr.s_addr));
-
-                       if (memcmp(&server->addr, &peer->addr,
-                                  sizeof(struct in_addr)) == 0)
-                               goto found_dead_server;
-               }
-               spin_unlock(&cell->sv_gylock);
-
-               write_unlock(&cell->sv_lock);
-       }
-       read_unlock(&afs_cells_lock);
-
-       _leave(" = -ENOENT");
-       return -ENOENT;
-
-       /* we found it in the graveyard - resurrect it */
- found_dead_server:
-       list_move_tail(&server->link, &cell->sv_list);
-       afs_get_server(server);
-       afs_kafstimod_del_timer(&server->timeout);
-       spin_unlock(&cell->sv_gylock);
-       goto success;
-
-       /* we found it - increment its ref count and return it */
- found_server:
-       afs_get_server(server);
-
- success:
-       write_unlock(&cell->sv_lock);
-       read_unlock(&afs_cells_lock);
-
-       *_server = server;
-       _leave(" = 0 (s=%p c=%p)", server, cell);
-       return 0;
-
-} /* end afs_server_find_by_peer() */
-
-/*****************************************************************************/
 /*
  * purge in-memory cell database on module unload or afs_init() failure
  * - the timeout daemon is stopped before calling this
  */
 void afs_cell_purge(void)
 {
-       struct afs_vlocation *vlocation;
        struct afs_cell *cell;
 
        _enter("");
 
        afs_put_cell(afs_cell_root);
 
+       down_write(&afs_cells_sem);
+
        while (!list_empty(&afs_cells)) {
                cell = NULL;
 
@@ -464,104 +394,11 @@ void afs_cell_purge(void)
                        _debug("PURGING CELL %s (%d)",
                               cell->name, atomic_read(&cell->usage));
 
-                       BUG_ON(!list_empty(&cell->sv_list));
-                       BUG_ON(!list_empty(&cell->vl_list));
-
-                       /* purge the cell's VL graveyard list */
-                       _debug(" - clearing VL graveyard");
-
-                       spin_lock(&cell->vl_gylock);
-
-                       while (!list_empty(&cell->vl_graveyard)) {
-                               vlocation = list_entry(cell->vl_graveyard.next,
-                                                      struct afs_vlocation,
-                                                      link);
-                               list_del_init(&vlocation->link);
-
-                               afs_kafstimod_del_timer(&vlocation->timeout);
-
-                               spin_unlock(&cell->vl_gylock);
-
-                               afs_vlocation_do_timeout(vlocation);
-                               /* TODO: race if move to use krxtimod instead
-                                * of kafstimod */
-
-                               spin_lock(&cell->vl_gylock);
-                       }
-
-                       spin_unlock(&cell->vl_gylock);
-
-                       /* purge the cell's server graveyard list */
-                       _debug(" - clearing server graveyard");
-
-                       spin_lock(&cell->sv_gylock);
-
-                       while (!list_empty(&cell->sv_graveyard)) {
-                               struct afs_server *server;
-
-                               server = list_entry(cell->sv_graveyard.next,
-                                                   struct afs_server, link);
-                               list_del_init(&server->link);
-
-                               afs_kafstimod_del_timer(&server->timeout);
-
-                               spin_unlock(&cell->sv_gylock);
-
-                               afs_server_do_timeout(server);
-
-                               spin_lock(&cell->sv_gylock);
-                       }
-
-                       spin_unlock(&cell->sv_gylock);
-
                        /* now the cell should be left with no references */
                        afs_cell_destroy(cell);
                }
        }
 
+       up_write(&afs_cells_sem);
        _leave("");
-} /* end afs_cell_purge() */
-
-/*****************************************************************************/
-/*
- * match a cell record obtained from the cache
- */
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_cell_cache_match(void *target,
-                                               const void *entry)
-{
-       const struct afs_cache_cell *ccell = entry;
-       struct afs_cell *cell = target;
-
-       _enter("{%s},{%s}", ccell->name, cell->name);
-
-       if (strncmp(ccell->name, cell->name, sizeof(ccell->name)) == 0) {
-               _leave(" = SUCCESS");
-               return CACHEFS_MATCH_SUCCESS;
-       }
-
-       _leave(" = FAILED");
-       return CACHEFS_MATCH_FAILED;
-} /* end afs_cell_cache_match() */
-#endif
-
-/*****************************************************************************/
-/*
- * update a cell record in the cache
- */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_cell_cache_update(void *source, void *entry)
-{
-       struct afs_cache_cell *ccell = entry;
-       struct afs_cell *cell = source;
-
-       _enter("%p,%p", source, entry);
-
-       strncpy(ccell->name, cell->name, sizeof(ccell->name));
-
-       memcpy(ccell->vl_servers,
-              cell->vl_addrs,
-              min(sizeof(ccell->vl_servers), sizeof(cell->vl_addrs)));
-
-} /* end afs_cell_cache_update() */
-#endif
+}
diff --git a/fs/afs/cell.h b/fs/afs/cell.h
deleted file mode 100644 (file)
index 4834910..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-/* cell.h: AFS cell record
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_CELL_H
-#define _LINUX_AFS_CELL_H
-
-#include "types.h"
-#include "cache.h"
-
-#define AFS_CELL_MAX_ADDRS 15
-
-extern volatile int afs_cells_being_purged; /* T when cells are being purged by rmmod */
-
-/*****************************************************************************/
-/*
- * entry in the cached cell catalogue
- */
-struct afs_cache_cell
-{
-       char                    name[64];       /* cell name (padded with NULs) */
-       struct in_addr          vl_servers[15]; /* cached cell VL servers */
-};
-
-/*****************************************************************************/
-/*
- * AFS cell record
- */
-struct afs_cell
-{
-       atomic_t                usage;
-       struct list_head        link;           /* main cell list link */
-       struct list_head        proc_link;      /* /proc cell list link */
-       struct proc_dir_entry   *proc_dir;      /* /proc dir for this cell */
-#ifdef AFS_CACHING_SUPPORT
-       struct cachefs_cookie   *cache;         /* caching cookie */
-#endif
-
-       /* server record management */
-       rwlock_t                sv_lock;        /* active server list lock */
-       struct list_head        sv_list;        /* active server list */
-       struct list_head        sv_graveyard;   /* inactive server list */
-       spinlock_t              sv_gylock;      /* inactive server list lock */
-
-       /* volume location record management */
-       struct rw_semaphore     vl_sem;         /* volume management serialisation semaphore */
-       struct list_head        vl_list;        /* cell's active VL record list */
-       struct list_head        vl_graveyard;   /* cell's inactive VL record list */
-       spinlock_t              vl_gylock;      /* graveyard lock */
-       unsigned short          vl_naddrs;      /* number of VL servers in addr list */
-       unsigned short          vl_curr_svix;   /* current server index */
-       struct in_addr          vl_addrs[AFS_CELL_MAX_ADDRS];   /* cell VL server addresses */
-
-       char                    name[0];        /* cell name - must go last */
-};
-
-extern int afs_cell_init(char *rootcell);
-
-extern int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell);
-
-extern int afs_cell_lookup(const char *name, unsigned nmsize, struct afs_cell **_cell);
-
-#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
-
-extern struct afs_cell *afs_get_cell_maybe(struct afs_cell **_cell);
-
-extern void afs_put_cell(struct afs_cell *cell);
-
-extern void afs_cell_purge(void);
-
-#endif /* _LINUX_AFS_CELL_H */
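
The afs_cell record removed above is reference-counted via the afs_get_cell()/afs_put_cell() pair, with the cell name kept as a trailing flexible array. A minimal stand-alone sketch of that usage-count idiom, using illustrative names and C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cell {
        atomic_int usage;               /* like the atomic_t usage field above */
        char name[];                    /* cell name, like "name[0]" above */
};

static struct cell *cell_alloc(const char *name)
{
        struct cell *c = malloc(sizeof(*c) + strlen(name) + 1);

        if (!c)
                abort();
        atomic_init(&c->usage, 1);      /* caller holds the first reference */
        strcpy(c->name, name);
        return c;
}

static void cell_get(struct cell *c)
{
        atomic_fetch_add(&c->usage, 1);
}

static void cell_put(struct cell *c)
{
        if (atomic_fetch_sub(&c->usage, 1) == 1)
                free(c);                /* last reference dropped */
}

int main(void)
{
        struct cell *c = cell_alloc("example.org");

        cell_get(c);                    /* a second user takes a reference */
        cell_put(c);
        cell_put(c);                    /* final put frees the record */
        return 0;
}
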
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 3d097fd..6685f4c 100644 (file)
@@ -1,4 +1,4 @@
-/* cmservice.c: AFS Cache Manager Service
+/* AFS Cache Manager Service
  *
  * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <linux/completion.h>
-#include "server.h"
-#include "cell.h"
-#include "transport.h"
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/call.h>
-#include "cmservice.h"
+#include <linux/ip.h>
 #include "internal.h"
+#include "afs_cm.h"
 
-static unsigned afscm_usage;           /* AFS cache manager usage count */
-static struct rw_semaphore afscm_sem;  /* AFS cache manager start/stop semaphore */
-
-static int afscm_new_call(struct rxrpc_call *call);
-static void afscm_attention(struct rxrpc_call *call);
-static void afscm_error(struct rxrpc_call *call);
-static void afscm_aemap(struct rxrpc_call *call);
-
-static void _SRXAFSCM_CallBack(struct rxrpc_call *call);
-static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call);
-static void _SRXAFSCM_Probe(struct rxrpc_call *call);
-
-typedef void (*_SRXAFSCM_xxxx_t)(struct rxrpc_call *call);
-
-static const struct rxrpc_operation AFSCM_ops[] = {
-       {
-               .id     = 204,
-               .asize  = RXRPC_APP_MARK_EOF,
-               .name   = "CallBack",
-               .user   = _SRXAFSCM_CallBack,
-       },
-       {
-               .id     = 205,
-               .asize  = RXRPC_APP_MARK_EOF,
-               .name   = "InitCallBackState",
-               .user   = _SRXAFSCM_InitCallBackState,
-       },
-       {
-               .id     = 206,
-               .asize  = RXRPC_APP_MARK_EOF,
-               .name   = "Probe",
-               .user   = _SRXAFSCM_Probe,
-       },
-#if 0
-       {
-               .id     = 207,
-               .asize  = RXRPC_APP_MARK_EOF,
-               .name   = "GetLock",
-               .user   = _SRXAFSCM_GetLock,
-       },
-       {
-               .id     = 208,
-               .asize  = RXRPC_APP_MARK_EOF,
-               .name   = "GetCE",
-               .user   = _SRXAFSCM_GetCE,
-       },
-       {
-               .id     = 209,
-               .asize  = RXRPC_APP_MARK_EOF,
-               .name   = "GetXStatsVersion",
-               .user   = _SRXAFSCM_GetXStatsVersion,
-       },
-       {
-               .id     = 210,
-               .asize  = RXRPC_APP_MARK_EOF,
-               .name   = "GetXStats",
-               .user   = _SRXAFSCM_GetXStats,
-       }
-#endif
-};
+struct workqueue_struct *afs_cm_workqueue;
 
-static struct rxrpc_service AFSCM_service = {
-       .name           = "AFS/CM",
-       .owner          = THIS_MODULE,
-       .link           = LIST_HEAD_INIT(AFSCM_service.link),
-       .new_call       = afscm_new_call,
-       .service_id     = 1,
-       .attn_func      = afscm_attention,
-       .error_func     = afscm_error,
-       .aemap_func     = afscm_aemap,
-       .ops_begin      = &AFSCM_ops[0],
-       .ops_end        = &AFSCM_ops[ARRAY_SIZE(AFSCM_ops)],
-};
+static int afs_deliver_cb_init_call_back_state(struct afs_call *,
+                                              struct sk_buff *, bool);
+static int afs_deliver_cb_init_call_back_state3(struct afs_call *,
+                                               struct sk_buff *, bool);
+static int afs_deliver_cb_probe(struct afs_call *, struct sk_buff *, bool);
+static int afs_deliver_cb_callback(struct afs_call *, struct sk_buff *, bool);
+static int afs_deliver_cb_get_capabilities(struct afs_call *, struct sk_buff *,
+                                          bool);
+static void afs_cm_destructor(struct afs_call *);
 
-static DECLARE_COMPLETION(kafscmd_alive);
-static DECLARE_COMPLETION(kafscmd_dead);
-static DECLARE_WAIT_QUEUE_HEAD(kafscmd_sleepq);
-static LIST_HEAD(kafscmd_attention_list);
-static LIST_HEAD(afscm_calls);
-static DEFINE_SPINLOCK(afscm_calls_lock);
-static DEFINE_SPINLOCK(kafscmd_attention_lock);
-static int kafscmd_die;
-
-/*****************************************************************************/
 /*
- * AFS Cache Manager kernel thread
+ * CB.CallBack operation type
  */
-static int kafscmd(void *arg)
-{
-       DECLARE_WAITQUEUE(myself, current);
-
-       struct rxrpc_call *call;
-       _SRXAFSCM_xxxx_t func;
-       int die;
-
-       printk(KERN_INFO "kAFS: Started kafscmd %d\n", current->pid);
-
-       daemonize("kafscmd");
-
-       complete(&kafscmd_alive);
-
-       /* loop around looking for things to attend to */
-       do {
-               if (list_empty(&kafscmd_attention_list)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       add_wait_queue(&kafscmd_sleepq, &myself);
-
-                       for (;;) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               if (!list_empty(&kafscmd_attention_list) ||
-                                   signal_pending(current) ||
-                                   kafscmd_die)
-                                       break;
-
-                               schedule();
-                       }
-
-                       remove_wait_queue(&kafscmd_sleepq, &myself);
-                       set_current_state(TASK_RUNNING);
-               }
-
-               die = kafscmd_die;
-
-               /* dequeue the next call requiring attention */
-               call = NULL;
-               spin_lock(&kafscmd_attention_lock);
-
-               if (!list_empty(&kafscmd_attention_list)) {
-                       call = list_entry(kafscmd_attention_list.next,
-                                         struct rxrpc_call,
-                                         app_attn_link);
-                       list_del_init(&call->app_attn_link);
-                       die = 0;
-               }
-
-               spin_unlock(&kafscmd_attention_lock);
-
-               if (call) {
-                       /* act upon it */
-                       _debug("@@@ Begin Attend Call %p", call);
-
-                       func = call->app_user;
-                       if (func)
-                               func(call);
-
-                       rxrpc_put_call(call);
-
-                       _debug("@@@ End Attend Call %p", call);
-               }
-
-       } while(!die);
-
-       /* and that's all */
-       complete_and_exit(&kafscmd_dead, 0);
-
-} /* end kafscmd() */
+static const struct afs_call_type afs_SRXCBCallBack = {
+       .name           = "CB.CallBack",
+       .deliver        = afs_deliver_cb_callback,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_cm_destructor,
+};
 
-/*****************************************************************************/
 /*
- * handle a call coming in to the cache manager
- * - if I want to keep the call, I must increment its usage count
- * - the return value will be negated and passed back in an abort packet if
- *   non-zero
- * - serialised by virtue of there only being one krxiod
+ * CB.InitCallBackState operation type
  */
-static int afscm_new_call(struct rxrpc_call *call)
-{
-       _enter("%p{cid=%u u=%d}",
-              call, ntohl(call->call_id), atomic_read(&call->usage));
-
-       rxrpc_get_call(call);
-
-       /* add to my current call list */
-       spin_lock(&afscm_calls_lock);
-       list_add(&call->app_link,&afscm_calls);
-       spin_unlock(&afscm_calls_lock);
-
-       _leave(" = 0");
-       return 0;
-
-} /* end afscm_new_call() */
+static const struct afs_call_type afs_SRXCBInitCallBackState = {
+       .name           = "CB.InitCallBackState",
+       .deliver        = afs_deliver_cb_init_call_back_state,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_cm_destructor,
+};
 
-/*****************************************************************************/
 /*
- * queue on the kafscmd queue for attention
+ * CB.InitCallBackState3 operation type
  */
-static void afscm_attention(struct rxrpc_call *call)
-{
-       _enter("%p{cid=%u u=%d}",
-              call, ntohl(call->call_id), atomic_read(&call->usage));
-
-       spin_lock(&kafscmd_attention_lock);
-
-       if (list_empty(&call->app_attn_link)) {
-               list_add_tail(&call->app_attn_link, &kafscmd_attention_list);
-               rxrpc_get_call(call);
-       }
-
-       spin_unlock(&kafscmd_attention_lock);
-
-       wake_up(&kafscmd_sleepq);
-
-       _leave(" {u=%d}", atomic_read(&call->usage));
-} /* end afscm_attention() */
+static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
+       .name           = "CB.InitCallBackState3",
+       .deliver        = afs_deliver_cb_init_call_back_state3,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_cm_destructor,
+};
 
-/*****************************************************************************/
 /*
- * handle my call being aborted
- * - clean up, dequeue and put my ref to the call
+ * CB.Probe operation type
  */
-static void afscm_error(struct rxrpc_call *call)
-{
-       int removed;
-
-       _enter("%p{est=%s ac=%u er=%d}",
-              call,
-              rxrpc_call_error_states[call->app_err_state],
-              call->app_abort_code,
-              call->app_errno);
-
-       spin_lock(&kafscmd_attention_lock);
-
-       if (list_empty(&call->app_attn_link)) {
-               list_add_tail(&call->app_attn_link, &kafscmd_attention_list);
-               rxrpc_get_call(call);
-       }
-
-       spin_unlock(&kafscmd_attention_lock);
-
-       removed = 0;
-       spin_lock(&afscm_calls_lock);
-       if (!list_empty(&call->app_link)) {
-               list_del_init(&call->app_link);
-               removed = 1;
-       }
-       spin_unlock(&afscm_calls_lock);
-
-       if (removed)
-               rxrpc_put_call(call);
-
-       wake_up(&kafscmd_sleepq);
+static const struct afs_call_type afs_SRXCBProbe = {
+       .name           = "CB.Probe",
+       .deliver        = afs_deliver_cb_probe,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_cm_destructor,
+};
 
-       _leave("");
-} /* end afscm_error() */
+/*
+ * CB.GetCapabilities operation type
+ */
+static const struct afs_call_type afs_SRXCBGetCapabilites = {
+       .name           = "CB.GetCapabilities",
+       .deliver        = afs_deliver_cb_get_capabilities,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_cm_destructor,
+};
 
-/*****************************************************************************/
 /*
- * map afs abort codes to/from Linux error codes
- * - called with call->lock held
+ * route an incoming cache manager call
+ * - return T if supported, F if not
  */
-static void afscm_aemap(struct rxrpc_call *call)
+bool afs_cm_incoming_call(struct afs_call *call)
 {
-       switch (call->app_err_state) {
-       case RXRPC_ESTATE_LOCAL_ABORT:
-               call->app_abort_code = -call->app_errno;
-               break;
-       case RXRPC_ESTATE_PEER_ABORT:
-               call->app_errno = -ECONNABORTED;
-               break;
+       u32 operation_id = ntohl(call->operation_ID);
+
+       _enter("{CB.OP %u}", operation_id);
+
+       switch (operation_id) {
+       case CBCallBack:
+               call->type = &afs_SRXCBCallBack;
+               return true;
+       case CBInitCallBackState:
+               call->type = &afs_SRXCBInitCallBackState;
+               return true;
+       case CBInitCallBackState3:
+               call->type = &afs_SRXCBInitCallBackState3;
+               return true;
+       case CBProbe:
+               call->type = &afs_SRXCBProbe;
+               return true;
+       case CBGetCapabilities:
+               call->type = &afs_SRXCBGetCapabilites;
+               return true;
        default:
-               break;
+               return false;
        }
-} /* end afscm_aemap() */
+}
 
-/*****************************************************************************/
 /*
- * start the cache manager service if not already started
+ * clean up a cache manager call
  */
-int afscm_start(void)
+static void afs_cm_destructor(struct afs_call *call)
 {
-       int ret;
-
-       down_write(&afscm_sem);
-       if (!afscm_usage) {
-               ret = kernel_thread(kafscmd, NULL, 0);
-               if (ret < 0)
-                       goto out;
-
-               wait_for_completion(&kafscmd_alive);
-
-               ret = rxrpc_add_service(afs_transport, &AFSCM_service);
-               if (ret < 0)
-                       goto kill;
-
-               afs_kafstimod_add_timer(&afs_mntpt_expiry_timer,
-                                       afs_mntpt_expiry_timeout * HZ);
-       }
-
-       afscm_usage++;
-       up_write(&afscm_sem);
-
-       return 0;
-
- kill:
-       kafscmd_die = 1;
-       wake_up(&kafscmd_sleepq);
-       wait_for_completion(&kafscmd_dead);
-
- out:
-       up_write(&afscm_sem);
-       return ret;
+       _enter("");
 
-} /* end afscm_start() */
+       afs_put_server(call->server);
+       call->server = NULL;
+       kfree(call->buffer);
+       call->buffer = NULL;
+}
 
-/*****************************************************************************/
 /*
- * stop the cache manager service
+ * allow the fileserver to see if the cache manager is still alive
  */
-void afscm_stop(void)
+static void SRXAFSCB_CallBack(struct work_struct *work)
 {
-       struct rxrpc_call *call;
+       struct afs_call *call = container_of(work, struct afs_call, work);
 
-       down_write(&afscm_sem);
+       _enter("");
 
-       BUG_ON(afscm_usage == 0);
-       afscm_usage--;
+       /* be sure to send the reply *before* attempting to spam the AFS server
+        * with FSFetchStatus requests on the vnodes with broken callbacks lest
+        * the AFS server get into a vicious cycle of trying to break further
+        * callbacks because it hadn't received completion of the CBCallBack op
+        * yet */
+       afs_send_empty_reply(call);
 
-       if (afscm_usage == 0) {
-               /* don't want more incoming calls */
-               rxrpc_del_service(afs_transport, &AFSCM_service);
-
-               /* abort any calls I've still got open (the afscm_error() will
-                * dequeue them) */
-               spin_lock(&afscm_calls_lock);
-               while (!list_empty(&afscm_calls)) {
-                       call = list_entry(afscm_calls.next,
-                                         struct rxrpc_call,
-                                         app_link);
+       afs_break_callbacks(call->server, call->count, call->request);
+       _leave("");
+}
 
-                       list_del_init(&call->app_link);
-                       rxrpc_get_call(call);
-                       spin_unlock(&afscm_calls_lock);
+/*
+ * deliver request data to a CB.CallBack call
+ */
+static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
+                                  bool last)
+{
+       struct afs_callback *cb;
+       struct afs_server *server;
+       struct in_addr addr;
+       __be32 *bp;
+       u32 tmp;
+       int ret, loop;
+
+       _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+
+       switch (call->unmarshall) {
+       case 0:
+               call->offset = 0;
+               call->unmarshall++;
+
+               /* extract the FID array and its count in two steps */
+       case 1:
+               _debug("extract FID count");
+               ret = afs_extract_data(call, skb, last, &call->tmp, 4);
+               switch (ret) {
+               case 0:         break;
+               case -EAGAIN:   return 0;
+               default:        return ret;
+               }
 
-                       rxrpc_call_abort(call, -ESRCH); /* abort, dequeue and
-                                                        * put */
+               call->count = ntohl(call->tmp);
+               _debug("FID count: %u", call->count);
+               if (call->count > AFSCBMAX)
+                       return -EBADMSG;
+
+               call->buffer = kmalloc(call->count * 3 * 4, GFP_KERNEL);
+               if (!call->buffer)
+                       return -ENOMEM;
+               call->offset = 0;
+               call->unmarshall++;
+
+       case 2:
+               _debug("extract FID array");
+               ret = afs_extract_data(call, skb, last, call->buffer,
+                                      call->count * 3 * 4);
+               switch (ret) {
+               case 0:         break;
+               case -EAGAIN:   return 0;
+               default:        return ret;
+               }
 
-                       _debug("nuking active call %08x.%d",
-                              ntohl(call->conn->conn_id),
-                              ntohl(call->call_id));
-                       rxrpc_put_call(call);
-                       rxrpc_put_call(call);
+               _debug("unmarshall FID array");
+               call->request = kcalloc(call->count,
+                                       sizeof(struct afs_callback),
+                                       GFP_KERNEL);
+               if (!call->request)
+                       return -ENOMEM;
+
+               cb = call->request;
+               bp = call->buffer;
+               for (loop = call->count; loop > 0; loop--, cb++) {
+                       cb->fid.vid     = ntohl(*bp++);
+                       cb->fid.vnode   = ntohl(*bp++);
+                       cb->fid.unique  = ntohl(*bp++);
+                       cb->type        = AFSCM_CB_UNTYPED;
+               }
 
-                       spin_lock(&afscm_calls_lock);
+               call->offset = 0;
+               call->unmarshall++;
+
+               /* extract the callback array and its count in two steps */
+       case 3:
+               _debug("extract CB count");
+               ret = afs_extract_data(call, skb, last, &call->tmp, 4);
+               switch (ret) {
+               case 0:         break;
+               case -EAGAIN:   return 0;
+               default:        return ret;
                }
-               spin_unlock(&afscm_calls_lock);
 
-               /* get rid of my daemon */
-               kafscmd_die = 1;
-               wake_up(&kafscmd_sleepq);
-               wait_for_completion(&kafscmd_dead);
+               tmp = ntohl(call->tmp);
+               _debug("CB count: %u", tmp);
+               if (tmp != call->count && tmp != 0)
+                       return -EBADMSG;
+               call->offset = 0;
+               call->unmarshall++;
+               if (tmp == 0)
+                       goto empty_cb_array;
+
+       case 4:
+               _debug("extract CB array");
+               ret = afs_extract_data(call, skb, last, call->request,
+                                      call->count * 3 * 4);
+               switch (ret) {
+               case 0:         break;
+               case -EAGAIN:   return 0;
+               default:        return ret;
+               }
 
-               /* dispose of any calls waiting for attention */
-               spin_lock(&kafscmd_attention_lock);
-               while (!list_empty(&kafscmd_attention_list)) {
-                       call = list_entry(kafscmd_attention_list.next,
-                                         struct rxrpc_call,
-                                         app_attn_link);
+               _debug("unmarshall CB array");
+               cb = call->request;
+               bp = call->buffer;
+               for (loop = call->count; loop > 0; loop--, cb++) {
+                       cb->version     = ntohl(*bp++);
+                       cb->expiry      = ntohl(*bp++);
+                       cb->type        = ntohl(*bp++);
+               }
 
-                       list_del_init(&call->app_attn_link);
-                       spin_unlock(&kafscmd_attention_lock);
+       empty_cb_array:
+               call->offset = 0;
+               call->unmarshall++;
 
-                       rxrpc_put_call(call);
+       case 5:
+               _debug("trailer");
+               if (skb->len != 0)
+                       return -EBADMSG;
+               break;
+       }
 
-                       spin_lock(&kafscmd_attention_lock);
-               }
-               spin_unlock(&kafscmd_attention_lock);
+       if (!last)
+               return 0;
 
-               afs_kafstimod_del_timer(&afs_mntpt_expiry_timer);
-       }
+       call->state = AFS_CALL_REPLYING;
 
-       up_write(&afscm_sem);
+       /* we'll need the file server record as that tells us which set of
+        * vnodes to operate upon */
+       memcpy(&addr, &ip_hdr(skb)->saddr, 4);
+       server = afs_find_server(&addr);
+       if (!server)
+               return -ENOTCONN;
+       call->server = server;
 
-} /* end afscm_stop() */
+       INIT_WORK(&call->work, SRXAFSCB_CallBack);
+       schedule_work(&call->work);
+       return 0;
+}
 
-/*****************************************************************************/
 /*
- * handle the fileserver breaking a set of callbacks
+ * allow the fileserver to request callback state (re-)initialisation
  */
-static void _SRXAFSCM_CallBack(struct rxrpc_call *call)
+static void SRXAFSCB_InitCallBackState(struct work_struct *work)
 {
-       struct afs_server *server;
-       size_t count, qty, tmp;
-       int ret = 0, removed;
-
-       _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]);
-
-       server = afs_server_get_from_peer(call->conn->peer);
-
-       switch (call->app_call_state) {
-               /* we've received the last packet
-                * - drain all the data from the call and send the reply
-                */
-       case RXRPC_CSTATE_SRVR_GOT_ARGS:
-               ret = -EBADMSG;
-               qty = call->app_ready_qty;
-               if (qty < 8 || qty > 50 * (6 * 4) + 8)
-                       break;
-
-               {
-                       struct afs_callback *cb, *pcb;
-                       int loop;
-                       __be32 *fp, *bp;
-
-                       fp = rxrpc_call_alloc_scratch(call, qty);
-
-                       /* drag the entire argument block out to the scratch
-                        * space */
-                       ret = rxrpc_call_read_data(call, fp, qty, 0);
-                       if (ret < 0)
-                               break;
-
-                       /* and unmarshall the parameter block */
-                       ret = -EBADMSG;
-                       count = ntohl(*fp++);
-                       if (count>AFSCBMAX ||
-                           (count * (3 * 4) + 8 != qty &&
-                            count * (6 * 4) + 8 != qty))
-                               break;
-
-                       bp = fp + count*3;
-                       tmp = ntohl(*bp++);
-                       if (tmp > 0 && tmp != count)
-                               break;
-                       if (tmp == 0)
-                               bp = NULL;
-
-                       pcb = cb = rxrpc_call_alloc_scratch_s(
-                               call, struct afs_callback);
-
-                       for (loop = count - 1; loop >= 0; loop--) {
-                               pcb->fid.vid    = ntohl(*fp++);
-                               pcb->fid.vnode  = ntohl(*fp++);
-                               pcb->fid.unique = ntohl(*fp++);
-                               if (bp) {
-                                       pcb->version    = ntohl(*bp++);
-                                       pcb->expiry     = ntohl(*bp++);
-                                       pcb->type       = ntohl(*bp++);
-                               }
-                               else {
-                                       pcb->version    = 0;
-                                       pcb->expiry     = 0;
-                                       pcb->type       = AFSCM_CB_UNTYPED;
-                               }
-                               pcb++;
-                       }
-
-                       /* invoke the actual service routine */
-                       ret = SRXAFSCM_CallBack(server, count, cb);
-                       if (ret < 0)
-                               break;
-               }
+       struct afs_call *call = container_of(work, struct afs_call, work);
 
-               /* send the reply */
-               ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
-                                           GFP_KERNEL, 0, &count);
-               if (ret < 0)
-                       break;
-               break;
-
-               /* operation complete */
-       case RXRPC_CSTATE_COMPLETE:
-               call->app_user = NULL;
-               removed = 0;
-               spin_lock(&afscm_calls_lock);
-               if (!list_empty(&call->app_link)) {
-                       list_del_init(&call->app_link);
-                       removed = 1;
-               }
-               spin_unlock(&afscm_calls_lock);
+       _enter("{%p}", call->server);
 
-               if (removed)
-                       rxrpc_put_call(call);
-               break;
+       afs_init_callback_state(call->server);
+       afs_send_empty_reply(call);
+       _leave("");
+}
 
-               /* operation terminated on error */
-       case RXRPC_CSTATE_ERROR:
-               call->app_user = NULL;
-               break;
+/*
+ * deliver request data to a CB.InitCallBackState call
+ */
+static int afs_deliver_cb_init_call_back_state(struct afs_call *call,
+                                              struct sk_buff *skb,
+                                              bool last)
+{
+       struct afs_server *server;
+       struct in_addr addr;
 
-       default:
-               break;
-       }
+       _enter(",{%u},%d", skb->len, last);
 
-       if (ret < 0)
-               rxrpc_call_abort(call, ret);
+       if (skb->len > 0)
+               return -EBADMSG;
+       if (!last)
+               return 0;
 
-       afs_put_server(server);
+       /* no unmarshalling required */
+       call->state = AFS_CALL_REPLYING;
 
-       _leave(" = %d", ret);
+       /* we'll need the file server record as that tells us which set of
+        * vnodes to operate upon */
+       memcpy(&addr, &ip_hdr(skb)->saddr, 4);
+       server = afs_find_server(&addr);
+       if (!server)
+               return -ENOTCONN;
+       call->server = server;
 
-} /* end _SRXAFSCM_CallBack() */
+       INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
+       schedule_work(&call->work);
+       return 0;
+}
 
-/*****************************************************************************/
 /*
- * handle the fileserver asking us to initialise our callback state
+ * deliver request data to a CB.InitCallBackState3 call
  */
-static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call)
+static int afs_deliver_cb_init_call_back_state3(struct afs_call *call,
+                                               struct sk_buff *skb,
+                                               bool last)
 {
        struct afs_server *server;
-       size_t count;
-       int ret = 0, removed;
+       struct in_addr addr;
 
-       _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]);
+       _enter(",{%u},%d", skb->len, last);
 
-       server = afs_server_get_from_peer(call->conn->peer);
+       if (!last)
+               return 0;
 
-       switch (call->app_call_state) {
-               /* we've received the last packet - drain all the data from the
-                * call */
-       case RXRPC_CSTATE_SRVR_GOT_ARGS:
-               /* shouldn't be any args */
-               ret = -EBADMSG;
-               break;
-
-               /* send the reply when asked for it */
-       case RXRPC_CSTATE_SRVR_SND_REPLY:
-               /* invoke the actual service routine */
-               ret = SRXAFSCM_InitCallBackState(server);
-               if (ret < 0)
-                       break;
-
-               ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
-                                           GFP_KERNEL, 0, &count);
-               if (ret < 0)
-                       break;
-               break;
+       /* no unmarshalling required */
+       call->state = AFS_CALL_REPLYING;
 
-               /* operation complete */
-       case RXRPC_CSTATE_COMPLETE:
-               call->app_user = NULL;
-               removed = 0;
-               spin_lock(&afscm_calls_lock);
-               if (!list_empty(&call->app_link)) {
-                       list_del_init(&call->app_link);
-                       removed = 1;
-               }
-               spin_unlock(&afscm_calls_lock);
+       /* we'll need the file server record as that tells us which set of
+        * vnodes to operate upon */
+       memcpy(&addr, &ip_hdr(skb)->saddr, 4);
+       server = afs_find_server(&addr);
+       if (!server)
+               return -ENOTCONN;
+       call->server = server;
 
-               if (removed)
-                       rxrpc_put_call(call);
-               break;
-
-               /* operation terminated on error */
-       case RXRPC_CSTATE_ERROR:
-               call->app_user = NULL;
-               break;
-
-       default:
-               break;
-       }
-
-       if (ret < 0)
-               rxrpc_call_abort(call, ret);
-
-       afs_put_server(server);
+       INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
+       schedule_work(&call->work);
+       return 0;
+}
 
-       _leave(" = %d", ret);
+/*
+ * allow the fileserver to see if the cache manager is still alive
+ */
+static void SRXAFSCB_Probe(struct work_struct *work)
+{
+       struct afs_call *call = container_of(work, struct afs_call, work);
 
-} /* end _SRXAFSCM_InitCallBackState() */
+       _enter("");
+       afs_send_empty_reply(call);
+       _leave("");
+}
 
-/*****************************************************************************/
 /*
- * handle a probe from a fileserver
+ * deliver request data to a CB.Probe call
  */
-static void _SRXAFSCM_Probe(struct rxrpc_call *call)
+static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb,
+                               bool last)
 {
-       struct afs_server *server;
-       size_t count;
-       int ret = 0, removed;
-
-       _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]);
+       _enter(",{%u},%d", skb->len, last);
 
-       server = afs_server_get_from_peer(call->conn->peer);
+       if (skb->len > 0)
+               return -EBADMSG;
+       if (!last)
+               return 0;
 
-       switch (call->app_call_state) {
-               /* we've received the last packet - drain all the data from the
-                * call */
-       case RXRPC_CSTATE_SRVR_GOT_ARGS:
-               /* shouldn't be any args */
-               ret = -EBADMSG;
-               break;
+       /* no unmarshalling required */
+       call->state = AFS_CALL_REPLYING;
 
-               /* send the reply when asked for it */
-       case RXRPC_CSTATE_SRVR_SND_REPLY:
-               /* invoke the actual service routine */
-               ret = SRXAFSCM_Probe(server);
-               if (ret < 0)
-                       break;
-
-               ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
-                                           GFP_KERNEL, 0, &count);
-               if (ret < 0)
-                       break;
-               break;
+       INIT_WORK(&call->work, SRXAFSCB_Probe);
+       schedule_work(&call->work);
+       return 0;
+}
 
-               /* operation complete */
-       case RXRPC_CSTATE_COMPLETE:
-               call->app_user = NULL;
-               removed = 0;
-               spin_lock(&afscm_calls_lock);
-               if (!list_empty(&call->app_link)) {
-                       list_del_init(&call->app_link);
-                       removed = 1;
+/*
+ * allow the fileserver to ask about the cache manager's capabilities
+ */
+static void SRXAFSCB_GetCapabilities(struct work_struct *work)
+{
+       struct afs_interface *ifs;
+       struct afs_call *call = container_of(work, struct afs_call, work);
+       int loop, nifs;
+
+       struct {
+               struct /* InterfaceAddr */ {
+                       __be32 nifs;
+                       __be32 uuid[11];
+                       __be32 ifaddr[32];
+                       __be32 netmask[32];
+                       __be32 mtu[32];
+               } ia;
+               struct /* Capabilities */ {
+                       __be32 capcount;
+                       __be32 caps[1];
+               } cap;
+       } reply;
+
+       _enter("");
+
+       nifs = 0;
+       ifs = kcalloc(32, sizeof(*ifs), GFP_KERNEL);
+       if (ifs) {
+               nifs = afs_get_ipv4_interfaces(ifs, 32, false);
+               if (nifs < 0) {
+                       kfree(ifs);
+                       ifs = NULL;
+                       nifs = 0;
                }
-               spin_unlock(&afscm_calls_lock);
+       }
 
-               if (removed)
-                       rxrpc_put_call(call);
-               break;
+       memset(&reply, 0, sizeof(reply));
+       reply.ia.nifs = htonl(nifs);
+
+       reply.ia.uuid[0] = htonl(afs_uuid.time_low);
+       reply.ia.uuid[1] = htonl(afs_uuid.time_mid);
+       reply.ia.uuid[2] = htonl(afs_uuid.time_hi_and_version);
+       reply.ia.uuid[3] = htonl((s8) afs_uuid.clock_seq_hi_and_reserved);
+       reply.ia.uuid[4] = htonl((s8) afs_uuid.clock_seq_low);
+       for (loop = 0; loop < 6; loop++)
+               reply.ia.uuid[loop + 5] = htonl((s8) afs_uuid.node[loop]);
+
+       if (ifs) {
+               for (loop = 0; loop < nifs; loop++) {
+                       reply.ia.ifaddr[loop] = ifs[loop].address.s_addr;
+                       reply.ia.netmask[loop] = ifs[loop].netmask.s_addr;
+                       reply.ia.mtu[loop] = htonl(ifs[loop].mtu);
+               }
+       }
 
-               /* operation terminated on error */
-       case RXRPC_CSTATE_ERROR:
-               call->app_user = NULL;
-               break;
+       reply.cap.capcount = htonl(1);
+       reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION);
+       afs_send_simple_reply(call, &reply, sizeof(reply));
 
-       default:
-               break;
-       }
+       _leave("");
+}
 
-       if (ret < 0)
-               rxrpc_call_abort(call, ret);
+/*
+ * deliver request data to a CB.GetCapabilities call
+ */
+static int afs_deliver_cb_get_capabilities(struct afs_call *call,
+                                          struct sk_buff *skb, bool last)
+{
+       _enter(",{%u},%d", skb->len, last);
 
-       afs_put_server(server);
+       if (skb->len > 0)
+               return -EBADMSG;
+       if (!last)
+               return 0;
 
-       _leave(" = %d", ret);
+       /* no unmarshalling required */
+       call->state = AFS_CALL_REPLYING;
 
-} /* end _SRXAFSCM_Probe() */
+       INIT_WORK(&call->work, SRXAFSCB_GetCapabilities);
+       schedule_work(&call->work);
+       return 0;
+}
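
The CB.CallBack delivery routine above is written as a resumable state machine: call->unmarshall records how far decoding has progressed, call->offset tracks the partially copied field, and a -EAGAIN return from afs_extract_data() means "wait for more socket data" rather than an error. A minimal user-space sketch of the same pattern, with made-up names and only two fields for brevity:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { AGAIN = 1, DONE = 0, BAD = -1 };

struct decoder {
        int      state;         /* which field we are up to */
        size_t   offset;        /* bytes of that field already copied */
        uint32_t count;         /* first field: element count */
        uint32_t elem;          /* second field: one element, for brevity */
};

/* copy up to want bytes of *data into dst, resuming at d->offset */
static int extract(struct decoder *d, void *dst, size_t want,
                   const uint8_t **data, size_t *len)
{
        size_t n = want - d->offset;

        if (n > *len)
                n = *len;
        memcpy((uint8_t *)dst + d->offset, *data, n);
        d->offset += n;
        *data += n;
        *len -= n;
        return d->offset == want ? DONE : AGAIN;
}

/* feed one chunk of wire data into the decoder */
static int feed(struct decoder *d, const uint8_t *data, size_t len)
{
        switch (d->state) {
        case 0:
                if (extract(d, &d->count, 4, &data, &len) == AGAIN)
                        return AGAIN;
                d->offset = 0;
                d->state = 1;
                /* fall through */
        case 1:
                if (extract(d, &d->elem, 4, &data, &len) == AGAIN)
                        return AGAIN;
                d->state = 2;
        }
        return len ? BAD : DONE;        /* trailing junk is an error */
}

int main(void)
{
        static const uint8_t wire[] = { 1, 0, 0, 0, 42, 0, 0, 0 };
        struct decoder d = { 0 };

        /* deliver the message in two fragments, as rxrpc might */
        printf("%d\n", feed(&d, wire, 3));      /* prints 1 (AGAIN) */
        printf("%d\n", feed(&d, wire + 3, 5));  /* prints 0 (DONE)  */
        return 0;
}

The deliberate case fall-through mirrors the kernel routine: once a field completes within the current chunk, decoding continues straight into the next field without waiting for another packet.
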
diff --git a/fs/afs/cmservice.h b/fs/afs/cmservice.h
deleted file mode 100644 (file)
index af8d4d6..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* cmservice.h: AFS Cache Manager Service declarations
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_CMSERVICE_H
-#define _LINUX_AFS_CMSERVICE_H
-
-#include <rxrpc/transport.h>
-#include "types.h"
-
-/* cache manager start/stop */
-extern int afscm_start(void);
-extern void afscm_stop(void);
-
-/* cache manager server functions */
-extern int SRXAFSCM_InitCallBackState(struct afs_server *server);
-extern int SRXAFSCM_CallBack(struct afs_server *server,
-                            size_t count,
-                            struct afs_callback callbacks[]);
-extern int SRXAFSCM_Probe(struct afs_server *server);
-
-#endif /* _LINUX_AFS_CMSERVICE_H */
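
With cmservice.h removed, incoming cache-manager RPCs are no longer bound through exported SRXAFSCM_* entry points; instead afs_cm_incoming_call() above selects a const operation descriptor by RX operation ID and lets its ->deliver handler do the work. A stand-alone sketch of that dispatch shape, using hypothetical names and the CB.Probe operation number (206) from the old AFSCM_ops table:

#include <stdbool.h>
#include <stdio.h>

struct call;

/* per-operation descriptor, in the style of struct afs_call_type */
struct call_type {
        const char *name;
        int (*deliver)(struct call *call);
};

struct call {
        const struct call_type *type;
        unsigned int op;        /* RX operation ID from the packet header */
};

static int deliver_probe(struct call *call)
{
        printf("%s: no arguments, send empty reply\n", call->type->name);
        return 0;
}

static const struct call_type cb_probe = {
        .name           = "CB.Probe",
        .deliver        = deliver_probe,
};

/* route an incoming call by operation ID; false means "abort: unsupported" */
static bool incoming_call(struct call *call)
{
        switch (call->op) {
        case 206:                       /* CB.Probe */
                call->type = &cb_probe;
                return true;
        default:
                return false;
        }
}

int main(void)
{
        struct call c = { .op = 206 };

        if (incoming_call(&c))
                c.type->deliver(&c);
        return 0;
}
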
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index b6dc2eb..dac5b99 100644 (file)
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
-#include <linux/smp_lock.h>
-#include "vnode.h"
-#include "volume.h"
-#include <rxrpc/call.h>
-#include "super.h"
+#include <linux/ctype.h>
 #include "internal.h"
 
-static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry,
-                                    struct nameidata *nd);
+static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
+                                struct nameidata *nd);
 static int afs_dir_open(struct inode *inode, struct file *file);
-static int afs_dir_readdir(struct file *file, void *dirent, filldir_t filldir);
+static int afs_readdir(struct file *file, void *dirent, filldir_t filldir);
 static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd);
 static int afs_d_delete(struct dentry *dentry);
-static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen,
+static void afs_d_release(struct dentry *dentry);
+static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
                                  loff_t fpos, u64 ino, unsigned dtype);
+static int afs_create(struct inode *dir, struct dentry *dentry, int mode,
+                     struct nameidata *nd);
+static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+static int afs_rmdir(struct inode *dir, struct dentry *dentry);
+static int afs_unlink(struct inode *dir, struct dentry *dentry);
+static int afs_link(struct dentry *from, struct inode *dir,
+                   struct dentry *dentry);
+static int afs_symlink(struct inode *dir, struct dentry *dentry,
+                      const char *content);
+static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
+                     struct inode *new_dir, struct dentry *new_dentry);
 
 const struct file_operations afs_dir_file_operations = {
        .open           = afs_dir_open,
-       .readdir        = afs_dir_readdir,
+       .release        = afs_release,
+       .readdir        = afs_readdir,
 };
 
 const struct inode_operations afs_dir_inode_operations = {
-       .lookup         = afs_dir_lookup,
+       .create         = afs_create,
+       .lookup         = afs_lookup,
+       .link           = afs_link,
+       .unlink         = afs_unlink,
+       .symlink        = afs_symlink,
+       .mkdir          = afs_mkdir,
+       .rmdir          = afs_rmdir,
+       .rename         = afs_rename,
+       .permission     = afs_permission,
        .getattr        = afs_inode_getattr,
-#if 0 /* TODO */
-       .create         = afs_dir_create,
-       .link           = afs_dir_link,
-       .unlink         = afs_dir_unlink,
-       .symlink        = afs_dir_symlink,
-       .mkdir          = afs_dir_mkdir,
-       .rmdir          = afs_dir_rmdir,
-       .mknod          = afs_dir_mknod,
-       .rename         = afs_dir_rename,
-#endif
 };
 
 static struct dentry_operations afs_fs_dentry_operations = {
        .d_revalidate   = afs_d_revalidate,
        .d_delete       = afs_d_delete,
+       .d_release      = afs_d_release,
 };
 
 #define AFS_DIR_HASHTBL_SIZE   128
@@ -105,14 +113,13 @@ struct afs_dir_page {
        union afs_dir_block blocks[PAGE_SIZE / sizeof(union afs_dir_block)];
 };
 
-struct afs_dir_lookup_cookie {
+struct afs_lookup_cookie {
        struct afs_fid  fid;
        const char      *name;
        size_t          nlen;
        int             found;
 };
 
-/*****************************************************************************/
 /*
  * check that a directory page is valid
  */
@@ -128,9 +135,10 @@ static inline void afs_dir_check_page(struct inode *dir, struct page *page)
        if (qty == 0)
                goto error;
 
-       if (page->index==0 && qty!=ntohs(dbuf->blocks[0].pagehdr.npages)) {
+       if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) {
                printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n",
-                      __FUNCTION__,dir->i_ino,qty,ntohs(dbuf->blocks[0].pagehdr.npages));
+                      __FUNCTION__, dir->i_ino, qty,
+                      ntohs(dbuf->blocks[0].pagehdr.npages));
                goto error;
        }
 #endif
@@ -157,13 +165,11 @@ static inline void afs_dir_check_page(struct inode *dir, struct page *page)
        SetPageChecked(page);
        return;
 
- error:
+error:
        SetPageChecked(page);
        SetPageError(page);
+}
 
-} /* end afs_dir_check_page() */
-
-/*****************************************************************************/
 /*
  * discard a page cached in the pagecache
  */
@@ -171,20 +177,22 @@ static inline void afs_dir_put_page(struct page *page)
 {
        kunmap(page);
        page_cache_release(page);
+}
 
-} /* end afs_dir_put_page() */
-
-/*****************************************************************************/
 /*
  * get a page into the pagecache
  */
-static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
+static struct page *afs_dir_get_page(struct inode *dir, unsigned long index,
+                                    struct key *key)
 {
        struct page *page;
+       struct file file = {
+               .private_data = key,
+       };
 
        _enter("{%lu},%lu", dir->i_ino, index);
 
-       page = read_mapping_page(dir->i_mapping, index, NULL);
+       page = read_mapping_page(dir->i_mapping, index, &file);
        if (!IS_ERR(page)) {
                wait_on_page_locked(page);
                kmap(page);
@@ -197,12 +205,12 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
        }
        return page;
 
- fail:
+fail:
        afs_dir_put_page(page);
+       _leave(" = -EIO");
        return ERR_PTR(-EIO);
-} /* end afs_dir_get_page() */
+}
 
-/*****************************************************************************/
 /*
  * open an AFS directory file
  */
@@ -213,15 +221,12 @@ static int afs_dir_open(struct inode *inode, struct file *file)
        BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
        BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
 
-       if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED)
+       if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(inode)->flags))
                return -ENOENT;
 
-       _leave(" = 0");
-       return 0;
-
-} /* end afs_dir_open() */
+       return afs_open(inode, file);
+}
 
-/*****************************************************************************/
 /*
  * deal with one block in an AFS directory
  */
@@ -250,7 +255,7 @@ static int afs_dir_iterate_block(unsigned *fpos,
                /* skip entries marked unused in the bitmap */
                if (!(block->pagehdr.bitmap[offset / 8] &
                      (1 << (offset % 8)))) {
-                       _debug("ENT[%Zu.%u]: unused\n",
+                       _debug("ENT[%Zu.%u]: unused",
                               blkoff / sizeof(union afs_dir_block), offset);
                        if (offset >= curr)
                                *fpos = blkoff +
@@ -264,7 +269,7 @@ static int afs_dir_iterate_block(unsigned *fpos,
                               sizeof(*block) -
                               offset * sizeof(union afs_dirent));
 
-               _debug("ENT[%Zu.%u]: %s %Zu \"%s\"\n",
+               _debug("ENT[%Zu.%u]: %s %Zu \"%s\"",
                       blkoff / sizeof(union afs_dir_block), offset,
                       (offset < curr ? "skip" : "fill"),
                       nlen, dire->u.name);
@@ -274,7 +279,7 @@ static int afs_dir_iterate_block(unsigned *fpos,
                        if (next >= AFS_DIRENT_PER_BLOCK) {
                                _debug("ENT[%Zu.%u]:"
                                       " %u travelled beyond end dir block"
-                                      " (len %u/%Zu)\n",
+                                      " (len %u/%Zu)",
                                       blkoff / sizeof(union afs_dir_block),
                                       offset, next, tmp, nlen);
                                return -EIO;
@@ -282,13 +287,13 @@ static int afs_dir_iterate_block(unsigned *fpos,
                        if (!(block->pagehdr.bitmap[next / 8] &
                              (1 << (next % 8)))) {
                                _debug("ENT[%Zu.%u]:"
-                                      " %u unmarked extension (len %u/%Zu)\n",
+                                      " %u unmarked extension (len %u/%Zu)",
                                       blkoff / sizeof(union afs_dir_block),
                                       offset, next, tmp, nlen);
                                return -EIO;
                        }
 
-                       _debug("ENT[%Zu.%u]: ext %u/%Zu\n",
+                       _debug("ENT[%Zu.%u]: ext %u/%Zu",
                               blkoff / sizeof(union afs_dir_block),
                               next, tmp, nlen);
                        next++;
@@ -304,7 +309,7 @@ static int afs_dir_iterate_block(unsigned *fpos,
                              nlen,
                              blkoff + offset * sizeof(union afs_dirent),
                              ntohl(dire->u.vnode),
-                             filldir == afs_dir_lookup_filldir ?
+                             filldir == afs_lookup_filldir ?
                              ntohl(dire->u.unique) : DT_UNKNOWN);
                if (ret < 0) {
                        _leave(" = 0 [full]");
@@ -316,16 +321,15 @@ static int afs_dir_iterate_block(unsigned *fpos,
 
        _leave(" = 1 [more]");
        return 1;
-} /* end afs_dir_iterate_block() */
+}
 
-/*****************************************************************************/
 /*
- * read an AFS directory
+ * iterate through the data blob that lists the contents of an AFS directory
  */
 static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
-                          filldir_t filldir)
+                          filldir_t filldir, struct key *key)
 {
-       union afs_dir_block     *dblock;
+       union afs_dir_block *dblock;
        struct afs_dir_page *dbuf;
        struct page *page;
        unsigned blkoff, limit;
@@ -333,7 +337,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
 
        _enter("{%lu},%u,,", dir->i_ino, *fpos);
 
-       if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
+       if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) {
                _leave(" = -ESTALE");
                return -ESTALE;
        }
@@ -348,7 +352,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
                blkoff = *fpos & ~(sizeof(union afs_dir_block) - 1);
 
                /* fetch the appropriate page from the directory */
-               page = afs_dir_get_page(dir, blkoff / PAGE_SIZE);
+               page = afs_dir_get_page(dir, blkoff / PAGE_SIZE, key);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        break;
@@ -377,43 +381,50 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
                ret = 0;
        }
 
- out:
+out:
        _leave(" = %d", ret);
        return ret;
-} /* end afs_dir_iterate() */
+}
 
-/*****************************************************************************/
 /*
  * read an AFS directory
  */
-static int afs_dir_readdir(struct file *file, void *cookie, filldir_t filldir)
+static int afs_readdir(struct file *file, void *cookie, filldir_t filldir)
 {
        unsigned fpos;
        int ret;
 
-       _enter("{%Ld,{%lu}}", file->f_pos, file->f_path.dentry->d_inode->i_ino);
+       _enter("{%Ld,{%lu}}",
+              file->f_pos, file->f_path.dentry->d_inode->i_ino);
+
+       ASSERT(file->private_data != NULL);
 
        fpos = file->f_pos;
-       ret = afs_dir_iterate(file->f_path.dentry->d_inode, &fpos, cookie, filldir);
+       ret = afs_dir_iterate(file->f_path.dentry->d_inode, &fpos,
+                             cookie, filldir, file->private_data);
        file->f_pos = fpos;
 
        _leave(" = %d", ret);
        return ret;
-} /* end afs_dir_readdir() */
+}
 
-/*****************************************************************************/
 /*
  * search the directory for a name
  * - if afs_dir_iterate_block() spots this function, it'll pass the FID
  *   uniquifier through dtype
  */
-static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen,
-                                 loff_t fpos, u64 ino, unsigned dtype)
+static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
+                             loff_t fpos, u64 ino, unsigned dtype)
 {
-       struct afs_dir_lookup_cookie *cookie = _cookie;
+       struct afs_lookup_cookie *cookie = _cookie;
 
-       _enter("{%s,%Zu},%s,%u,,%lu,%u",
-              cookie->name, cookie->nlen, name, nlen, ino, dtype);
+       _enter("{%s,%Zu},%s,%u,,%llu,%u",
+              cookie->name, cookie->nlen, name, nlen,
+              (unsigned long long) ino, dtype);
+
+       /* insanity checks first */
+       BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
+       BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
 
        if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) {
                _leave(" = 0 [no]");
@@ -426,216 +437,254 @@ static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen,
 
        _leave(" = -1 [found]");
        return -1;
-} /* end afs_dir_lookup_filldir() */
+}
 
-/*****************************************************************************/
 /*
- * look up an entry in a directory
+ * do a lookup in a directory
+ * - just returns the FID the dentry name maps to if found
  */
-static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry,
-                                    struct nameidata *nd)
+static int afs_do_lookup(struct inode *dir, struct dentry *dentry,
+                        struct afs_fid *fid, struct key *key)
 {
-       struct afs_dir_lookup_cookie cookie;
+       struct afs_lookup_cookie cookie;
        struct afs_super_info *as;
+       unsigned fpos;
+       int ret;
+
+       _enter("{%lu},%p{%s},", dir->i_ino, dentry, dentry->d_name.name);
+
+       as = dir->i_sb->s_fs_info;
+
+       /* search the directory */
+       cookie.name     = dentry->d_name.name;
+       cookie.nlen     = dentry->d_name.len;
+       cookie.fid.vid  = as->volume->vid;
+       cookie.found    = 0;
+
+       fpos = 0;
+       ret = afs_dir_iterate(dir, &fpos, &cookie, afs_lookup_filldir,
+                             key);
+       if (ret < 0) {
+               _leave(" = %d [iter]", ret);
+               return ret;
+       }
+
+       ret = -ENOENT;
+       if (!cookie.found) {
+               _leave(" = -ENOENT [not found]");
+               return -ENOENT;
+       }
+
+       *fid = cookie.fid;
+       _leave(" = 0 { vn=%u u=%u }", fid->vnode, fid->unique);
+       return 0;
+}
+
+/*
+ * look up an entry in a directory
+ */
+static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
+                                struct nameidata *nd)
+{
        struct afs_vnode *vnode;
+       struct afs_fid fid;
        struct inode *inode;
-       unsigned fpos;
+       struct key *key;
        int ret;
 
-       _enter("{%lu},%p{%s}", dir->i_ino, dentry, dentry->d_name.name);
+       vnode = AFS_FS_I(dir);
 
-       /* insanity checks first */
-       BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
-       BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
+       _enter("{%x:%d},%p{%s},",
+              vnode->fid.vid, vnode->fid.vnode, dentry, dentry->d_name.name);
+
+       ASSERTCMP(dentry->d_inode, ==, NULL);
 
        if (dentry->d_name.len > 255) {
                _leave(" = -ENAMETOOLONG");
                return ERR_PTR(-ENAMETOOLONG);
        }
 
-       vnode = AFS_FS_I(dir);
-       if (vnode->flags & AFS_VNODE_DELETED) {
+       if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
                _leave(" = -ESTALE");
                return ERR_PTR(-ESTALE);
        }
 
-       as = dir->i_sb->s_fs_info;
-
-       /* search the directory */
-       cookie.name     = dentry->d_name.name;
-       cookie.nlen     = dentry->d_name.len;
-       cookie.fid.vid  = as->volume->vid;
-       cookie.found    = 0;
+       key = afs_request_key(vnode->volume->cell);
+       if (IS_ERR(key)) {
+               _leave(" = %ld [key]", PTR_ERR(key));
+               return ERR_PTR(PTR_ERR(key));
+       }
 
-       fpos = 0;
-       ret = afs_dir_iterate(dir, &fpos, &cookie, afs_dir_lookup_filldir);
+       ret = afs_validate(vnode, key);
        if (ret < 0) {
-               _leave(" = %d", ret);
+               key_put(key);
+               _leave(" = %d [val]", ret);
                return ERR_PTR(ret);
        }
 
-       ret = -ENOENT;
-       if (!cookie.found) {
-               _leave(" = %d", ret);
+       ret = afs_do_lookup(dir, dentry, &fid, key);
+       if (ret < 0) {
+               key_put(key);
+               if (ret == -ENOENT) {
+                       d_add(dentry, NULL);
+                       _leave(" = NULL [negative]");
+                       return NULL;
+               }
+               _leave(" = %d [do]", ret);
                return ERR_PTR(ret);
        }
+       dentry->d_fsdata = (void *)(unsigned long) vnode->status.data_version;
 
        /* instantiate the dentry */
-       ret = afs_iget(dir->i_sb, &cookie.fid, &inode);
-       if (ret < 0) {
-               _leave(" = %d", ret);
-               return ERR_PTR(ret);
+       inode = afs_iget(dir->i_sb, key, &fid, NULL, NULL);
+       key_put(key);
+       if (IS_ERR(inode)) {
+               _leave(" = %ld", PTR_ERR(inode));
+               return ERR_PTR(PTR_ERR(inode));
        }
 
        dentry->d_op = &afs_fs_dentry_operations;
-       dentry->d_fsdata = (void *) (unsigned long) vnode->status.version;
 
        d_add(dentry, inode);
        _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }",
-              cookie.fid.vnode,
-              cookie.fid.unique,
+              fid.vnode,
+              fid.unique,
               dentry->d_inode->i_ino,
               dentry->d_inode->i_version);
 
        return NULL;
-} /* end afs_dir_lookup() */
+}
 
-/*****************************************************************************/
 /*
  * check that a dentry lookup hit has found a valid entry
  * - NOTE! the hit can be a negative hit too, so we can't assume we have an
  *   inode
- * (derived from nfs_lookup_revalidate)
  */
 static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-       struct afs_dir_lookup_cookie cookie;
+       struct afs_vnode *vnode, *dir;
+       struct afs_fid fid;
        struct dentry *parent;
-       struct inode *inode, *dir;
-       unsigned fpos;
+       struct key *key;
+       void *dir_version;
        int ret;
 
-       _enter("{sb=%p n=%s},", dentry->d_sb, dentry->d_name.name);
+       vnode = AFS_FS_I(dentry->d_inode);
 
-       /* lock down the parent dentry so we can peer at it */
-       parent = dget_parent(dentry->d_parent);
+       if (dentry->d_inode)
+               _enter("{v={%x:%u} n=%s fl=%lx},",
+                      vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name,
+                      vnode->flags);
+       else
+               _enter("{neg n=%s}", dentry->d_name.name);
 
-       dir = parent->d_inode;
-       inode = dentry->d_inode;
+       key = afs_request_key(AFS_FS_S(dentry->d_sb)->volume->cell);
+       if (IS_ERR(key))
+               key = NULL;
 
-       /* handle a negative dentry */
-       if (!inode)
+       /* lock down the parent dentry so we can peer at it */
+       parent = dget_parent(dentry);
+       if (!parent->d_inode)
                goto out_bad;
 
-       /* handle a bad inode */
-       if (is_bad_inode(inode)) {
-               printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
-                      dentry->d_parent->d_name.name, dentry->d_name.name);
-               goto out_bad;
-       }
+       dir = AFS_FS_I(parent->d_inode);
 
-       /* force a full look up if the parent directory changed since last the
-        * server was consulted
-        * - otherwise this inode must still exist, even if the inode details
-        *   themselves have changed
-        */
-       if (AFS_FS_I(dir)->flags & AFS_VNODE_CHANGED)
-               afs_vnode_fetch_status(AFS_FS_I(dir));
+       /* validate the parent directory */
+       if (test_bit(AFS_VNODE_MODIFIED, &dir->flags))
+               afs_validate(dir, key);
 
-       if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
+       if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
                _debug("%s: parent dir deleted", dentry->d_name.name);
                goto out_bad;
        }
 
-       if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) {
-               _debug("%s: file already deleted", dentry->d_name.name);
-               goto out_bad;
-       }
-
-       if ((unsigned long) dentry->d_fsdata !=
-           (unsigned long) AFS_FS_I(dir)->status.version) {
-               _debug("%s: parent changed %lu -> %u",
-                      dentry->d_name.name,
-                      (unsigned long) dentry->d_fsdata,
-                      (unsigned) AFS_FS_I(dir)->status.version);
+       dir_version = (void *) (unsigned long) dir->status.data_version;
+       if (dentry->d_fsdata == dir_version)
+               goto out_valid; /* the dir contents are unchanged */
 
-               /* search the directory for this vnode */
-               cookie.name     = dentry->d_name.name;
-               cookie.nlen     = dentry->d_name.len;
-               cookie.fid.vid  = AFS_FS_I(inode)->volume->vid;
-               cookie.found    = 0;
+       _debug("dir modified");
 
-               fpos = 0;
-               ret = afs_dir_iterate(dir, &fpos, &cookie,
-                                     afs_dir_lookup_filldir);
-               if (ret < 0) {
-                       _debug("failed to iterate dir %s: %d",
-                              parent->d_name.name, ret);
+       /* search the directory for this vnode */
+       ret = afs_do_lookup(&dir->vfs_inode, dentry, &fid, key);
+       switch (ret) {
+       case 0:
+               /* the filename maps to something */
+               if (!dentry->d_inode)
+                       goto out_bad;
+               if (is_bad_inode(dentry->d_inode)) {
+                       printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
+                              parent->d_name.name, dentry->d_name.name);
                        goto out_bad;
-               }
-
-               if (!cookie.found) {
-                       _debug("%s: dirent not found", dentry->d_name.name);
-                       goto not_found;
                }
 
                /* if the vnode ID has changed, then the dirent points to a
                 * different file */
-               if (cookie.fid.vnode != AFS_FS_I(inode)->fid.vnode) {
-                       _debug("%s: dirent changed", dentry->d_name.name);
+               if (fid.vnode != vnode->fid.vnode) {
+                       _debug("%s: dirent changed [%u != %u]",
+                              dentry->d_name.name, fid.vnode,
+                              vnode->fid.vnode);
                        goto not_found;
                }
 
                /* if the vnode ID uniqifier has changed, then the file has
-                * been deleted */
-               if (cookie.fid.unique != AFS_FS_I(inode)->fid.unique) {
+                * been deleted and replaced, and the original vnode ID has
+                * been reused */
+               if (fid.unique != vnode->fid.unique) {
                        _debug("%s: file deleted (uq %u -> %u I:%lu)",
-                              dentry->d_name.name,
-                              cookie.fid.unique,
-                              AFS_FS_I(inode)->fid.unique,
-                              inode->i_version);
-                       spin_lock(&AFS_FS_I(inode)->lock);
-                       AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED;
-                       spin_unlock(&AFS_FS_I(inode)->lock);
-                       invalidate_remote_inode(inode);
-                       goto out_bad;
+                              dentry->d_name.name, fid.unique,
+                              vnode->fid.unique, dentry->d_inode->i_version);
+                       spin_lock(&vnode->lock);
+                       set_bit(AFS_VNODE_DELETED, &vnode->flags);
+                       spin_unlock(&vnode->lock);
+                       goto not_found;
                }
+               goto out_valid;
+
+       case -ENOENT:
+               /* the filename is unknown */
+               _debug("%s: dirent not found", dentry->d_name.name);
+               if (dentry->d_inode)
+                       goto not_found;
+               goto out_valid;
 
-               dentry->d_fsdata =
-                       (void *) (unsigned long) AFS_FS_I(dir)->status.version;
+       default:
+               _debug("failed to iterate dir %s: %d",
+                      parent->d_name.name, ret);
+               goto out_bad;
        }
 
- out_valid:
+out_valid:
+       dentry->d_fsdata = dir_version;
+out_skip:
        dput(parent);
+       key_put(key);
        _leave(" = 1 [valid]");
        return 1;
 
        /* the dirent, if it exists, now points to a different vnode */
- not_found:
+not_found:
        spin_lock(&dentry->d_lock);
        dentry->d_flags |= DCACHE_NFSFS_RENAMED;
        spin_unlock(&dentry->d_lock);
 
- out_bad:
-       if (inode) {
+out_bad:
+       if (dentry->d_inode) {
                /* don't unhash if we have submounts */
                if (have_submounts(dentry))
-                       goto out_valid;
+                       goto out_skip;
        }
 
-       shrink_dcache_parent(dentry);
-
        _debug("dropping dentry %s/%s",
-              dentry->d_parent->d_name.name, dentry->d_name.name);
+              parent->d_name.name, dentry->d_name.name);
+       shrink_dcache_parent(dentry);
        d_drop(dentry);
-
        dput(parent);
+       key_put(key);
 
        _leave(" = 0 [bad]");
        return 0;
-} /* end afs_d_revalidate() */
+}
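/*
 * Illustrative sketch (not from the patch; hypothetical demo_* names): the fast
 * path in afs_d_revalidate() above hinges on caching the parent directory's
 * data version in dentry->d_fsdata and skipping the directory search while that
 * version is unchanged.  A minimal stand-alone model of the check:
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_dentry {
        void *d_fsdata;                 /* last directory data version seen */
};

/* record the directory version a successful lookup was based on */
static void demo_note_dir_version(struct demo_dentry *d, uint64_t data_version)
{
        d->d_fsdata = (void *)(unsigned long)data_version;
}

/* true if the cached name->vnode mapping can still be trusted without a search */
static bool demo_dir_unchanged(const struct demo_dentry *d, uint64_t data_version)
{
        return d->d_fsdata == (void *)(unsigned long)data_version;
}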
 
-/*****************************************************************************/
 /*
  * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
  * sleep)
@@ -649,15 +698,444 @@ static int afs_d_delete(struct dentry *dentry)
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
                goto zap;
 
-       if (dentry->d_inode) {
-               if (AFS_FS_I(dentry->d_inode)->flags & AFS_VNODE_DELETED)
+       if (dentry->d_inode &&
+           test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags))
                        goto zap;
-       }
 
        _leave(" = 0 [keep]");
        return 0;
 
- zap:
+zap:
        _leave(" = 1 [zap]");
        return 1;
-} /* end afs_d_delete() */
+}
+
+/*
+ * handle dentry release
+ */
+static void afs_d_release(struct dentry *dentry)
+{
+       _enter("%s", dentry->d_name.name);
+}
+
+/*
+ * create a directory on an AFS filesystem
+ */
+static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+       struct afs_file_status status;
+       struct afs_callback cb;
+       struct afs_server *server;
+       struct afs_vnode *dvnode, *vnode;
+       struct afs_fid fid;
+       struct inode *inode;
+       struct key *key;
+       int ret;
+
+       dvnode = AFS_FS_I(dir);
+
+       _enter("{%x:%d},{%s},%o",
+              dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
+
+       ret = -ENAMETOOLONG;
+       if (dentry->d_name.len > 255)
+               goto error;
+
+       key = afs_request_key(dvnode->volume->cell);
+       if (IS_ERR(key)) {
+               ret = PTR_ERR(key);
+               goto error;
+       }
+
+       mode |= S_IFDIR;
+       ret = afs_vnode_create(dvnode, key, dentry->d_name.name,
+                              mode, &fid, &status, &cb, &server);
+       if (ret < 0)
+               goto mkdir_error;
+
+       inode = afs_iget(dir->i_sb, key, &fid, &status, &cb);
+       if (IS_ERR(inode)) {
+               /* ENOMEM at a really inconvenient time - just abandon the new
+                * directory on the server */
+               ret = PTR_ERR(inode);
+               goto iget_error;
+       }
+
+       /* apply the status report we've got for the new vnode */
+       vnode = AFS_FS_I(inode);
+       spin_lock(&vnode->lock);
+       vnode->update_cnt++;
+       spin_unlock(&vnode->lock);
+       afs_vnode_finalise_status_update(vnode, server);
+       afs_put_server(server);
+
+       d_instantiate(dentry, inode);
+       if (d_unhashed(dentry)) {
+               _debug("not hashed");
+               d_rehash(dentry);
+       }
+       key_put(key);
+       _leave(" = 0");
+       return 0;
+
+iget_error:
+       afs_put_server(server);
+mkdir_error:
+       key_put(key);
+error:
+       d_drop(dentry);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * remove a directory from an AFS filesystem
+ */
+static int afs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+       struct afs_vnode *dvnode, *vnode;
+       struct key *key;
+       int ret;
+
+       dvnode = AFS_FS_I(dir);
+
+       _enter("{%x:%d},{%s}",
+              dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
+
+       ret = -ENAMETOOLONG;
+       if (dentry->d_name.len > 255)
+               goto error;
+
+       key = afs_request_key(dvnode->volume->cell);
+       if (IS_ERR(key)) {
+               ret = PTR_ERR(key);
+               goto error;
+       }
+
+       ret = afs_vnode_remove(dvnode, key, dentry->d_name.name, true);
+       if (ret < 0)
+               goto rmdir_error;
+
+       if (dentry->d_inode) {
+               vnode = AFS_FS_I(dentry->d_inode);
+               clear_nlink(&vnode->vfs_inode);
+               set_bit(AFS_VNODE_DELETED, &vnode->flags);
+               afs_discard_callback_on_delete(vnode);
+       }
+
+       key_put(key);
+       _leave(" = 0");
+       return 0;
+
+rmdir_error:
+       key_put(key);
+error:
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * remove a file from an AFS filesystem
+ */
+static int afs_unlink(struct inode *dir, struct dentry *dentry)
+{
+       struct afs_vnode *dvnode, *vnode;
+       struct key *key;
+       int ret;
+
+       dvnode = AFS_FS_I(dir);
+
+       _enter("{%x:%d},{%s}",
+              dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
+
+       ret = -ENAMETOOLONG;
+       if (dentry->d_name.len > 255)
+               goto error;
+
+       key = afs_request_key(dvnode->volume->cell);
+       if (IS_ERR(key)) {
+               ret = PTR_ERR(key);
+               goto error;
+       }
+
+       if (dentry->d_inode) {
+               vnode = AFS_FS_I(dentry->d_inode);
+
+               /* make sure we have a callback promise on the victim */
+               ret = afs_validate(vnode, key);
+               if (ret < 0)
+                       goto error;
+       }
+
+       ret = afs_vnode_remove(dvnode, key, dentry->d_name.name, false);
+       if (ret < 0)
+               goto remove_error;
+
+       if (dentry->d_inode) {
+               /* if the file wasn't deleted due to excess hard links, the
+                * fileserver will break the callback promise on the file - if
+                * it had one - before it returns to us, and if it was deleted,
+                * it won't
+                *
+                * however, if we didn't have a callback promise outstanding,
+                * or it was outstanding on a different server, then it won't
+                * break it either...
+                */
+               vnode = AFS_FS_I(dentry->d_inode);
+               if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+                       _debug("AFS_VNODE_DELETED");
+               if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags))
+                       _debug("AFS_VNODE_CB_BROKEN");
+               set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
+               ret = afs_validate(vnode, key);
+               _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret);
+       }
+
+       key_put(key);
+       _leave(" = 0");
+       return 0;
+
+remove_error:
+       key_put(key);
+error:
+       _leave(" = %d", ret);
+       return ret;
+}
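/*
 * Illustrative sketch (not from the patch; hypothetical names): the long
 * comment inside afs_unlink() above boils down to "we cannot tell locally
 * whether the last link just went away, so force a fresh status fetch and let
 * the refreshed link count decide".  A stand-alone model of that decision:
 */
#include <stdbool.h>

struct demo_status {
        unsigned int nlink;             /* link count as refreshed from the server */
        bool deleted;                   /* treat the vnode as gone */
};

static void demo_after_server_remove(struct demo_status *st)
{
        if (st->nlink == 0)
                st->deleted = true;     /* last link removed: file is really gone */
        /* otherwise other hard links remain and the vnode stays live */
}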
+
+/*
+ * create a regular file on an AFS filesystem
+ */
+static int afs_create(struct inode *dir, struct dentry *dentry, int mode,
+                     struct nameidata *nd)
+{
+       struct afs_file_status status;
+       struct afs_callback cb;
+       struct afs_server *server;
+       struct afs_vnode *dvnode, *vnode;
+       struct afs_fid fid;
+       struct inode *inode;
+       struct key *key;
+       int ret;
+
+       dvnode = AFS_FS_I(dir);
+
+       _enter("{%x:%d},{%s},%o,",
+              dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
+
+       ret = -ENAMETOOLONG;
+       if (dentry->d_name.len > 255)
+               goto error;
+
+       key = afs_request_key(dvnode->volume->cell);
+       if (IS_ERR(key)) {
+               ret = PTR_ERR(key);
+               goto error;
+       }
+
+       mode |= S_IFREG;
+       ret = afs_vnode_create(dvnode, key, dentry->d_name.name,
+                              mode, &fid, &status, &cb, &server);
+       if (ret < 0)
+               goto create_error;
+
+       inode = afs_iget(dir->i_sb, key, &fid, &status, &cb);
+       if (IS_ERR(inode)) {
+               /* ENOMEM at a really inconvenient time - just abandon the new
+                * file on the server */
+               ret = PTR_ERR(inode);
+               goto iget_error;
+       }
+
+       /* apply the status report we've got for the new vnode */
+       vnode = AFS_FS_I(inode);
+       spin_lock(&vnode->lock);
+       vnode->update_cnt++;
+       spin_unlock(&vnode->lock);
+       afs_vnode_finalise_status_update(vnode, server);
+       afs_put_server(server);
+
+       d_instantiate(dentry, inode);
+       if (d_unhashed(dentry)) {
+               _debug("not hashed");
+               d_rehash(dentry);
+       }
+       key_put(key);
+       _leave(" = 0");
+       return 0;
+
+iget_error:
+       afs_put_server(server);
+create_error:
+       key_put(key);
+error:
+       d_drop(dentry);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * create a hard link between files in an AFS filesystem
+ */
+static int afs_link(struct dentry *from, struct inode *dir,
+                   struct dentry *dentry)
+{
+       struct afs_vnode *dvnode, *vnode;
+       struct key *key;
+       int ret;
+
+       vnode = AFS_FS_I(from->d_inode);
+       dvnode = AFS_FS_I(dir);
+
+       _enter("{%x:%d},{%x:%d},{%s}",
+              vnode->fid.vid, vnode->fid.vnode,
+              dvnode->fid.vid, dvnode->fid.vnode,
+              dentry->d_name.name);
+
+       ret = -ENAMETOOLONG;
+       if (dentry->d_name.len > 255)
+               goto error;
+
+       key = afs_request_key(dvnode->volume->cell);
+       if (IS_ERR(key)) {
+               ret = PTR_ERR(key);
+               goto error;
+       }
+
+       ret = afs_vnode_link(dvnode, vnode, key, dentry->d_name.name);
+       if (ret < 0)
+               goto link_error;
+
+       atomic_inc(&vnode->vfs_inode.i_count);
+       d_instantiate(dentry, &vnode->vfs_inode);
+       key_put(key);
+       _leave(" = 0");
+       return 0;
+
+link_error:
+       key_put(key);
+error:
+       d_drop(dentry);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * create a symlink in an AFS filesystem
+ */
+static int afs_symlink(struct inode *dir, struct dentry *dentry,
+                      const char *content)
+{
+       struct afs_file_status status;
+       struct afs_server *server;
+       struct afs_vnode *dvnode, *vnode;
+       struct afs_fid fid;
+       struct inode *inode;
+       struct key *key;
+       int ret;
+
+       dvnode = AFS_FS_I(dir);
+
+       _enter("{%x:%d},{%s},%s",
+              dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name,
+              content);
+
+       ret = -ENAMETOOLONG;
+       if (dentry->d_name.len > 255)
+               goto error;
+
+       ret = -EINVAL;
+       if (strlen(content) > 1023)
+               goto error;
+
+       key = afs_request_key(dvnode->volume->cell);
+       if (IS_ERR(key)) {
+               ret = PTR_ERR(key);
+               goto error;
+       }
+
+       ret = afs_vnode_symlink(dvnode, key, dentry->d_name.name, content,
+                               &fid, &status, &server);
+       if (ret < 0)
+               goto create_error;
+
+       inode = afs_iget(dir->i_sb, key, &fid, &status, NULL);
+       if (IS_ERR(inode)) {
+               /* ENOMEM at a really inconvenient time - just abandon the new
+                * symlink on the server */
+               ret = PTR_ERR(inode);
+               goto iget_error;
+       }
+
+       /* apply the status report we've got for the new vnode */
+       vnode = AFS_FS_I(inode);
+       spin_lock(&vnode->lock);
+       vnode->update_cnt++;
+       spin_unlock(&vnode->lock);
+       afs_vnode_finalise_status_update(vnode, server);
+       afs_put_server(server);
+
+       d_instantiate(dentry, inode);
+       if (d_unhashed(dentry)) {
+               _debug("not hashed");
+               d_rehash(dentry);
+       }
+       key_put(key);
+       _leave(" = 0");
+       return 0;
+
+iget_error:
+       afs_put_server(server);
+create_error:
+       key_put(key);
+error:
+       d_drop(dentry);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * rename a file in an AFS filesystem and/or move it between directories
+ */
+static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
+                     struct inode *new_dir, struct dentry *new_dentry)
+{
+       struct afs_vnode *orig_dvnode, *new_dvnode, *vnode;
+       struct key *key;
+       int ret;
+
+       vnode = AFS_FS_I(old_dentry->d_inode);
+       orig_dvnode = AFS_FS_I(old_dir);
+       new_dvnode = AFS_FS_I(new_dir);
+
+       _enter("{%x:%d},{%x:%d},{%x:%d},{%s}",
+              orig_dvnode->fid.vid, orig_dvnode->fid.vnode,
+              vnode->fid.vid, vnode->fid.vnode,
+              new_dvnode->fid.vid, new_dvnode->fid.vnode,
+              new_dentry->d_name.name);
+
+       ret = -ENAMETOOLONG;
+       if (new_dentry->d_name.len > 255)
+               goto error;
+
+       key = afs_request_key(orig_dvnode->volume->cell);
+       if (IS_ERR(key)) {
+               ret = PTR_ERR(key);
+               goto error;
+       }
+
+       ret = afs_vnode_rename(orig_dvnode, new_dvnode, key,
+                              old_dentry->d_name.name,
+                              new_dentry->d_name.name);
+       if (ret < 0)
+               goto rename_error;
+       key_put(key);
+       _leave(" = 0");
+       return 0;
+
+rename_error:
+       key_put(key);
+error:
+       d_drop(new_dentry);
+       _leave(" = %d", ret);
+       return ret;
+}
diff --git a/fs/afs/errors.h b/fs/afs/errors.h
deleted file mode 100644 (file)
index 574d94a..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/* errors.h: AFS abort/error codes
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_ERRORS_H
-#define _LINUX_AFS_ERRORS_H
-
-#include "types.h"
-
-/* file server abort codes */
-typedef enum {
-       VSALVAGE        = 101,  /* volume needs salvaging */
-       VNOVNODE        = 102,  /* no such file/dir (vnode) */
-       VNOVOL          = 103,  /* no such volume or volume unavailable */
-       VVOLEXISTS      = 104,  /* volume name already exists */
-       VNOSERVICE      = 105,  /* volume not currently in service */
-       VOFFLINE        = 106,  /* volume is currently offline (more info available [VVL-spec]) */
-       VONLINE         = 107,  /* volume is already online */
-       VDISKFULL       = 108,  /* disk partition is full */
-       VOVERQUOTA      = 109,  /* volume's maximum quota exceeded */
-       VBUSY           = 110,  /* volume is temporarily unavailable */
-       VMOVED          = 111,  /* volume moved to new server - ask this FS where */
-} afs_rxfs_abort_t;
-
-extern int afs_abort_to_error(int abortcode);
-
-#endif /* _LINUX_AFS_ERRORS_H */
diff --git a/fs/afs/file.c b/fs/afs/file.c
index b176345..ae25649 100644 (file)
@@ -1,6 +1,6 @@
-/* file.c: AFS filesystem file handling
+/* AFS filesystem file handling
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
-#include "volume.h"
-#include "vnode.h"
-#include <rxrpc/call.h>
 #include "internal.h"
 
-#if 0
-static int afs_file_open(struct inode *inode, struct file *file);
-static int afs_file_release(struct inode *inode, struct file *file);
-#endif
-
 static int afs_file_readpage(struct file *file, struct page *page);
 static void afs_file_invalidatepage(struct page *page, unsigned long offset);
 static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
 
+const struct file_operations afs_file_operations = {
+       .open           = afs_open,
+       .release        = afs_release,
+       .llseek         = generic_file_llseek,
+       .read           = do_sync_read,
+       .aio_read       = generic_file_aio_read,
+       .mmap           = generic_file_readonly_mmap,
+       .sendfile       = generic_file_sendfile,
+};
+
 const struct inode_operations afs_file_inode_operations = {
        .getattr        = afs_inode_getattr,
+       .permission     = afs_permission,
 };
 
 const struct address_space_operations afs_fs_aops = {
@@ -40,7 +43,48 @@ const struct address_space_operations afs_fs_aops = {
        .invalidatepage = afs_file_invalidatepage,
 };
 
-/*****************************************************************************/
+/*
+ * open an AFS file or directory and attach a key to it
+ */
+int afs_open(struct inode *inode, struct file *file)
+{
+       struct afs_vnode *vnode = AFS_FS_I(inode);
+       struct key *key;
+       int ret;
+
+       _enter("{%x:%x},", vnode->fid.vid, vnode->fid.vnode);
+
+       key = afs_request_key(vnode->volume->cell);
+       if (IS_ERR(key)) {
+               _leave(" = %ld [key]", PTR_ERR(key));
+               return PTR_ERR(key);
+       }
+
+       ret = afs_validate(vnode, key);
+       if (ret < 0) {
+               _leave(" = %d [val]", ret);
+               return ret;
+       }
+
+       file->private_data = key;
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * release an AFS file or directory and discard its key
+ */
+int afs_release(struct inode *inode, struct file *file)
+{
+       struct afs_vnode *vnode = AFS_FS_I(inode);
+
+       _enter("{%x:%x},", vnode->fid.vid, vnode->fid.vnode);
+
+       key_put(file->private_data);
+       _leave(" = 0");
+       return 0;
+}
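/*
 * Illustrative sketch (not from the patch; hypothetical refcounted credential
 * type): afs_open() above pins the caller's key in file->private_data so that
 * later reads can reuse it, and afs_release() drops that reference.  The same
 * pattern stand-alone:
 */
#include <stdlib.h>

struct demo_cred { unsigned int usage; };
struct demo_file { struct demo_cred *private_data; };

static struct demo_cred *demo_cred_get(struct demo_cred *c)
{
        c->usage++;                     /* one reference per open file */
        return c;
}

static void demo_cred_put(struct demo_cred *c)
{
        if (c && --c->usage == 0)
                free(c);
}

static int demo_open(struct demo_file *f, struct demo_cred *callers_cred)
{
        f->private_data = demo_cred_get(callers_cred);
        return 0;
}

static void demo_release(struct demo_file *f)
{
        demo_cred_put(f->private_data); /* matches the get in demo_open() */
        f->private_data = NULL;
}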
+
 /*
  * deal with notification that a page was read from the cache
  */
@@ -58,10 +102,9 @@ static void afs_file_readpage_read_complete(void *cookie_data,
                SetPageUptodate(page);
        unlock_page(page);
 
-} /* end afs_file_readpage_read_complete() */
+}
 #endif
 
-/*****************************************************************************/
 /*
  * deal with notification that a page was written to the cache
  */
@@ -74,41 +117,38 @@ static void afs_file_readpage_write_complete(void *cookie_data,
        _enter("%p,%p,%p,%d", cookie_data, page, data, error);
 
        unlock_page(page);
-
-} /* end afs_file_readpage_write_complete() */
+}
 #endif
 
-/*****************************************************************************/
 /*
  * AFS read page from file (or symlink)
  */
 static int afs_file_readpage(struct file *file, struct page *page)
 {
-       struct afs_rxfs_fetch_descriptor desc;
-#ifdef AFS_CACHING_SUPPORT
-       struct cachefs_page *pageio;
-#endif
        struct afs_vnode *vnode;
        struct inode *inode;
+       struct key *key;
+       size_t len;
+       off_t offset;
        int ret;
 
        inode = page->mapping->host;
 
-       _enter("{%lu},{%lu}", inode->i_ino, page->index);
+       ASSERT(file != NULL);
+       key = file->private_data;
+       ASSERT(key != NULL);
+
+       _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
 
        vnode = AFS_FS_I(inode);
 
        BUG_ON(!PageLocked(page));
 
        ret = -ESTALE;
-       if (vnode->flags & AFS_VNODE_DELETED)
+       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
                goto error;
 
 #ifdef AFS_CACHING_SUPPORT
-       ret = cachefs_page_get_private(page, &pageio, GFP_NOIO);
-       if (ret < 0)
-               goto error;
-
        /* is it cached? */
        ret = cachefs_read_or_alloc_page(vnode->cache,
                                         page,
@@ -132,26 +172,19 @@ static int afs_file_readpage(struct file *file, struct page *page)
        case -ENOBUFS:
        case -ENODATA:
        default:
-               desc.fid        = vnode->fid;
-               desc.offset     = page->index << PAGE_CACHE_SHIFT;
-               desc.size       = min((size_t) (inode->i_size - desc.offset),
-                                     (size_t) PAGE_SIZE);
-               desc.buffer     = kmap(page);
-
-               clear_page(desc.buffer);
+               offset = page->index << PAGE_CACHE_SHIFT;
+               len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
 
                /* read the contents of the file from the server into the
                 * page */
-               ret = afs_vnode_fetch_data(vnode, &desc);
-               kunmap(page);
+               ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
                if (ret < 0) {
-                       if (ret==-ENOENT) {
+                       if (ret == -ENOENT) {
                                _debug("got NOENT from server"
                                       " - marking file deleted and stale");
-                               vnode->flags |= AFS_VNODE_DELETED;
+                               set_bit(AFS_VNODE_DELETED, &vnode->flags);
                                ret = -ESTALE;
                        }
-
 #ifdef AFS_CACHING_SUPPORT
                        cachefs_uncache_page(vnode->cache, page);
 #endif
@@ -178,16 +211,13 @@ static int afs_file_readpage(struct file *file, struct page *page)
        _leave(" = 0");
        return 0;
 
- error:
+error:
        SetPageError(page);
        unlock_page(page);
-
        _leave(" = %d", ret);
        return ret;
+}
 
-} /* end afs_file_readpage() */
-
-/*****************************************************************************/
 /*
  * get a page cookie for the specified page
  */
@@ -202,10 +232,9 @@ int afs_cache_get_page_cookie(struct page *page,
 
        _leave(" = %d", ret);
        return ret;
-} /* end afs_cache_get_page_cookie() */
+}
 #endif
 
-/*****************************************************************************/
 /*
  * invalidate part or all of a page
  */
@@ -240,9 +269,8 @@ static void afs_file_invalidatepage(struct page *page, unsigned long offset)
        }
 
        _leave(" = %d", ret);
-} /* end afs_file_invalidatepage() */
+}
 
-/*****************************************************************************/
 /*
  * release a page and cleanup its private data
  */
@@ -267,4 +295,4 @@ static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
 
        _leave(" = 0");
        return 0;
-} /* end afs_file_releasepage() */
+}
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 61bc371..2393d2a 100644 (file)
@@ -1,6 +1,6 @@
-/* fsclient.c: AFS File Server client stubs
+/* AFS File Server client stubs
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/call.h>
-#include "fsclient.h"
-#include "cmservice.h"
-#include "vnode.h"
-#include "server.h"
-#include "errors.h"
+#include <linux/circ_buf.h>
 #include "internal.h"
+#include "afs_fs.h"
 
-#define FSFETCHSTATUS          132     /* AFS Fetch file status */
-#define FSFETCHDATA            130     /* AFS Fetch file data */
-#define FSGIVEUPCALLBACKS      147     /* AFS Discard callback promises */
-#define FSGETVOLUMEINFO                148     /* AFS Get root volume information */
-#define FSGETROOTVOLUME                151     /* AFS Get root volume name */
-#define FSLOOKUP               161     /* AFS lookup file in directory */
-
-/*****************************************************************************/
 /*
- * map afs abort codes to/from Linux error codes
- * - called with call->lock held
+ * decode an AFSFid block
  */
-static void afs_rxfs_aemap(struct rxrpc_call *call)
+static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid)
 {
-       switch (call->app_err_state) {
-       case RXRPC_ESTATE_LOCAL_ABORT:
-               call->app_abort_code = -call->app_errno;
-               break;
-       case RXRPC_ESTATE_PEER_ABORT:
-               call->app_errno = afs_abort_to_error(call->app_abort_code);
-               break;
-       default:
-               break;
-       }
-} /* end afs_rxfs_aemap() */
+       const __be32 *bp = *_bp;
+
+       fid->vid                = ntohl(*bp++);
+       fid->vnode              = ntohl(*bp++);
+       fid->unique             = ntohl(*bp++);
+       *_bp = bp;
+}
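/*
 * Illustrative sketch (not from the patch): an AFSFid is three 32-bit
 * big-endian words on the wire - volume ID, vnode number, uniquifier.  A
 * stand-alone userspace equivalent of the decoder above:
 */
#include <stdint.h>
#include <arpa/inet.h>                  /* ntohl() */

struct demo_fid { uint32_t vid, vnode, unique; };

static const uint32_t *demo_decode_fid(const uint32_t *bp, struct demo_fid *fid)
{
        fid->vid    = ntohl(*bp++);
        fid->vnode  = ntohl(*bp++);
        fid->unique = ntohl(*bp++);
        return bp;                      /* caller continues decoding from here */
}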
 
-/*****************************************************************************/
 /*
- * get the root volume name from a fileserver
- * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
+ * decode an AFSFetchStatus block
  */
-#if 0
-int afs_rxfs_get_root_volume(struct afs_server *server,
-                            char *buf, size_t *buflen)
+static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
+                                     struct afs_file_status *status,
+                                     struct afs_vnode *vnode)
 {
-       struct rxrpc_connection *conn;
-       struct rxrpc_call *call;
-       struct kvec piov[2];
-       size_t sent;
-       int ret;
-       u32 param[1];
+       const __be32 *bp = *_bp;
+       umode_t mode;
+       u64 data_version, size;
+       u32 changed = 0; /* becomes non-zero if ctime-type changes seen */
+
+#define EXTRACT(DST)                           \
+       do {                                    \
+               u32 x = ntohl(*bp++);           \
+               changed |= DST - x;             \
+               DST = x;                        \
+       } while (0)
+
+       status->if_version = ntohl(*bp++);
+       EXTRACT(status->type);
+       EXTRACT(status->nlink);
+       size = ntohl(*bp++);
+       data_version = ntohl(*bp++);
+       EXTRACT(status->author);
+       EXTRACT(status->owner);
+       EXTRACT(status->caller_access); /* call ticket dependent */
+       EXTRACT(status->anon_access);
+       EXTRACT(status->mode);
+       EXTRACT(status->parent.vnode);
+       EXTRACT(status->parent.unique);
+       bp++; /* seg size */
+       status->mtime_client = ntohl(*bp++);
+       status->mtime_server = ntohl(*bp++);
+       EXTRACT(status->group);
+       bp++; /* sync counter */
+       data_version |= (u64) ntohl(*bp++) << 32;
+       bp++; /* lock count */
+       size |= (u64) ntohl(*bp++) << 32;
+       bp++; /* spare 4 */
+       *_bp = bp;
+
+       if (size != status->size) {
+               status->size = size;
+               changed |= true;
+       }
+       status->mode &= S_IALLUGO;
+
+       _debug("vnode time %lx, %lx",
+              status->mtime_client, status->mtime_server);
+
+       if (vnode) {
+               status->parent.vid = vnode->fid.vid;
+               if (changed && !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
+                       _debug("vnode changed");
+                       i_size_write(&vnode->vfs_inode, size);
+                       vnode->vfs_inode.i_uid = status->owner;
+                       vnode->vfs_inode.i_gid = status->group;
+                       vnode->vfs_inode.i_version = vnode->fid.unique;
+                       vnode->vfs_inode.i_nlink = status->nlink;
+
+                       mode = vnode->vfs_inode.i_mode;
+                       mode &= ~S_IALLUGO;
+                       mode |= status->mode;
+                       barrier();
+                       vnode->vfs_inode.i_mode = mode;
+               }
 
-       DECLARE_WAITQUEUE(myself, current);
+               vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
+               vnode->vfs_inode.i_mtime        = vnode->vfs_inode.i_ctime;
+               vnode->vfs_inode.i_atime        = vnode->vfs_inode.i_ctime;
+       }
 
-       kenter("%p,%p,%u",server, buf, *buflen);
+       if (status->data_version != data_version) {
+               status->data_version = data_version;
+               if (vnode && !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
+                       _debug("vnode modified %llx on {%x:%u}",
+                              (unsigned long long) data_version,
+                              vnode->fid.vid, vnode->fid.vnode);
+                       set_bit(AFS_VNODE_MODIFIED, &vnode->flags);
+                       set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
+               }
+       }
+}
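/*
 * Illustrative sketch (not from the patch): the EXTRACT() macro above folds
 * "did this field differ from the cached copy?" into a single accumulator while
 * decoding, so the VFS inode is only rewritten when something actually changed.
 * The same idea stand-alone, using XOR rather than the patch's subtraction
 * (both are non-zero exactly when the two values differ):
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_status { uint32_t nlink, owner, mode; };

/* merge three already-byteswapped words into st; report whether anything changed */
static bool demo_merge_status(struct demo_status *st, const uint32_t wire[3])
{
        uint32_t changed = 0;

#define DEMO_EXTRACT(dst, src)  do { changed |= (dst) ^ (src); (dst) = (src); } while (0)
        DEMO_EXTRACT(st->nlink, wire[0]);
        DEMO_EXTRACT(st->owner, wire[1]);
        DEMO_EXTRACT(st->mode,  wire[2]);
#undef DEMO_EXTRACT

        return changed != 0;
}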
 
-       /* get hold of the fileserver connection */
-       ret = afs_server_get_fsconn(server, &conn);
-       if (ret < 0)
-               goto out;
+/*
+ * decode an AFSCallBack block
+ */
+static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
+{
+       const __be32 *bp = *_bp;
 
-       /* create a call through that connection */
-       ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call);
-       if (ret < 0) {
-               printk("kAFS: Unable to create call: %d\n", ret);
-               goto out_put_conn;
-       }
-       call->app_opcode = FSGETROOTVOLUME;
+       vnode->cb_version       = ntohl(*bp++);
+       vnode->cb_expiry        = ntohl(*bp++);
+       vnode->cb_type          = ntohl(*bp++);
+       vnode->cb_expires       = vnode->cb_expiry + get_seconds();
+       *_bp = bp;
+}
 
-       /* we want to get event notifications from the call */
-       add_wait_queue(&call->waitq, &myself);
+static void xdr_decode_AFSCallBack_raw(const __be32 **_bp,
+                                      struct afs_callback *cb)
+{
+       const __be32 *bp = *_bp;
 
-       /* marshall the parameters */
-       param[0] = htonl(FSGETROOTVOLUME);
-
-       piov[0].iov_len = sizeof(param);
-       piov[0].iov_base = param;
-
-       /* send the parameters to the server */
-       ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
-                                   0, &sent);
-       if (ret < 0)
-               goto abort;
-
-       /* wait for the reply to completely arrive */
-       for (;;) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
-                   signal_pending(current))
-                       break;
-               schedule();
-       }
-       set_current_state(TASK_RUNNING);
+       cb->version     = ntohl(*bp++);
+       cb->expiry      = ntohl(*bp++);
+       cb->type        = ntohl(*bp++);
+       *_bp = bp;
+}
 
-       ret = -EINTR;
-       if (signal_pending(current))
-               goto abort;
+/*
+ * decode an AFSVolSync block
+ */
+static void xdr_decode_AFSVolSync(const __be32 **_bp,
+                                 struct afs_volsync *volsync)
+{
+       const __be32 *bp = *_bp;
 
-       switch (call->app_call_state) {
-       case RXRPC_CSTATE_ERROR:
-               ret = call->app_errno;
-               kdebug("Got Error: %d", ret);
-               goto out_unwait;
+       volsync->creation = ntohl(*bp++);
+       bp++; /* spare2 */
+       bp++; /* spare3 */
+       bp++; /* spare4 */
+       bp++; /* spare5 */
+       bp++; /* spare6 */
+       *_bp = bp;
+}
 
-       case RXRPC_CSTATE_CLNT_GOT_REPLY:
-               /* read the reply */
-               kdebug("Got Reply: qty=%d", call->app_ready_qty);
+/*
+ * deliver reply data to an FS.FetchStatus
+ */
+static int afs_deliver_fs_fetch_status(struct afs_call *call,
+                                      struct sk_buff *skb, bool last)
+{
+       struct afs_vnode *vnode = call->reply;
+       const __be32 *bp;
 
-               ret = -EBADMSG;
-               if (call->app_ready_qty <= 4)
-                       goto abort;
+       _enter(",,%u", last);
 
-               ret = rxrpc_call_read_data(call, NULL, call->app_ready_qty, 0);
-               if (ret < 0)
-                       goto abort;
+       afs_transfer_reply(call, skb);
+       if (!last)
+               return 0;
 
-#if 0
-               /* unmarshall the reply */
-               bp = buffer;
-               for (loop = 0; loop < 65; loop++)
-                       entry->name[loop] = ntohl(*bp++);
-               entry->name[64] = 0;
+       if (call->reply_size != call->reply_max)
+               return -EBADMSG;
 
-               entry->type = ntohl(*bp++);
-               entry->num_servers = ntohl(*bp++);
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
+       xdr_decode_AFSCallBack(&bp, vnode);
+       if (call->reply2)
+               xdr_decode_AFSVolSync(&bp, call->reply2);
 
-               for (loop = 0; loop < 8; loop++)
-                       entry->servers[loop].addr.s_addr = *bp++;
+       _leave(" = 0 [done]");
+       return 0;
+}
 
-               for (loop = 0; loop < 8; loop++)
-                       entry->servers[loop].partition = ntohl(*bp++);
+/*
+ * FS.FetchStatus operation type
+ */
+static const struct afs_call_type afs_RXFSFetchStatus = {
+       .name           = "FS.FetchStatus",
+       .deliver        = afs_deliver_fs_fetch_status,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_flat_call_destructor,
+};
 
-               for (loop = 0; loop < 8; loop++)
-                       entry->servers[loop].flags = ntohl(*bp++);
+/*
+ * fetch the status information for a file
+ */
+int afs_fs_fetch_file_status(struct afs_server *server,
+                            struct key *key,
+                            struct afs_vnode *vnode,
+                            struct afs_volsync *volsync,
+                            const struct afs_wait_mode *wait_mode)
+{
+       struct afs_call *call;
+       __be32 *bp;
 
-               for (loop = 0; loop < 3; loop++)
-                       entry->volume_ids[loop] = ntohl(*bp++);
+       _enter(",%x,{%x:%d},,",
+              key_serial(key), vnode->fid.vid, vnode->fid.vnode);
 
-               entry->clone_id = ntohl(*bp++);
-               entry->flags = ntohl(*bp);
-#endif
+       call = afs_alloc_flat_call(&afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
+       if (!call)
+               return -ENOMEM;
 
-               /* success */
-               ret = 0;
-               goto out_unwait;
+       call->key = key;
+       call->reply = vnode;
+       call->reply2 = volsync;
+       call->service_id = FS_SERVICE;
+       call->port = htons(AFS_FS_PORT);
 
-       default:
-               BUG();
-       }
+       /* marshall the parameters */
+       bp = call->request;
+       bp[0] = htonl(FSFETCHSTATUS);
+       bp[1] = htonl(vnode->fid.vid);
+       bp[2] = htonl(vnode->fid.vnode);
+       bp[3] = htonl(vnode->fid.unique);
+
+       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+}
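/*
 * Illustrative sketch (not from the patch): the FS.FetchStatus request built
 * above is just four big-endian words - the opcode followed by the FID; the
 * opcode value 132 comes from the table deleted from the old fsclient.c.
 * A stand-alone illustration:
 */
#include <stdint.h>
#include <arpa/inet.h>                  /* htonl() */

#define DEMO_FSFETCHSTATUS 132

static void demo_marshal_fetch_status(uint32_t req[4], uint32_t vid,
                                      uint32_t vnode, uint32_t unique)
{
        req[0] = htonl(DEMO_FSFETCHSTATUS);
        req[1] = htonl(vid);
        req[2] = htonl(vnode);
        req[3] = htonl(unique);
}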
 
- abort:
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       rxrpc_call_abort(call, ret);
-       schedule();
- out_unwait:
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&call->waitq, &myself);
-       rxrpc_put_call(call);
- out_put_conn:
-       afs_server_release_fsconn(server, conn);
- out:
-       kleave("");
-       return ret;
-} /* end afs_rxfs_get_root_volume() */
-#endif
-
-/*****************************************************************************/
 /*
- * get information about a volume
+ * deliver reply data to an FS.FetchData
  */
-#if 0
-int afs_rxfs_get_volume_info(struct afs_server *server,
-                            const char *name,
-                            struct afs_volume_info *vinfo)
+static int afs_deliver_fs_fetch_data(struct afs_call *call,
+                                    struct sk_buff *skb, bool last)
 {
-       struct rxrpc_connection *conn;
-       struct rxrpc_call *call;
-       struct kvec piov[3];
-       size_t sent;
+       struct afs_vnode *vnode = call->reply;
+       const __be32 *bp;
+       struct page *page;
+       void *buffer;
        int ret;
-       u32 param[2], *bp, zero;
 
-       DECLARE_WAITQUEUE(myself, current);
+       _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+
+       switch (call->unmarshall) {
+       case 0:
+               call->offset = 0;
+               call->unmarshall++;
+
+               /* extract the returned data length */
+       case 1:
+               _debug("extract data length");
+               ret = afs_extract_data(call, skb, last, &call->tmp, 4);
+               switch (ret) {
+               case 0:         break;
+               case -EAGAIN:   return 0;
+               default:        return ret;
+               }
 
-       _enter("%p,%s,%p", server, name, vinfo);
+               call->count = ntohl(call->tmp);
+               _debug("DATA length: %u", call->count);
+               if (call->count > PAGE_SIZE)
+                       return -EBADMSG;
+               call->offset = 0;
+               call->unmarshall++;
+
+               if (call->count < PAGE_SIZE) {
+                       buffer = kmap_atomic(call->reply3, KM_USER0);
+                       memset(buffer + call->count, 0,
+                              PAGE_SIZE - call->count);
+                       kunmap_atomic(buffer, KM_USER0);
+               }
 
-       /* get hold of the fileserver connection */
-       ret = afs_server_get_fsconn(server, &conn);
-       if (ret < 0)
-               goto out;
+               /* extract the returned data */
+       case 2:
+               _debug("extract data");
+               page = call->reply3;
+               buffer = kmap_atomic(page, KM_USER0);
+               ret = afs_extract_data(call, skb, last, buffer, call->count);
+               kunmap_atomic(buffer, KM_USER0);
+               switch (ret) {
+               case 0:         break;
+               case -EAGAIN:   return 0;
+               default:        return ret;
+               }
 
-       /* create a call through that connection */
-       ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call);
-       if (ret < 0) {
-               printk("kAFS: Unable to create call: %d\n", ret);
-               goto out_put_conn;
-       }
-       call->app_opcode = FSGETVOLUMEINFO;
+               call->offset = 0;
+               call->unmarshall++;
+
+               /* extract the metadata */
+       case 3:
+               ret = afs_extract_data(call, skb, last, call->buffer,
+                                      (21 + 3 + 6) * 4);
+               switch (ret) {
+               case 0:         break;
+               case -EAGAIN:   return 0;
+               default:        return ret;
+               }
 
-       /* we want to get event notifications from the call */
-       add_wait_queue(&call->waitq, &myself);
+               bp = call->buffer;
+               xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
+               xdr_decode_AFSCallBack(&bp, vnode);
+               if (call->reply2)
+                       xdr_decode_AFSVolSync(&bp, call->reply2);
 
-       /* marshall the parameters */
-       piov[1].iov_len = strlen(name);
-       piov[1].iov_base = (char *) name;
-
-       zero = 0;
-       piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
-       piov[2].iov_base = &zero;
-
-       param[0] = htonl(FSGETVOLUMEINFO);
-       param[1] = htonl(piov[1].iov_len);
-
-       piov[0].iov_len = sizeof(param);
-       piov[0].iov_base = param;
-
-       /* send the parameters to the server */
-       ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
-                                   0, &sent);
-       if (ret < 0)
-               goto abort;
-
-       /* wait for the reply to completely arrive */
-       bp = rxrpc_call_alloc_scratch(call, 64);
-
-       ret = rxrpc_call_read_data(call, bp, 64,
-                                  RXRPC_CALL_READ_BLOCK |
-                                  RXRPC_CALL_READ_ALL);
-       if (ret < 0) {
-               if (ret == -ECONNABORTED) {
-                       ret = call->app_errno;
-                       goto out_unwait;
-               }
-               goto abort;
+               call->offset = 0;
+               call->unmarshall++;
+
+       case 4:
+               _debug("trailer");
+               if (skb->len != 0)
+                       return -EBADMSG;
+               break;
        }
 
-       /* unmarshall the reply */
-       vinfo->vid = ntohl(*bp++);
-       vinfo->type = ntohl(*bp++);
-
-       vinfo->type_vids[0] = ntohl(*bp++);
-       vinfo->type_vids[1] = ntohl(*bp++);
-       vinfo->type_vids[2] = ntohl(*bp++);
-       vinfo->type_vids[3] = ntohl(*bp++);
-       vinfo->type_vids[4] = ntohl(*bp++);
-
-       vinfo->nservers = ntohl(*bp++);
-       vinfo->servers[0].addr.s_addr = *bp++;
-       vinfo->servers[1].addr.s_addr = *bp++;
-       vinfo->servers[2].addr.s_addr = *bp++;
-       vinfo->servers[3].addr.s_addr = *bp++;
-       vinfo->servers[4].addr.s_addr = *bp++;
-       vinfo->servers[5].addr.s_addr = *bp++;
-       vinfo->servers[6].addr.s_addr = *bp++;
-       vinfo->servers[7].addr.s_addr = *bp++;
-
-       ret = -EBADMSG;
-       if (vinfo->nservers > 8)
-               goto abort;
-
-       /* success */
-       ret = 0;
-
- out_unwait:
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&call->waitq, &myself);
-       rxrpc_put_call(call);
- out_put_conn:
-       afs_server_release_fsconn(server, conn);
- out:
-       _leave("");
-       return ret;
-
- abort:
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       rxrpc_call_abort(call, ret);
-       schedule();
-       goto out_unwait;
-
-} /* end afs_rxfs_get_volume_info() */
-#endif
-
-/*****************************************************************************/
+       if (!last)
+               return 0;
+
+       _leave(" = 0 [done]");
+       return 0;
+}
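/*
 * Illustrative sketch (not from the patch; hypothetical demo_* names):
 * afs_deliver_fs_fetch_data() above is a resumable parser - call->unmarshall
 * records the current phase and the switch cases fall through, so a phase left
 * half-done by one skb is resumed by the next.  A stripped-down, stand-alone
 * model of the pattern:
 */
#include <stddef.h>

struct demo_parser {
        int phase;                      /* persists between calls */
        size_t have, want;              /* bytes consumed / needed in this phase */
};

/* feed len more bytes; returns 0 when parsing is complete, 1 to ask for more */
static int demo_deliver(struct demo_parser *p, size_t len)
{
        switch (p->phase) {
        case 0:                         /* set up the first phase */
                p->want = 4;            /* e.g. a 4-byte length word */
                p->have = 0;
                p->phase = 1;
                /* fall through */
        case 1:                         /* accumulate until this phase is satisfied */
                if (p->have + len < p->want) {
                        p->have += len;
                        return 1;       /* resume here on the next call */
                }
                len -= p->want - p->have;
                p->have = p->want;
                p->phase = 2;
                /* fall through */
        case 2:                         /* trailer: nothing further expected */
                return 0;
        }
        return 0;
}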
+
 /*
- * fetch the status information for a file
+ * FS.FetchData operation type
+ */
+static const struct afs_call_type afs_RXFSFetchData = {
+       .name           = "FS.FetchData",
+       .deliver        = afs_deliver_fs_fetch_data,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * fetch data from a file
  */
-int afs_rxfs_fetch_file_status(struct afs_server *server,
-                              struct afs_vnode *vnode,
-                              struct afs_volsync *volsync)
+int afs_fs_fetch_data(struct afs_server *server,
+                     struct key *key,
+                     struct afs_vnode *vnode,
+                     off_t offset, size_t length,
+                     struct page *buffer,
+                     const struct afs_wait_mode *wait_mode)
 {
-       struct afs_server_callslot callslot;
-       struct rxrpc_call *call;
-       struct kvec piov[1];
-       size_t sent;
-       int ret;
+       struct afs_call *call;
        __be32 *bp;
 
-       DECLARE_WAITQUEUE(myself, current);
+       _enter("");
 
-       _enter("%p,{%u,%u,%u}",
-              server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
+       call = afs_alloc_flat_call(&afs_RXFSFetchData, 24, (21 + 3 + 6) * 4);
+       if (!call)
+               return -ENOMEM;
 
-       /* get hold of the fileserver connection */
-       ret = afs_server_request_callslot(server, &callslot);
-       if (ret < 0)
-               goto out;
-
-       /* create a call through that connection */
-       ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap,
-                               &call);
-       if (ret < 0) {
-               printk("kAFS: Unable to create call: %d\n", ret);
-               goto out_put_conn;
-       }
-       call->app_opcode = FSFETCHSTATUS;
-
-       /* we want to get event notifications from the call */
-       add_wait_queue(&call->waitq, &myself);
+       call->key = key;
+       call->reply = vnode;
+       call->reply2 = NULL; /* volsync */
+       call->reply3 = buffer;
+       call->service_id = FS_SERVICE;
+       call->port = htons(AFS_FS_PORT);
 
        /* marshall the parameters */
-       bp = rxrpc_call_alloc_scratch(call, 16);
-       bp[0] = htonl(FSFETCHSTATUS);
+       bp = call->request;
+       bp[0] = htonl(FSFETCHDATA);
        bp[1] = htonl(vnode->fid.vid);
        bp[2] = htonl(vnode->fid.vnode);
        bp[3] = htonl(vnode->fid.unique);
+       bp[4] = htonl(offset);
+       bp[5] = htonl(length);
 
-       piov[0].iov_len = 16;
-       piov[0].iov_base = bp;
-
-       /* send the parameters to the server */
-       ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
-                                   0, &sent);
-       if (ret < 0)
-               goto abort;
-
-       /* wait for the reply to completely arrive */
-       bp = rxrpc_call_alloc_scratch(call, 120);
-
-       ret = rxrpc_call_read_data(call, bp, 120,
-                                  RXRPC_CALL_READ_BLOCK |
-                                  RXRPC_CALL_READ_ALL);
-       if (ret < 0) {
-               if (ret == -ECONNABORTED) {
-                       ret = call->app_errno;
-                       goto out_unwait;
-               }
-               goto abort;
-       }
+       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+}
 
-       /* unmarshall the reply */
-       vnode->status.if_version        = ntohl(*bp++);
-       vnode->status.type              = ntohl(*bp++);
-       vnode->status.nlink             = ntohl(*bp++);
-       vnode->status.size              = ntohl(*bp++);
-       vnode->status.version           = ntohl(*bp++);
-       vnode->status.author            = ntohl(*bp++);
-       vnode->status.owner             = ntohl(*bp++);
-       vnode->status.caller_access     = ntohl(*bp++);
-       vnode->status.anon_access       = ntohl(*bp++);
-       vnode->status.mode              = ntohl(*bp++);
-       vnode->status.parent.vid        = vnode->fid.vid;
-       vnode->status.parent.vnode      = ntohl(*bp++);
-       vnode->status.parent.unique     = ntohl(*bp++);
-       bp++; /* seg size */
-       vnode->status.mtime_client      = ntohl(*bp++);
-       vnode->status.mtime_server      = ntohl(*bp++);
-       bp++; /* group */
-       bp++; /* sync counter */
-       vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
-       bp++; /* spare2 */
-       bp++; /* spare3 */
-       bp++; /* spare4 */
+/*
+ * deliver reply data to an FS.GiveUpCallBacks
+ */
+static int afs_deliver_fs_give_up_callbacks(struct afs_call *call,
+                                           struct sk_buff *skb, bool last)
+{
+       _enter(",{%u},%d", skb->len, last);
 
-       vnode->cb_version               = ntohl(*bp++);
-       vnode->cb_expiry                = ntohl(*bp++);
-       vnode->cb_type                  = ntohl(*bp++);
-
-       if (volsync) {
-               volsync->creation       = ntohl(*bp++);
-               bp++; /* spare2 */
-               bp++; /* spare3 */
-               bp++; /* spare4 */
-               bp++; /* spare5 */
-               bp++; /* spare6 */
-       }
+       if (skb->len > 0)
+               return -EBADMSG; /* shouldn't be any reply data */
+       return 0;
+}
 
-       /* success */
-       ret = 0;
-
- out_unwait:
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&call->waitq, &myself);
-       rxrpc_put_call(call);
- out_put_conn:
-       afs_server_release_callslot(server, &callslot);
- out:
-       _leave("");
-       return ret;
-
- abort:
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       rxrpc_call_abort(call, ret);
-       schedule();
-       goto out_unwait;
-} /* end afs_rxfs_fetch_file_status() */
-
-/*****************************************************************************/
 /*
- * fetch the contents of a file or directory
+ * FS.GiveUpCallBacks operation type
  */
-int afs_rxfs_fetch_file_data(struct afs_server *server,
-                            struct afs_vnode *vnode,
-                            struct afs_rxfs_fetch_descriptor *desc,
-                            struct afs_volsync *volsync)
+static const struct afs_call_type afs_RXFSGiveUpCallBacks = {
+       .name           = "FS.GiveUpCallBacks",
+       .deliver        = afs_deliver_fs_give_up_callbacks,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * give up a set of callbacks
+ * - the callbacks are held in the server->cb_break ring
+ */
+int afs_fs_give_up_callbacks(struct afs_server *server,
+                            const struct afs_wait_mode *wait_mode)
 {
-       struct afs_server_callslot callslot;
-       struct rxrpc_call *call;
-       struct kvec piov[1];
-       size_t sent;
-       int ret;
-       __be32 *bp;
+       struct afs_call *call;
+       size_t ncallbacks;
+       __be32 *bp, *tp;
+       int loop;
 
-       DECLARE_WAITQUEUE(myself, current);
-
-       _enter("%p,{fid={%u,%u,%u},sz=%Zu,of=%lu}",
-              server,
-              desc->fid.vid,
-              desc->fid.vnode,
-              desc->fid.unique,
-              desc->size,
-              desc->offset);
-
-       /* get hold of the fileserver connection */
-       ret = afs_server_request_callslot(server, &callslot);
-       if (ret < 0)
-               goto out;
-
-       /* create a call through that connection */
-       ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap, &call);
-       if (ret < 0) {
-               printk("kAFS: Unable to create call: %d\n", ret);
-               goto out_put_conn;
-       }
-       call->app_opcode = FSFETCHDATA;
+       ncallbacks = CIRC_CNT(server->cb_break_head, server->cb_break_tail,
+                             ARRAY_SIZE(server->cb_break));
+
+       _enter("{%zu},", ncallbacks);
+
+       if (ncallbacks == 0)
+               return 0;
+       if (ncallbacks > AFSCBMAX)
+               ncallbacks = AFSCBMAX;
+
+       _debug("break %zu callbacks", ncallbacks);
 
-       /* we want to get event notifications from the call */
-       add_wait_queue(&call->waitq, &myself);
+       call = afs_alloc_flat_call(&afs_RXFSGiveUpCallBacks,
+                                  12 + ncallbacks * 6 * 4, 0);
+       if (!call)
+               return -ENOMEM;
+
+       call->service_id = FS_SERVICE;
+       call->port = htons(AFS_FS_PORT);
 
        /* marshall the parameters */
-       bp = rxrpc_call_alloc_scratch(call, 24);
-       bp[0] = htonl(FSFETCHDATA);
-       bp[1] = htonl(desc->fid.vid);
-       bp[2] = htonl(desc->fid.vnode);
-       bp[3] = htonl(desc->fid.unique);
-       bp[4] = htonl(desc->offset);
-       bp[5] = htonl(desc->size);
-
-       piov[0].iov_len = 24;
-       piov[0].iov_base = bp;
-
-       /* send the parameters to the server */
-       ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
-                                   0, &sent);
-       if (ret < 0)
-               goto abort;
-
-       /* wait for the data count to arrive */
-       ret = rxrpc_call_read_data(call, bp, 4, RXRPC_CALL_READ_BLOCK);
-       if (ret < 0)
-               goto read_failed;
-
-       desc->actual = ntohl(bp[0]);
-       if (desc->actual != desc->size) {
-               ret = -EBADMSG;
-               goto abort;
+       bp = call->request;
+       tp = bp + 2 + ncallbacks * 3;
+       *bp++ = htonl(FSGIVEUPCALLBACKS);
+       *bp++ = htonl(ncallbacks);
+       *tp++ = htonl(ncallbacks);
+
+       atomic_sub(ncallbacks, &server->cb_break_n);
+       for (loop = ncallbacks; loop > 0; loop--) {
+               struct afs_callback *cb =
+                       &server->cb_break[server->cb_break_tail];
+
+               *bp++ = htonl(cb->fid.vid);
+               *bp++ = htonl(cb->fid.vnode);
+               *bp++ = htonl(cb->fid.unique);
+               *tp++ = htonl(cb->version);
+               *tp++ = htonl(cb->expiry);
+               *tp++ = htonl(cb->type);
+               smp_mb();
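+               /* ensure the callback is copied out of the slot before the
+                * tail moves on and the slot can be refilled; the ring size
+                * is a power of two, so the index wraps with a simple mask */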
+               server->cb_break_tail =
+                       (server->cb_break_tail + 1) &
+                       (ARRAY_SIZE(server->cb_break) - 1);
        }
 
-       /* call the app to read the actual data */
-       rxrpc_call_reset_scratch(call);
-
-       ret = rxrpc_call_read_data(call, desc->buffer, desc->actual,
-                                  RXRPC_CALL_READ_BLOCK);
-       if (ret < 0)
-               goto read_failed;
-
-       /* wait for the rest of the reply to completely arrive */
-       rxrpc_call_reset_scratch(call);
-       bp = rxrpc_call_alloc_scratch(call, 120);
-
-       ret = rxrpc_call_read_data(call, bp, 120,
-                                  RXRPC_CALL_READ_BLOCK |
-                                  RXRPC_CALL_READ_ALL);
-       if (ret < 0)
-               goto read_failed;
-
-       /* unmarshall the reply */
-       vnode->status.if_version        = ntohl(*bp++);
-       vnode->status.type              = ntohl(*bp++);
-       vnode->status.nlink             = ntohl(*bp++);
-       vnode->status.size              = ntohl(*bp++);
-       vnode->status.version           = ntohl(*bp++);
-       vnode->status.author            = ntohl(*bp++);
-       vnode->status.owner             = ntohl(*bp++);
-       vnode->status.caller_access     = ntohl(*bp++);
-       vnode->status.anon_access       = ntohl(*bp++);
-       vnode->status.mode              = ntohl(*bp++);
-       vnode->status.parent.vid        = desc->fid.vid;
-       vnode->status.parent.vnode      = ntohl(*bp++);
-       vnode->status.parent.unique     = ntohl(*bp++);
-       bp++; /* seg size */
-       vnode->status.mtime_client      = ntohl(*bp++);
-       vnode->status.mtime_server      = ntohl(*bp++);
-       bp++; /* group */
-       bp++; /* sync counter */
-       vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
-       bp++; /* spare2 */
-       bp++; /* spare3 */
-       bp++; /* spare4 */
+       ASSERT(ncallbacks > 0);
+       wake_up_nr(&server->cb_break_waitq, ncallbacks);
 
-       vnode->cb_version               = ntohl(*bp++);
-       vnode->cb_expiry                = ntohl(*bp++);
-       vnode->cb_type                  = ntohl(*bp++);
-
-       if (volsync) {
-               volsync->creation       = ntohl(*bp++);
-               bp++; /* spare2 */
-               bp++; /* spare3 */
-               bp++; /* spare4 */
-               bp++; /* spare5 */
-               bp++; /* spare6 */
-       }
+       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+}
 
-       /* success */
-       ret = 0;
-
- out_unwait:
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&call->waitq,&myself);
-       rxrpc_put_call(call);
- out_put_conn:
-       afs_server_release_callslot(server, &callslot);
- out:
-       _leave(" = %d", ret);
-       return ret;
-
- read_failed:
-       if (ret == -ECONNABORTED) {
-               ret = call->app_errno;
-               goto out_unwait;
-       }
+/*
+ * deliver reply data to an FS.CreateFile or an FS.MakeDir
+ */
+static int afs_deliver_fs_create_vnode(struct afs_call *call,
+                                      struct sk_buff *skb, bool last)
+{
+       struct afs_vnode *vnode = call->reply;
+       const __be32 *bp;
+
+       _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
 
- abort:
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       rxrpc_call_abort(call, ret);
-       schedule();
-       goto out_unwait;
+       afs_transfer_reply(call, skb);
+       if (!last)
+               return 0;
 
-} /* end afs_rxfs_fetch_file_data() */
+       if (call->reply_size != call->reply_max)
+               return -EBADMSG;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       xdr_decode_AFSFid(&bp, call->reply2);
+       xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL);
+       xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
+       xdr_decode_AFSCallBack_raw(&bp, call->reply4);
+       /* xdr_decode_AFSVolSync(&bp, call->replyX); */
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * FS.CreateFile and FS.MakeDir operation type
+ */
+static const struct afs_call_type afs_RXFSCreateXXXX = {
+       .name           = "FS.CreateXXXX",
+       .deliver        = afs_deliver_fs_create_vnode,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_flat_call_destructor,
+};
 
-/*****************************************************************************/
 /*
- * ask the AFS fileserver to discard a callback request on a file
+ * create a file or make a directory
  */
-int afs_rxfs_give_up_callback(struct afs_server *server,
-                             struct afs_vnode *vnode)
+int afs_fs_create(struct afs_server *server,
+                 struct key *key,
+                 struct afs_vnode *vnode,
+                 const char *name,
+                 umode_t mode,
+                 struct afs_fid *newfid,
+                 struct afs_file_status *newstatus,
+                 struct afs_callback *newcb,
+                 const struct afs_wait_mode *wait_mode)
 {
-       struct afs_server_callslot callslot;
-       struct rxrpc_call *call;
-       struct kvec piov[1];
-       size_t sent;
-       int ret;
+       struct afs_call *call;
+       size_t namesz, reqsz, padsz;
        __be32 *bp;
 
-       DECLARE_WAITQUEUE(myself, current);
+       _enter("");
 
-       _enter("%p,{%u,%u,%u}",
-              server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
+       namesz = strlen(name);
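+       /* the name is padded with NULs to a four-byte boundary on the wire */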
+       padsz = (4 - (namesz & 3)) & 3;
+       reqsz = (5 * 4) + namesz + padsz + (6 * 4);
 
-       /* get hold of the fileserver connection */
-       ret = afs_server_request_callslot(server, &callslot);
-       if (ret < 0)
-               goto out;
+       call = afs_alloc_flat_call(&afs_RXFSCreateXXXX, reqsz,
+                                  (3 + 21 + 21 + 3 + 6) * 4);
+       if (!call)
+               return -ENOMEM;
 
-       /* create a call through that connection */
-       ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap, &call);
-       if (ret < 0) {
-               printk("kAFS: Unable to create call: %d\n", ret);
-               goto out_put_conn;
+       call->key = key;
+       call->reply = vnode;
+       call->reply2 = newfid;
+       call->reply3 = newstatus;
+       call->reply4 = newcb;
+       call->service_id = FS_SERVICE;
+       call->port = htons(AFS_FS_PORT);
+
+       /* marshall the parameters */
+       bp = call->request;
+       *bp++ = htonl(S_ISDIR(mode) ? FSMAKEDIR : FSCREATEFILE);
+       *bp++ = htonl(vnode->fid.vid);
+       *bp++ = htonl(vnode->fid.vnode);
+       *bp++ = htonl(vnode->fid.unique);
+       *bp++ = htonl(namesz);
+       memcpy(bp, name, namesz);
+       bp = (void *) bp + namesz;
+       if (padsz > 0) {
+               memset(bp, 0, padsz);
+               bp = (void *) bp + padsz;
        }
-       call->app_opcode = FSGIVEUPCALLBACKS;
+       *bp++ = htonl(AFS_SET_MODE);
+       *bp++ = 0; /* mtime */
+       *bp++ = 0; /* owner */
+       *bp++ = 0; /* group */
+       *bp++ = htonl(mode & S_IALLUGO); /* unix mode */
+       *bp++ = 0; /* segment size */
 
-       /* we want to get event notifications from the call */
-       add_wait_queue(&call->waitq, &myself);
+       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+}
 
-       /* marshall the parameters */
-       bp = rxrpc_call_alloc_scratch(call, (1 + 4 + 4) * 4);
+/*
+ * deliver reply data to an FS.RemoveFile or FS.RemoveDir
+ */
+static int afs_deliver_fs_remove(struct afs_call *call,
+                                struct sk_buff *skb, bool last)
+{
+       struct afs_vnode *vnode = call->reply;
+       const __be32 *bp;
 
-       piov[0].iov_len = (1 + 4 + 4) * 4;
-       piov[0].iov_base = bp;
+       _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
 
-       *bp++ = htonl(FSGIVEUPCALLBACKS);
-       *bp++ = htonl(1);
+       afs_transfer_reply(call, skb);
+       if (!last)
+               return 0;
+
+       if (call->reply_size != call->reply_max)
+               return -EBADMSG;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
+       /* xdr_decode_AFSVolSync(&bp, call->replyX); */
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * FS.RemoveDir/FS.RemoveFile operation type
+ */
+static const struct afs_call_type afs_RXFSRemoveXXXX = {
+       .name           = "FS.RemoveXXXX",
+       .deliver        = afs_deliver_fs_remove,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * remove a file or directory
+ */
+int afs_fs_remove(struct afs_server *server,
+                 struct key *key,
+                 struct afs_vnode *vnode,
+                 const char *name,
+                 bool isdir,
+                 const struct afs_wait_mode *wait_mode)
+{
+       struct afs_call *call;
+       size_t namesz, reqsz, padsz;
+       __be32 *bp;
+
+       _enter("");
+
+       namesz = strlen(name);
+       padsz = (4 - (namesz & 3)) & 3;
+       reqsz = (5 * 4) + namesz + padsz;
+
+       call = afs_alloc_flat_call(&afs_RXFSRemoveXXXX, reqsz, (21 + 6) * 4);
+       if (!call)
+               return -ENOMEM;
+
+       call->key = key;
+       call->reply = vnode;
+       call->service_id = FS_SERVICE;
+       call->port = htons(AFS_FS_PORT);
+
+       /* marshall the parameters */
+       bp = call->request;
+       *bp++ = htonl(isdir ? FSREMOVEDIR : FSREMOVEFILE);
        *bp++ = htonl(vnode->fid.vid);
        *bp++ = htonl(vnode->fid.vnode);
        *bp++ = htonl(vnode->fid.unique);
-       *bp++ = htonl(1);
-       *bp++ = htonl(vnode->cb_version);
-       *bp++ = htonl(vnode->cb_expiry);
-       *bp++ = htonl(vnode->cb_type);
-
-       /* send the parameters to the server */
-       ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
-                                   0, &sent);
-       if (ret < 0)
-               goto abort;
-
-       /* wait for the reply to completely arrive */
-       for (;;) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
-                   signal_pending(current))
-                       break;
-               schedule();
+       *bp++ = htonl(namesz);
+       memcpy(bp, name, namesz);
+       bp = (void *) bp + namesz;
+       if (padsz > 0) {
+               memset(bp, 0, padsz);
+               bp = (void *) bp + padsz;
        }
-       set_current_state(TASK_RUNNING);
 
-       ret = -EINTR;
-       if (signal_pending(current))
-               goto abort;
+       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+}
 
-       switch (call->app_call_state) {
-       case RXRPC_CSTATE_ERROR:
-               ret = call->app_errno;
-               goto out_unwait;
+/*
+ * deliver reply data to an FS.Link
+ */
+static int afs_deliver_fs_link(struct afs_call *call,
+                              struct sk_buff *skb, bool last)
+{
+       struct afs_vnode *dvnode = call->reply, *vnode = call->reply2;
+       const __be32 *bp;
 
-       case RXRPC_CSTATE_CLNT_GOT_REPLY:
-               ret = 0;
-               goto out_unwait;
+       _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
 
-       default:
-               BUG();
-       }
+       afs_transfer_reply(call, skb);
+       if (!last)
+               return 0;
+
+       if (call->reply_size != call->reply_max)
+               return -EBADMSG;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
+       xdr_decode_AFSFetchStatus(&bp, &dvnode->status, dvnode);
+       /* xdr_decode_AFSVolSync(&bp, call->replyX); */
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * FS.Link operation type
+ */
+static const struct afs_call_type afs_RXFSLink = {
+       .name           = "FS.Link",
+       .deliver        = afs_deliver_fs_link,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_flat_call_destructor,
+};
 
- out_unwait:
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&call->waitq, &myself);
-       rxrpc_put_call(call);
- out_put_conn:
-       afs_server_release_callslot(server, &callslot);
- out:
-       _leave("");
-       return ret;
-
- abort:
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       rxrpc_call_abort(call, ret);
-       schedule();
-       goto out_unwait;
-} /* end afs_rxfs_give_up_callback() */
-
-/*****************************************************************************/
 /*
- * look a filename up in a directory
- * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
+ * make a hard link
  */
-#if 0
-int afs_rxfs_lookup(struct afs_server *server,
-                   struct afs_vnode *dir,
-                   const char *filename,
-                   struct afs_vnode *vnode,
-                   struct afs_volsync *volsync)
+int afs_fs_link(struct afs_server *server,
+               struct key *key,
+               struct afs_vnode *dvnode,
+               struct afs_vnode *vnode,
+               const char *name,
+               const struct afs_wait_mode *wait_mode)
 {
-       struct rxrpc_connection *conn;
-       struct rxrpc_call *call;
-       struct kvec piov[3];
-       size_t sent;
-       int ret;
-       u32 *bp, zero;
+       struct afs_call *call;
+       size_t namesz, reqsz, padsz;
+       __be32 *bp;
 
-       DECLARE_WAITQUEUE(myself, current);
+       _enter("");
 
-       kenter("%p,{%u,%u,%u},%s",
-              server, fid->vid, fid->vnode, fid->unique, filename);
+       namesz = strlen(name);
+       padsz = (4 - (namesz & 3)) & 3;
+       reqsz = (5 * 4) + namesz + padsz + (3 * 4);
 
-       /* get hold of the fileserver connection */
-       ret = afs_server_get_fsconn(server, &conn);
-       if (ret < 0)
-               goto out;
+       call = afs_alloc_flat_call(&afs_RXFSLink, reqsz, (21 + 21 + 6) * 4);
+       if (!call)
+               return -ENOMEM;
 
-       /* create a call through that connection */
-       ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call);
-       if (ret < 0) {
-               printk("kAFS: Unable to create call: %d\n", ret);
-               goto out_put_conn;
+       call->key = key;
+       call->reply = dvnode;
+       call->reply2 = vnode;
+       call->service_id = FS_SERVICE;
+       call->port = htons(AFS_FS_PORT);
+
+       /* marshall the parameters */
+       bp = call->request;
+       *bp++ = htonl(FSLINK);
+       *bp++ = htonl(dvnode->fid.vid);
+       *bp++ = htonl(dvnode->fid.vnode);
+       *bp++ = htonl(dvnode->fid.unique);
+       *bp++ = htonl(namesz);
+       memcpy(bp, name, namesz);
+       bp = (void *) bp + namesz;
+       if (padsz > 0) {
+               memset(bp, 0, padsz);
+               bp = (void *) bp + padsz;
        }
-       call->app_opcode = FSLOOKUP;
+       *bp++ = htonl(vnode->fid.vid);
+       *bp++ = htonl(vnode->fid.vnode);
+       *bp++ = htonl(vnode->fid.unique);
+
+       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+}
+
+/*
+ * deliver reply data to an FS.Symlink
+ */
+static int afs_deliver_fs_symlink(struct afs_call *call,
+                                 struct sk_buff *skb, bool last)
+{
+       struct afs_vnode *vnode = call->reply;
+       const __be32 *bp;
 
-       /* we want to get event notifications from the call */
-       add_wait_queue(&call->waitq,&myself);
+       _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+
+       afs_transfer_reply(call, skb);
+       if (!last)
+               return 0;
+
+       if (call->reply_size != call->reply_max)
+               return -EBADMSG;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       xdr_decode_AFSFid(&bp, call->reply2);
+       xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL);
+       xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
+       /* xdr_decode_AFSVolSync(&bp, call->replyX); */
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * FS.Symlink operation type
+ */
+static const struct afs_call_type afs_RXFSSymlink = {
+       .name           = "FS.Symlink",
+       .deliver        = afs_deliver_fs_symlink,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * create a symbolic link
+ */
+int afs_fs_symlink(struct afs_server *server,
+                  struct key *key,
+                  struct afs_vnode *vnode,
+                  const char *name,
+                  const char *contents,
+                  struct afs_fid *newfid,
+                  struct afs_file_status *newstatus,
+                  const struct afs_wait_mode *wait_mode)
+{
+       struct afs_call *call;
+       size_t namesz, reqsz, padsz, c_namesz, c_padsz;
+       __be32 *bp;
+
+       _enter("");
+
+       namesz = strlen(name);
+       padsz = (4 - (namesz & 3)) & 3;
+
+       c_namesz = strlen(contents);
+       c_padsz = (4 - (c_namesz & 3)) & 3;
+
+       reqsz = (6 * 4) + namesz + padsz + c_namesz + c_padsz + (6 * 4);
+
+       call = afs_alloc_flat_call(&afs_RXFSSymlink, reqsz,
+                                  (3 + 21 + 21 + 6) * 4);
+       if (!call)
+               return -ENOMEM;
+
+       call->key = key;
+       call->reply = vnode;
+       call->reply2 = newfid;
+       call->reply3 = newstatus;
+       call->service_id = FS_SERVICE;
+       call->port = htons(AFS_FS_PORT);
 
        /* marshall the parameters */
-       bp = rxrpc_call_alloc_scratch(call, 20);
-
-       zero = 0;
-
-       piov[0].iov_len = 20;
-       piov[0].iov_base = bp;
-       piov[1].iov_len = strlen(filename);
-       piov[1].iov_base = (char *) filename;
-       piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
-       piov[2].iov_base = &zero;
-
-       *bp++ = htonl(FSLOOKUP);
-       *bp++ = htonl(dirfid->vid);
-       *bp++ = htonl(dirfid->vnode);
-       *bp++ = htonl(dirfid->unique);
-       *bp++ = htonl(piov[1].iov_len);
-
-       /* send the parameters to the server */
-       ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
-                                   0, &sent);
-       if (ret < 0)
-               goto abort;
-
-       /* wait for the reply to completely arrive */
-       bp = rxrpc_call_alloc_scratch(call, 220);
-
-       ret = rxrpc_call_read_data(call, bp, 220,
-                                  RXRPC_CALL_READ_BLOCK |
-                                  RXRPC_CALL_READ_ALL);
-       if (ret < 0) {
-               if (ret == -ECONNABORTED) {
-                       ret = call->app_errno;
-                       goto out_unwait;
-               }
-               goto abort;
+       bp = call->request;
+       *bp++ = htonl(FSSYMLINK);
+       *bp++ = htonl(vnode->fid.vid);
+       *bp++ = htonl(vnode->fid.vnode);
+       *bp++ = htonl(vnode->fid.unique);
+       *bp++ = htonl(namesz);
+       memcpy(bp, name, namesz);
+       bp = (void *) bp + namesz;
+       if (padsz > 0) {
+               memset(bp, 0, padsz);
+               bp = (void *) bp + padsz;
        }
+       *bp++ = htonl(c_namesz);
+       memcpy(bp, contents, c_namesz);
+       bp = (void *) bp + c_namesz;
+       if (c_padsz > 0) {
+               memset(bp, 0, c_padsz);
+               bp = (void *) bp + c_padsz;
+       }
+       *bp++ = htonl(AFS_SET_MODE);
+       *bp++ = 0; /* mtime */
+       *bp++ = 0; /* owner */
+       *bp++ = 0; /* group */
+       *bp++ = htonl(S_IRWXUGO); /* unix mode */
+       *bp++ = 0; /* segment size */
 
-       /* unmarshall the reply */
-       fid->vid                = ntohl(*bp++);
-       fid->vnode              = ntohl(*bp++);
-       fid->unique             = ntohl(*bp++);
+       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+}
 
-       vnode->status.if_version        = ntohl(*bp++);
-       vnode->status.type              = ntohl(*bp++);
-       vnode->status.nlink             = ntohl(*bp++);
-       vnode->status.size              = ntohl(*bp++);
-       vnode->status.version           = ntohl(*bp++);
-       vnode->status.author            = ntohl(*bp++);
-       vnode->status.owner             = ntohl(*bp++);
-       vnode->status.caller_access     = ntohl(*bp++);
-       vnode->status.anon_access       = ntohl(*bp++);
-       vnode->status.mode              = ntohl(*bp++);
-       vnode->status.parent.vid        = dirfid->vid;
-       vnode->status.parent.vnode      = ntohl(*bp++);
-       vnode->status.parent.unique     = ntohl(*bp++);
-       bp++; /* seg size */
-       vnode->status.mtime_client      = ntohl(*bp++);
-       vnode->status.mtime_server      = ntohl(*bp++);
-       bp++; /* group */
-       bp++; /* sync counter */
-       vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
-       bp++; /* spare2 */
-       bp++; /* spare3 */
-       bp++; /* spare4 */
+/*
+ * deliver reply data to an FS.Rename
+ */
+static int afs_deliver_fs_rename(struct afs_call *call,
+                                 struct sk_buff *skb, bool last)
+{
+       struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2;
+       const __be32 *bp;
 
-       dir->status.if_version          = ntohl(*bp++);
-       dir->status.type                = ntohl(*bp++);
-       dir->status.nlink               = ntohl(*bp++);
-       dir->status.size                = ntohl(*bp++);
-       dir->status.version             = ntohl(*bp++);
-       dir->status.author              = ntohl(*bp++);
-       dir->status.owner               = ntohl(*bp++);
-       dir->status.caller_access       = ntohl(*bp++);
-       dir->status.anon_access         = ntohl(*bp++);
-       dir->status.mode                = ntohl(*bp++);
-       dir->status.parent.vid          = dirfid->vid;
-       dir->status.parent.vnode        = ntohl(*bp++);
-       dir->status.parent.unique       = ntohl(*bp++);
-       bp++; /* seg size */
-       dir->status.mtime_client        = ntohl(*bp++);
-       dir->status.mtime_server        = ntohl(*bp++);
-       bp++; /* group */
-       bp++; /* sync counter */
-       dir->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
-       bp++; /* spare2 */
-       bp++; /* spare3 */
-       bp++; /* spare4 */
+       _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+
+       afs_transfer_reply(call, skb);
+       if (!last)
+               return 0;
+
+       if (call->reply_size != call->reply_max)
+               return -EBADMSG;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       xdr_decode_AFSFetchStatus(&bp, &orig_dvnode->status, orig_dvnode);
+       if (new_dvnode != orig_dvnode)
+               xdr_decode_AFSFetchStatus(&bp, &new_dvnode->status, new_dvnode);
+       /* xdr_decode_AFSVolSync(&bp, call->replyX); */
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * FS.Rename operation type
+ */
+static const struct afs_call_type afs_RXFSRename = {
+       .name           = "FS.Rename",
+       .deliver        = afs_deliver_fs_rename,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * rename a file or directory
+ */
+int afs_fs_rename(struct afs_server *server,
+                 struct key *key,
+                 struct afs_vnode *orig_dvnode,
+                 const char *orig_name,
+                 struct afs_vnode *new_dvnode,
+                 const char *new_name,
+                 const struct afs_wait_mode *wait_mode)
+{
+       struct afs_call *call;
+       size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz;
+       __be32 *bp;
+
+       _enter("");
+
+       o_namesz = strlen(orig_name);
+       o_padsz = (4 - (o_namesz & 3)) & 3;
+
+       n_namesz = strlen(new_name);
+       n_padsz = (4 - (n_namesz & 3)) & 3;
+
+       reqsz = (4 * 4) +
+               4 + o_namesz + o_padsz +
+               (3 * 4) +
+               4 + n_namesz + n_padsz;
+
+       call = afs_alloc_flat_call(&afs_RXFSRename, reqsz, (21 + 21 + 6) * 4);
+       if (!call)
+               return -ENOMEM;
+
+       call->key = key;
+       call->reply = orig_dvnode;
+       call->reply2 = new_dvnode;
+       call->service_id = FS_SERVICE;
+       call->port = htons(AFS_FS_PORT);
+
+       /* marshall the parameters */
+       bp = call->request;
+       *bp++ = htonl(FSRENAME);
+       *bp++ = htonl(orig_dvnode->fid.vid);
+       *bp++ = htonl(orig_dvnode->fid.vnode);
+       *bp++ = htonl(orig_dvnode->fid.unique);
+       *bp++ = htonl(o_namesz);
+       memcpy(bp, orig_name, o_namesz);
+       bp = (void *) bp + o_namesz;
+       if (o_padsz > 0) {
+               memset(bp, 0, o_padsz);
+               bp = (void *) bp + o_padsz;
+       }
 
-       callback->fid           = *fid;
-       callback->version       = ntohl(*bp++);
-       callback->expiry        = ntohl(*bp++);
-       callback->type          = ntohl(*bp++);
-
-       if (volsync) {
-               volsync->creation       = ntohl(*bp++);
-               bp++; /* spare2 */
-               bp++; /* spare3 */
-               bp++; /* spare4 */
-               bp++; /* spare5 */
-               bp++; /* spare6 */
+       *bp++ = htonl(new_dvnode->fid.vid);
+       *bp++ = htonl(new_dvnode->fid.vnode);
+       *bp++ = htonl(new_dvnode->fid.unique);
+       *bp++ = htonl(n_namesz);
+       memcpy(bp, new_name, n_namesz);
+       bp = (void *) bp + n_namesz;
+       if (n_padsz > 0) {
+               memset(bp, 0, n_padsz);
+               bp = (void *) bp + n_padsz;
        }
 
-       /* success */
-       ret = 0;
-
- out_unwait:
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&call->waitq, &myself);
-       rxrpc_put_call(call);
- out_put_conn:
-       afs_server_release_fsconn(server, conn);
- out:
-       kleave("");
-       return ret;
-
- abort:
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       rxrpc_call_abort(call, ret);
-       schedule();
-       goto out_unwait;
-} /* end afs_rxfs_lookup() */
-#endif
+       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+}
diff --git a/fs/afs/fsclient.h b/fs/afs/fsclient.h
deleted file mode 100644
index 8ba3e74..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/* fsclient.h: AFS File Server client stub declarations
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_FSCLIENT_H
-#define _LINUX_AFS_FSCLIENT_H
-
-#include "server.h"
-
-extern int afs_rxfs_get_volume_info(struct afs_server *server,
-                                   const char *name,
-                                   struct afs_volume_info *vinfo);
-
-extern int afs_rxfs_fetch_file_status(struct afs_server *server,
-                                     struct afs_vnode *vnode,
-                                     struct afs_volsync *volsync);
-
-struct afs_rxfs_fetch_descriptor {
-       struct afs_fid  fid;            /* file ID to fetch */
-       size_t          size;           /* total number of bytes to fetch */
-       off_t           offset;         /* offset in file to start from */
-       void            *buffer;        /* read buffer */
-       size_t          actual;         /* actual size sent back by server */
-};
-
-extern int afs_rxfs_fetch_file_data(struct afs_server *server,
-                                   struct afs_vnode *vnode,
-                                   struct afs_rxfs_fetch_descriptor *desc,
-                                   struct afs_volsync *volsync);
-
-extern int afs_rxfs_give_up_callback(struct afs_server *server,
-                                    struct afs_vnode *vnode);
-
-/* this doesn't appear to work in OpenAFS server */
-extern int afs_rxfs_lookup(struct afs_server *server,
-                          struct afs_vnode *dir,
-                          const char *filename,
-                          struct afs_vnode *vnode,
-                          struct afs_volsync *volsync);
-
-/* this is apparently mis-implemented in OpenAFS server */
-extern int afs_rxfs_get_root_volume(struct afs_server *server,
-                                   char *buf,
-                                   size_t *buflen);
-
-
-#endif /* _LINUX_AFS_FSCLIENT_H */
index 9d9bca6..c184a4e 100644
@@ -19,9 +19,6 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
-#include "volume.h"
-#include "vnode.h"
-#include "super.h"
 #include "internal.h"
 
 struct afs_iget_data {
@@ -29,26 +26,25 @@ struct afs_iget_data {
        struct afs_volume       *volume;        /* volume on which resides */
 };
 
-/*****************************************************************************/
 /*
  * map the AFS file status to the inode member variables
  */
-static int afs_inode_map_status(struct afs_vnode *vnode)
+static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
 {
        struct inode *inode = AFS_VNODE_TO_I(vnode);
 
-       _debug("FS: ft=%d lk=%d sz=%Zu ver=%Lu mod=%hu",
+       _debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu",
               vnode->status.type,
               vnode->status.nlink,
-              vnode->status.size,
-              vnode->status.version,
+              (unsigned long long) vnode->status.size,
+              vnode->status.data_version,
               vnode->status.mode);
 
        switch (vnode->status.type) {
        case AFS_FTYPE_FILE:
                inode->i_mode   = S_IFREG | vnode->status.mode;
                inode->i_op     = &afs_file_inode_operations;
-               inode->i_fop    = &generic_ro_fops;
+               inode->i_fop    = &afs_file_operations;
                break;
        case AFS_FTYPE_DIR:
                inode->i_mode   = S_IFDIR | vnode->status.mode;
@@ -77,9 +73,9 @@ static int afs_inode_map_status(struct afs_vnode *vnode)
 
        /* check to see whether a symbolic link is really a mountpoint */
        if (vnode->status.type == AFS_FTYPE_SYMLINK) {
-               afs_mntpt_check_symlink(vnode);
+               afs_mntpt_check_symlink(vnode, key);
 
-               if (vnode->flags & AFS_VNODE_MOUNTPOINT) {
+               if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) {
                        inode->i_mode   = S_IFDIR | vnode->status.mode;
                        inode->i_op     = &afs_mntpt_inode_operations;
                        inode->i_fop    = &afs_mntpt_file_operations;
@@ -87,30 +83,8 @@ static int afs_inode_map_status(struct afs_vnode *vnode)
        }
 
        return 0;
-} /* end afs_inode_map_status() */
+}
 
-/*****************************************************************************/
-/*
- * attempt to fetch the status of an inode, coelescing multiple simultaneous
- * fetches
- */
-static int afs_inode_fetch_status(struct inode *inode)
-{
-       struct afs_vnode *vnode;
-       int ret;
-
-       vnode = AFS_FS_I(inode);
-
-       ret = afs_vnode_fetch_status(vnode);
-
-       if (ret == 0)
-               ret = afs_inode_map_status(vnode);
-
-       return ret;
-
-} /* end afs_inode_fetch_status() */
-
-/*****************************************************************************/
 /*
  * iget5() comparator
  */
@@ -120,9 +94,8 @@ static int afs_iget5_test(struct inode *inode, void *opaque)
 
        return inode->i_ino == data->fid.vnode &&
                inode->i_version == data->fid.unique;
-} /* end afs_iget5_test() */
+}
 
-/*****************************************************************************/
 /*
  * iget5() inode initialiser
  */
@@ -137,14 +110,14 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
        vnode->volume = data->volume;
 
        return 0;
-} /* end afs_iget5_set() */
+}
 
-/*****************************************************************************/
 /*
  * inode retrieval
  */
-inline int afs_iget(struct super_block *sb, struct afs_fid *fid,
-                   struct inode **_inode)
+struct inode *afs_iget(struct super_block *sb, struct key *key,
+                      struct afs_fid *fid, struct afs_file_status *status,
+                      struct afs_callback *cb)
 {
        struct afs_iget_data data = { .fid = *fid };
        struct afs_super_info *as;
@@ -161,20 +134,18 @@ inline int afs_iget(struct super_block *sb, struct afs_fid *fid,
                             &data);
        if (!inode) {
                _leave(" = -ENOMEM");
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        }
 
+       _debug("GOT INODE %p { vl=%x vn=%x, u=%x }",
+              inode, fid->vid, fid->vnode, fid->unique);
+
        vnode = AFS_FS_I(inode);
 
        /* deal with an existing inode */
        if (!(inode->i_state & I_NEW)) {
-               ret = afs_vnode_fetch_status(vnode);
-               if (ret==0)
-                       *_inode = inode;
-               else
-                       iput(inode);
-               _leave(" = %d", ret);
-               return ret;
+               _leave(" = %p", inode);
+               return inode;
        }
 
 #ifdef AFS_CACHING_SUPPORT
@@ -186,100 +157,185 @@ inline int afs_iget(struct super_block *sb, struct afs_fid *fid,
                               &vnode->cache);
 #endif
 
-       /* okay... it's a new inode */
-       inode->i_flags |= S_NOATIME;
-       vnode->flags |= AFS_VNODE_CHANGED;
-       ret = afs_inode_fetch_status(inode);
-       if (ret<0)
+       if (!status) {
+               /* it's a remotely extant inode */
+               set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
+               ret = afs_vnode_fetch_status(vnode, NULL, key);
+               if (ret < 0)
+                       goto bad_inode;
+       } else {
+               /* it's an inode we just created */
+               memcpy(&vnode->status, status, sizeof(vnode->status));
+
+               if (!cb) {
+                       /* it's a symlink we just created (the fileserver
+                        * didn't give us a callback) */
+                       vnode->cb_version = 0;
+                       vnode->cb_expiry = 0;
+                       vnode->cb_type = 0;
+                       vnode->cb_expires = get_seconds();
+               } else {
+                       vnode->cb_version = cb->version;
+                       vnode->cb_expiry = cb->expiry;
+                       vnode->cb_type = cb->type;
+                       vnode->cb_expires = vnode->cb_expiry + get_seconds();
+               }
+       }
+
+       ret = afs_inode_map_status(vnode, key);
+       if (ret < 0)
                goto bad_inode;
 
        /* success */
+       clear_bit(AFS_VNODE_UNSET, &vnode->flags);
+       inode->i_flags |= S_NOATIME;
        unlock_new_inode(inode);
-
-       *_inode = inode;
-       _leave(" = 0 [CB { v=%u x=%lu t=%u }]",
-              vnode->cb_version,
-              vnode->cb_timeout.timo_jif,
-              vnode->cb_type);
-       return 0;
+       _leave(" = %p [CB { v=%u t=%u }]", inode, vnode->cb_version, vnode->cb_type);
+       return inode;
 
        /* failure */
- bad_inode:
+bad_inode:
        make_bad_inode(inode);
        unlock_new_inode(inode);
        iput(inode);
 
        _leave(" = %d [bad]", ret);
+       return ERR_PTR(ret);
+}
+
+/*
+ * validate a vnode/inode
+ * - there are several things we need to check
+ *   - parent dir data changes (rm, rmdir, rename, mkdir, create, link,
+ *     symlink)
+ *   - parent dir metadata changed (security changes)
+ *   - dentry data changed (write, truncate)
+ *   - dentry metadata changed (security changes)
+ */
+int afs_validate(struct afs_vnode *vnode, struct key *key)
+{
+       int ret;
+
+       _enter("{v={%x:%u} fl=%lx},%x",
+              vnode->fid.vid, vnode->fid.vnode, vnode->flags,
+              key_serial(key));
+
+       if (vnode->cb_promised &&
+           !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
+           !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
+           !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
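+               /* treat a promise with less than ten seconds left to run as
+                * already broken so that it gets renewed in good time */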
+               if (vnode->cb_expires < get_seconds() + 10) {
+                       _debug("callback expired");
+                       set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
+               } else {
+                       goto valid;
+               }
+       }
+
+       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+               goto valid;
+
+       mutex_lock(&vnode->validate_lock);
+
+       /* if the promise has expired, we need to check the server again to get
+        * a new promise - note that if the (parent) directory's metadata was
+        * changed then the security may be different and we may no longer have
+        * access */
+       if (!vnode->cb_promised ||
+           test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
+               _debug("not promised");
+               ret = afs_vnode_fetch_status(vnode, NULL, key);
+               if (ret < 0)
+                       goto error_unlock;
+               _debug("new promise [fl=%lx]", vnode->flags);
+       }
+
+       if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+               _debug("file already deleted");
+               ret = -ESTALE;
+               goto error_unlock;
+       }
+
+       /* if the vnode's data version number changed then its contents are
+        * different */
+       if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+               _debug("zap data {%x:%d}", vnode->fid.vid, vnode->fid.vnode);
+               invalidate_remote_inode(&vnode->vfs_inode);
+       }
+
+       clear_bit(AFS_VNODE_MODIFIED, &vnode->flags);
+       mutex_unlock(&vnode->validate_lock);
+valid:
+       _leave(" = 0");
+       return 0;
+
+error_unlock:
+       mutex_unlock(&vnode->validate_lock);
+       _leave(" = %d", ret);
        return ret;
-} /* end afs_iget() */
+}
 
-/*****************************************************************************/
 /*
  * read the attributes of an inode
  */
 int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry,
                      struct kstat *stat)
 {
-       struct afs_vnode *vnode;
        struct inode *inode;
-       int ret;
 
        inode = dentry->d_inode;
 
        _enter("{ ino=%lu v=%lu }", inode->i_ino, inode->i_version);
 
-       vnode = AFS_FS_I(inode);
-
-       ret = afs_inode_fetch_status(inode);
-       if (ret == -ENOENT) {
-               _leave(" = %d [%d %p]",
-                      ret, atomic_read(&dentry->d_count), dentry->d_inode);
-               return ret;
-       }
-       else if (ret < 0) {
-               make_bad_inode(inode);
-               _leave(" = %d", ret);
-               return ret;
-       }
-
-       /* transfer attributes from the inode structure to the stat
-        * structure */
        generic_fillattr(inode, stat);
-
-       _leave(" = 0 CB { v=%u x=%u t=%u }",
-              vnode->cb_version,
-              vnode->cb_expiry,
-              vnode->cb_type);
-
        return 0;
-} /* end afs_inode_getattr() */
+}
 
-/*****************************************************************************/
 /*
  * clear an AFS inode
  */
 void afs_clear_inode(struct inode *inode)
 {
+       struct afs_permits *permits;
        struct afs_vnode *vnode;
 
        vnode = AFS_FS_I(inode);
 
-       _enter("ino=%lu { vn=%08x v=%u x=%u t=%u }",
-              inode->i_ino,
+       _enter("{%x:%d.%d} v=%u x=%u t=%u }",
+              vnode->fid.vid,
               vnode->fid.vnode,
+              vnode->fid.unique,
               vnode->cb_version,
               vnode->cb_expiry,
-              vnode->cb_type
-              );
+              vnode->cb_type);
 
-       BUG_ON(inode->i_ino != vnode->fid.vnode);
+       _debug("CLEAR INODE %p", inode);
 
-       afs_vnode_give_up_callback(vnode);
+       ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
+
+       afs_give_up_callback(vnode);
+
+       if (vnode->server) {
+               spin_lock(&vnode->server->fs_lock);
+               rb_erase(&vnode->server_rb, &vnode->server->fs_vnodes);
+               spin_unlock(&vnode->server->fs_lock);
+               afs_put_server(vnode->server);
+               vnode->server = NULL;
+       }
+
+       ASSERT(!vnode->cb_promised);
 
 #ifdef AFS_CACHING_SUPPORT
        cachefs_relinquish_cookie(vnode->cache, 0);
        vnode->cache = NULL;
 #endif
 
+       mutex_lock(&vnode->permits_lock);
+       permits = vnode->permits;
+       rcu_assign_pointer(vnode->permits, NULL);
+       mutex_unlock(&vnode->permits_lock);
+       if (permits)
+               call_rcu(&permits->rcu, afs_zap_permits);
+
        _leave("");
-} /* end afs_clear_inode() */
+}
index 5151d5d..6dd3197 100644
@@ -1,6 +1,6 @@
-/* internal.h: internal AFS stuff
+/* internal AFS stuff
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef AFS_INTERNAL_H
-#define AFS_INTERNAL_H
-
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
+#include <linux/skbuff.h>
+#include <linux/rxrpc.h>
+#include <linux/key.h>
+#include "afs.h"
+#include "afs_vl.h"
+
+#define AFS_CELL_MAX_ADDRS 15
+
+struct afs_call;
+
+typedef enum {
+       AFS_VL_NEW,                     /* new, uninitialised record */
+       AFS_VL_CREATING,                /* creating record */
+       AFS_VL_VALID,                   /* record is valid */
+       AFS_VL_NO_VOLUME,               /* no such volume available */
+       AFS_VL_UPDATING,                /* update in progress */
+       AFS_VL_VOLUME_DELETED,          /* volume was deleted */
+       AFS_VL_UNCERTAIN,               /* uncertain state (update failed) */
+} __attribute__((packed)) afs_vlocation_state_t;
+
+struct afs_mount_params {
+       bool                    rwpath;         /* T if the parent should be considered R/W */
+       bool                    force;          /* T to force cell type */
+       afs_voltype_t           type;           /* type of volume requested */
+       int                     volnamesz;      /* size of volume name */
+       const char              *volname;       /* name of volume to mount */
+       struct afs_cell         *cell;          /* cell in which to find volume */
+       struct afs_volume       *volume;        /* volume record */
+       struct key              *key;           /* key to use for secure mounting */
+};
 
 /*
- * debug tracing
+ * definition of how to wait for the completion of an operation
  */
-#define kenter(FMT, a...)      printk("==> %s("FMT")\n",__FUNCTION__ , ## a)
-#define kleave(FMT, a...)      printk("<== %s()"FMT"\n",__FUNCTION__ , ## a)
-#define kdebug(FMT, a...)      printk(FMT"\n" , ## a)
-#define kproto(FMT, a...)      printk("### "FMT"\n" , ## a)
-#define knet(FMT, a...)                printk(FMT"\n" , ## a)
-
-#ifdef __KDEBUG
-#define _enter(FMT, a...)      kenter(FMT , ## a)
-#define _leave(FMT, a...)      kleave(FMT , ## a)
-#define _debug(FMT, a...)      kdebug(FMT , ## a)
-#define _proto(FMT, a...)      kproto(FMT , ## a)
-#define _net(FMT, a...)                knet(FMT , ## a)
-#else
-#define _enter(FMT, a...)      do { } while(0)
-#define _leave(FMT, a...)      do { } while(0)
-#define _debug(FMT, a...)      do { } while(0)
-#define _proto(FMT, a...)      do { } while(0)
-#define _net(FMT, a...)                do { } while(0)
-#endif
+struct afs_wait_mode {
+       /* RxRPC received message notification */
+       void (*rx_wakeup)(struct afs_call *call);
 
-static inline void afs_discard_my_signals(void)
-{
-       while (signal_pending(current)) {
-               siginfo_t sinfo;
+       /* synchronous call waiter and call dispatched notification */
+       int (*wait)(struct afs_call *call);
+
+       /* asynchronous call completion */
+       void (*async_complete)(void *reply, int error);
+};
+
+extern const struct afs_wait_mode afs_sync_call;
+extern const struct afs_wait_mode afs_async_call;
 
-               spin_lock_irq(&current->sighand->siglock);
-               dequeue_signal(current,&current->blocked, &sinfo);
-               spin_unlock_irq(&current->sighand->siglock);
-       }
+/*
+ * a record of an in-progress RxRPC call
+ */
+struct afs_call {
+       const struct afs_call_type *type;       /* type of call */
+       const struct afs_wait_mode *wait_mode;  /* completion wait mode */
+       wait_queue_head_t       waitq;          /* processes awaiting completion */
+       struct work_struct      async_work;     /* asynchronous work processor */
+       struct work_struct      work;           /* actual work processor */
+       struct sk_buff_head     rx_queue;       /* received packets */
+       struct rxrpc_call       *rxcall;        /* RxRPC call handle */
+       struct key              *key;           /* security for this call */
+       struct afs_server       *server;        /* server affected by incoming CM call */
+       void                    *request;       /* request data (first part) */
+       void                    *request2;      /* request data (second part) */
+       void                    *buffer;        /* reply receive buffer */
+       void                    *reply;         /* reply buffer (first part) */
+       void                    *reply2;        /* reply buffer (second part) */
+       void                    *reply3;        /* reply buffer (third part) */
+       void                    *reply4;        /* reply buffer (fourth part) */
+       enum {                                  /* call state */
+               AFS_CALL_REQUESTING,    /* request is being sent for outgoing call */
+               AFS_CALL_AWAIT_REPLY,   /* awaiting reply to outgoing call */
+               AFS_CALL_AWAIT_OP_ID,   /* awaiting op ID on incoming call */
+               AFS_CALL_AWAIT_REQUEST, /* awaiting request data on incoming call */
+               AFS_CALL_REPLYING,      /* replying to incoming call */
+               AFS_CALL_AWAIT_ACK,     /* awaiting final ACK of incoming call */
+               AFS_CALL_COMPLETE,      /* successfully completed */
+               AFS_CALL_BUSY,          /* server was busy */
+               AFS_CALL_ABORTED,       /* call was aborted */
+               AFS_CALL_ERROR,         /* call failed due to error */
+       }                       state;
+       int                     error;          /* error code */
+       unsigned                request_size;   /* size of request data */
+       unsigned                reply_max;      /* maximum size of reply */
+       unsigned                reply_size;     /* current size of reply */
+       unsigned short          offset;         /* offset into received data store */
+       unsigned char           unmarshall;     /* unmarshalling phase */
+       bool                    incoming;       /* T if incoming call */
+       u16                     service_id;     /* RxRPC service ID to call */
+       __be16                  port;           /* target UDP port */
+       __be32                  operation_ID;   /* operation ID for an incoming call */
+       u32                     count;          /* count for use in unmarshalling */
+       __be32                  tmp;            /* place to extract temporary data */
+};
+
+struct afs_call_type {
+       const char *name;
+
+       /* deliver request or reply data to a call
+        * - returning an error will cause the call to be aborted
+        */
+       int (*deliver)(struct afs_call *call, struct sk_buff *skb,
+                      bool last);
+
+       /* map an abort code to an error number */
+       int (*abort_to_error)(u32 abort_code);
+
+       /* clean up a call */
+       void (*destructor)(struct afs_call *call);
+};
+
+/*
+ * AFS superblock private data
+ * - there's one superblock per volume
+ */
+struct afs_super_info {
+       struct afs_volume       *volume;        /* volume record */
+       char                    rwparent;       /* T if parent is R/W AFS volume */
+};
+
+static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
+{
+       return sb->s_fs_info;
 }
 
+extern struct file_system_type afs_fs_type;
+
+/*
+ * entry in the cached cell catalogue
+ */
+struct afs_cache_cell {
+       char            name[AFS_MAXCELLNAME];  /* cell name (padded with NULs) */
+       struct in_addr  vl_servers[15];         /* cached cell VL servers */
+};
+
+/*
+ * AFS cell record
+ */
+struct afs_cell {
+       atomic_t                usage;
+       struct list_head        link;           /* main cell list link */
+       struct key              *anonymous_key; /* anonymous user key for this cell */
+       struct list_head        proc_link;      /* /proc cell list link */
+       struct proc_dir_entry   *proc_dir;      /* /proc dir for this cell */
+#ifdef AFS_CACHING_SUPPORT
+       struct cachefs_cookie   *cache;         /* caching cookie */
+#endif
+
+       /* server record management */
+       rwlock_t                servers_lock;   /* active server list lock */
+       struct list_head        servers;        /* active server list */
+
+       /* volume location record management */
+       struct rw_semaphore     vl_sem;         /* volume management serialisation semaphore */
+       struct list_head        vl_list;        /* cell's active VL record list */
+       spinlock_t              vl_lock;        /* vl_list lock */
+       unsigned short          vl_naddrs;      /* number of VL servers in addr list */
+       unsigned short          vl_curr_svix;   /* current server index */
+       struct in_addr          vl_addrs[AFS_CELL_MAX_ADDRS];   /* cell VL server addresses */
+
+       char                    name[0];        /* cell name - must go last */
+};
+
+/*
+ * entry in the cached volume location catalogue
+ */
+struct afs_cache_vlocation {
+       /* volume name (lowercase, padded with NULs) */
+       uint8_t                 name[AFS_MAXVOLNAME + 1];
+
+       uint8_t                 nservers;       /* number of entries used in servers[] */
+       uint8_t                 vidmask;        /* voltype mask for vid[] */
+       uint8_t                 srvtmask[8];    /* voltype masks for servers[] */
+#define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */
+#define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */
+#define AFS_VOL_VTM_BAK        0x04 /* backup version of the volume is available (on this server) */
+
+       afs_volid_t             vid[3];         /* volume IDs for R/W, R/O and Bak volumes */
+       struct in_addr          servers[8];     /* fileserver addresses */
+       time_t                  rtime;          /* last retrieval time */
+};
+
+/*
+ * volume -> vnode hash table entry
+ */
+struct afs_cache_vhash {
+       afs_voltype_t           vtype;          /* which volume variation */
+       uint8_t                 hash_bucket;    /* which hash bucket this represents */
+} __attribute__((packed));
+
+/*
+ * AFS volume location record
+ */
+struct afs_vlocation {
+       atomic_t                usage;
+       time_t                  time_of_death;  /* time at which put reduced usage to 0 */
+       struct list_head        link;           /* link in cell volume location list */
+       struct list_head        grave;          /* link in master graveyard list */
+       struct list_head        update;         /* link in master update list */
+       struct afs_cell         *cell;          /* cell to which volume belongs */
+#ifdef AFS_CACHING_SUPPORT
+       struct cachefs_cookie   *cache;         /* caching cookie */
+#endif
+       struct afs_cache_vlocation vldb;        /* volume information DB record */
+       struct afs_volume       *vols[3];       /* volume access record pointer (index by type) */
+       wait_queue_head_t       waitq;          /* status change waitqueue */
+       time_t                  update_at;      /* time at which record should be updated */
+       spinlock_t              lock;           /* access lock */
+       afs_vlocation_state_t   state;          /* volume location state */
+       unsigned short          upd_rej_cnt;    /* ENOMEDIUM count during update */
+       unsigned short          upd_busy_cnt;   /* EBUSY count during update */
+       bool                    valid;          /* T if valid */
+};
+
+/*
+ * AFS fileserver record
+ */
+struct afs_server {
+       atomic_t                usage;
+       time_t                  time_of_death;  /* time at which put reduced usage to 0 */
+       struct in_addr          addr;           /* server address */
+       struct afs_cell         *cell;          /* cell in which server resides */
+       struct list_head        link;           /* link in cell's server list */
+       struct list_head        grave;          /* link in master graveyard list */
+       struct rb_node          master_rb;      /* link in master by-addr tree */
+       struct rw_semaphore     sem;            /* access lock */
+
+       /* file service access */
+       struct rb_root          fs_vnodes;      /* vnodes backed by this server (ordered by FID) */
+       unsigned long           fs_act_jif;     /* time at which last activity occurred */
+       unsigned long           fs_dead_jif;    /* time at which no longer to be considered dead */
+       spinlock_t              fs_lock;        /* access lock */
+       int                     fs_state;       /* 0 or reason FS currently marked dead (-errno) */
+
+       /* callback promise management */
+       struct rb_root          cb_promises;    /* vnode expiration list (ordered earliest first) */
+       struct delayed_work     cb_updater;     /* callback updater */
+       struct delayed_work     cb_break_work;  /* collected break dispatcher */
+       wait_queue_head_t       cb_break_waitq; /* space available in cb_break waitqueue */
+       spinlock_t              cb_lock;        /* access lock */
+       struct afs_callback     cb_break[64];   /* ring of callbacks awaiting breaking */
+       atomic_t                cb_break_n;     /* number of pending breaks */
+       u8                      cb_break_head;  /* head of callback breaking ring */
+       u8                      cb_break_tail;  /* tail of callback breaking ring */
+};
+
+/*
+ * AFS volume access record
+ */
+struct afs_volume {
+       atomic_t                usage;
+       struct afs_cell         *cell;          /* cell to which belongs (unrefd ptr) */
+       struct afs_vlocation    *vlocation;     /* volume location */
+#ifdef AFS_CACHING_SUPPORT
+       struct cachefs_cookie   *cache;         /* caching cookie */
+#endif
+       afs_volid_t             vid;            /* volume ID */
+       afs_voltype_t           type;           /* type of volume */
+       char                    type_force;     /* force volume type (suppress R/O -> R/W) */
+       unsigned short          nservers;       /* number of server slots filled */
+       unsigned short          rjservers;      /* number of servers discarded due to -ENOMEDIUM */
+       struct afs_server       *servers[8];    /* servers on which volume resides (ordered) */
+       struct rw_semaphore     server_sem;     /* lock for accessing current server */
+};
+
+/*
+ * vnode catalogue entry
+ */
+struct afs_cache_vnode {
+       afs_vnodeid_t           vnode_id;       /* vnode ID */
+       unsigned                vnode_unique;   /* vnode ID uniquifier */
+       afs_dataversion_t       data_version;   /* data version */
+};
+
+/*
+ * AFS inode private data
+ */
+struct afs_vnode {
+       struct inode            vfs_inode;      /* the VFS's inode record */
+
+       struct afs_volume       *volume;        /* volume on which vnode resides */
+       struct afs_server       *server;        /* server currently supplying this file */
+       struct afs_fid          fid;            /* the file identifier for this inode */
+       struct afs_file_status  status;         /* AFS status info for this file */
+#ifdef AFS_CACHING_SUPPORT
+       struct cachefs_cookie   *cache;         /* caching cookie */
+#endif
+       struct afs_permits      *permits;       /* cache of permits so far obtained */
+       struct mutex            permits_lock;   /* lock for altering permits list */
+       struct mutex            validate_lock;  /* lock for validating this vnode */
+       wait_queue_head_t       update_waitq;   /* status fetch waitqueue */
+       int                     update_cnt;     /* number of outstanding ops that will update the
+                                                * status */
+       spinlock_t              lock;           /* waitqueue/flags lock */
+       unsigned long           flags;
+#define AFS_VNODE_CB_BROKEN    0               /* set if vnode's callback was broken */
+#define AFS_VNODE_UNSET                1               /* set if vnode attributes not yet set */
+#define AFS_VNODE_MODIFIED     2               /* set if vnode's data modified */
+#define AFS_VNODE_ZAP_DATA     3               /* set if vnode's data should be invalidated */
+#define AFS_VNODE_DELETED      4               /* set if vnode deleted on server */
+#define AFS_VNODE_MOUNTPOINT   5               /* set if vnode is a mountpoint symlink */
+
+       long                    acl_order;      /* ACL check count (callback break count) */
+
+       /* outstanding callback notification on this file */
+       struct rb_node          server_rb;      /* link in server->fs_vnodes */
+       struct rb_node          cb_promise;     /* link in server->cb_promises */
+       struct work_struct      cb_broken_work; /* work to be done on callback break */
+       time_t                  cb_expires;     /* time at which callback expires */
+       time_t                  cb_expires_at;  /* time used to order cb_promise */
+       unsigned                cb_version;     /* callback version */
+       unsigned                cb_expiry;      /* callback expiry time */
+       afs_callback_type_t     cb_type;        /* type of callback */
+       bool                    cb_promised;    /* true if promise still holds */
+};
+
+/*
+ * cached security record for one user's attempt to access a vnode
+ */
+struct afs_permit {
+       struct key              *key;           /* RxRPC ticket holding a security context */
+       afs_access_t            access_mask;    /* access mask for this key */
+};
+
+/*
+ * cache of security records from attempts to access a vnode
+ */
+struct afs_permits {
+       struct rcu_head         rcu;            /* disposal procedure */
+       int                     count;          /* number of records */
+       struct afs_permit       permits[0];     /* the permits so far examined */
+};
+
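The zero-length permits[] member above is the usual variable-length structure idiom: each record is allocated with room for count trailing afs_permit entries and torn down through the embedded rcu head once readers can no longer see it. A minimal sketch under those assumptions (the allocation helper is hypothetical; afs_zap_permits(), declared further down, is the real RCU callback the patch provides):

/* Hypothetical helper: allocate a permits cache with room for 'count'
 * trailing afs_permit records.  Disposal would then be deferred with
 * call_rcu(&permits->rcu, afs_zap_permits). */
static struct afs_permits *afs_alloc_permits_example(int count)
{
        struct afs_permits *permits;

        permits = kzalloc(sizeof(*permits) + count * sizeof(struct afs_permit),
                          GFP_KERNEL);
        if (!permits)
                return NULL;
        permits->count = count;
        return permits;
}
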
+/*
+ * record of one of a system's set of network interfaces
+ */
+struct afs_interface {
+       unsigned        index;          /* interface index */
+       struct in_addr  address;        /* IPv4 address bound to interface */
+       struct in_addr  netmask;        /* netmask applied to address */
+       unsigned        mtu;            /* MTU of interface */
+};
+
+/*
+ * UUID definition [internet draft]
+ * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
+ *   increments since midnight 15th October 1582
+ *   - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
+ *     time
+ * - the clock sequence is a 14-bit counter to avoid duplicate times
+ */
+struct afs_uuid {
+       u32             time_low;                       /* low part of timestamp */
+       u16             time_mid;                       /* mid part of timestamp */
+       u16             time_hi_and_version;            /* high part of timestamp and version  */
+#define AFS_UUID_TO_UNIX_TIME  0x01b21dd213814000
+#define AFS_UUID_TIMEHI_MASK   0x0fff
+#define AFS_UUID_VERSION_TIME  0x1000  /* time-based UUID */
+#define AFS_UUID_VERSION_NAME  0x3000  /* name-based UUID */
+#define AFS_UUID_VERSION_RANDOM        0x4000  /* (pseudo-)random generated UUID */
+       u8              clock_seq_hi_and_reserved;      /* clock seq hi and variant */
+#define AFS_UUID_CLOCKHI_MASK  0x3f
+#define AFS_UUID_VARIANT_STD   0x80
+       u8              clock_seq_low;                  /* clock seq low */
+       u8              node[6];                        /* spatially unique node ID (MAC addr) */
+};
+
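For clarity, a standalone userspace illustration (not part of the patch) of the timestamp packing described in the comment above: Unix time is scaled to 100ns units, rebased to 15 October 1582 by adding AFS_UUID_TO_UNIX_TIME, then split 32/16/12 across time_low, time_mid and time_hi_and_version with the version bits OR'd into the top word:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define AFS_UUID_TO_UNIX_TIME   0x01b21dd213814000ULL
#define AFS_UUID_TIMEHI_MASK    0x0fff
#define AFS_UUID_VERSION_TIME   0x1000

int main(void)
{
        /* seconds -> 100ns units, then rebase from 1970 to 1582 */
        uint64_t uuidtime = (uint64_t)time(NULL) * 10000000ULL +
                            AFS_UUID_TO_UNIX_TIME;
        uint32_t time_low = (uint32_t)uuidtime;
        uint16_t time_mid = (uint16_t)(uuidtime >> 32);
        uint16_t time_hi  = ((uuidtime >> 48) & AFS_UUID_TIMEHI_MASK) |
                            AFS_UUID_VERSION_TIME;

        printf("time_low=%08x time_mid=%04x time_hi_and_version=%04x\n",
               (unsigned)time_low, (unsigned)time_mid, (unsigned)time_hi);
        return 0;
}
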
+/*****************************************************************************/
+/*
+ * callback.c
+ */
+extern void afs_init_callback_state(struct afs_server *);
+extern void afs_broken_callback_work(struct work_struct *);
+extern void afs_break_callbacks(struct afs_server *, size_t,
+                               struct afs_callback[]);
+extern void afs_discard_callback_on_delete(struct afs_vnode *);
+extern void afs_give_up_callback(struct afs_vnode *);
+extern void afs_dispatch_give_up_callbacks(struct work_struct *);
+extern void afs_flush_callback_breaks(struct afs_server *);
+extern int __init afs_callback_update_init(void);
+extern void __exit afs_callback_update_kill(void);
+
 /*
  * cell.c
  */
@@ -60,57 +403,156 @@ extern struct list_head afs_proc_cells;
 extern struct cachefs_index_def afs_cache_cell_index_def;
 #endif
 
+#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
+extern int afs_cell_init(char *);
+extern struct afs_cell *afs_cell_create(const char *, char *);
+extern struct afs_cell *afs_cell_lookup(const char *, unsigned);
+extern struct afs_cell *afs_grab_cell(struct afs_cell *);
+extern void afs_put_cell(struct afs_cell *);
+extern void afs_cell_purge(void);
+
+/*
+ * cmservice.c
+ */
+extern bool afs_cm_incoming_call(struct afs_call *);
+
 /*
  * dir.c
  */
 extern const struct inode_operations afs_dir_inode_operations;
 extern const struct file_operations afs_dir_file_operations;
 
+extern int afs_permission(struct inode *, int, struct nameidata *);
+
 /*
  * file.c
  */
 extern const struct address_space_operations afs_fs_aops;
 extern const struct inode_operations afs_file_inode_operations;
+extern const struct file_operations afs_file_operations;
+
+extern int afs_open(struct inode *, struct file *);
+extern int afs_release(struct inode *, struct file *);
 
 #ifdef AFS_CACHING_SUPPORT
-extern int afs_cache_get_page_cookie(struct page *page,
-                                    struct cachefs_page **_page_cookie);
+extern int afs_cache_get_page_cookie(struct page *, struct cachefs_page **);
 #endif
 
 /*
- * inode.c
+ * fsclient.c
  */
-extern int afs_iget(struct super_block *sb, struct afs_fid *fid,
-                   struct inode **_inode);
-extern int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry,
-                            struct kstat *stat);
-extern void afs_clear_inode(struct inode *inode);
+extern int afs_fs_fetch_file_status(struct afs_server *, struct key *,
+                                   struct afs_vnode *, struct afs_volsync *,
+                                   const struct afs_wait_mode *);
+extern int afs_fs_give_up_callbacks(struct afs_server *,
+                                   const struct afs_wait_mode *);
+extern int afs_fs_fetch_data(struct afs_server *, struct key *,
+                            struct afs_vnode *, off_t, size_t, struct page *,
+                            const struct afs_wait_mode *);
+extern int afs_fs_create(struct afs_server *, struct key *,
+                        struct afs_vnode *, const char *, umode_t,
+                        struct afs_fid *, struct afs_file_status *,
+                        struct afs_callback *,
+                        const struct afs_wait_mode *);
+extern int afs_fs_remove(struct afs_server *, struct key *,
+                        struct afs_vnode *, const char *, bool,
+                        const struct afs_wait_mode *);
+extern int afs_fs_link(struct afs_server *, struct key *, struct afs_vnode *,
+                      struct afs_vnode *, const char *,
+                      const struct afs_wait_mode *);
+extern int afs_fs_symlink(struct afs_server *, struct key *,
+                         struct afs_vnode *, const char *, const char *,
+                         struct afs_fid *, struct afs_file_status *,
+                         const struct afs_wait_mode *);
+extern int afs_fs_rename(struct afs_server *, struct key *,
+                        struct afs_vnode *, const char *,
+                        struct afs_vnode *, const char *,
+                        const struct afs_wait_mode *);
 
 /*
- * key_afs.c
+ * inode.c
  */
-#ifdef CONFIG_KEYS
-extern int afs_key_register(void);
-extern void afs_key_unregister(void);
-#endif
+extern struct inode *afs_iget(struct super_block *, struct key *,
+                             struct afs_fid *, struct afs_file_status *,
+                             struct afs_callback *);
+extern int afs_validate(struct afs_vnode *, struct key *);
+extern int afs_inode_getattr(struct vfsmount *, struct dentry *,
+                            struct kstat *);
+extern void afs_zap_permits(struct rcu_head *);
+extern void afs_clear_inode(struct inode *);
 
 /*
  * main.c
  */
+extern struct afs_uuid afs_uuid;
 #ifdef AFS_CACHING_SUPPORT
 extern struct cachefs_netfs afs_cache_netfs;
 #endif
 
+/*
+ * misc.c
+ */
+extern int afs_abort_to_error(u32);
+
 /*
  * mntpt.c
  */
 extern const struct inode_operations afs_mntpt_inode_operations;
 extern const struct file_operations afs_mntpt_file_operations;
-extern struct afs_timer afs_mntpt_expiry_timer;
-extern struct afs_timer_ops afs_mntpt_expiry_timer_ops;
 extern unsigned long afs_mntpt_expiry_timeout;
 
-extern int afs_mntpt_check_symlink(struct afs_vnode *vnode);
+extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
+extern void afs_mntpt_kill_timer(void);
+extern void afs_umount_begin(struct vfsmount *, int);
+
+/*
+ * proc.c
+ */
+extern int afs_proc_init(void);
+extern void afs_proc_cleanup(void);
+extern int afs_proc_cell_setup(struct afs_cell *);
+extern void afs_proc_cell_remove(struct afs_cell *);
+
+/*
+ * rxrpc.c
+ */
+extern int afs_open_socket(void);
+extern void afs_close_socket(void);
+extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t,
+                        const struct afs_wait_mode *);
+extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *,
+                                           size_t, size_t);
+extern void afs_flat_call_destructor(struct afs_call *);
+extern void afs_transfer_reply(struct afs_call *, struct sk_buff *);
+extern void afs_send_empty_reply(struct afs_call *);
+extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
+extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *,
+                           size_t);
+
+/*
+ * security.c
+ */
+extern void afs_clear_permits(struct afs_vnode *);
+extern void afs_cache_permit(struct afs_vnode *, struct key *, long);
+extern struct key *afs_request_key(struct afs_cell *);
+extern int afs_permission(struct inode *, int, struct nameidata *);
+
+/*
+ * server.c
+ */
+extern spinlock_t afs_server_peer_lock;
+
+#define afs_get_server(S)                                      \
+do {                                                           \
+       _debug("GET SERVER %d", atomic_read(&(S)->usage));      \
+       atomic_inc(&(S)->usage);                                \
+} while(0)
+
+extern struct afs_server *afs_lookup_server(struct afs_cell *,
+                                           const struct in_addr *);
+extern struct afs_server *afs_find_server(const struct in_addr *);
+extern void afs_put_server(struct afs_server *);
+extern void __exit afs_purge_servers(void);
 
 /*
  * super.c
@@ -118,22 +560,211 @@ extern int afs_mntpt_check_symlink(struct afs_vnode *vnode);
 extern int afs_fs_init(void);
 extern void afs_fs_exit(void);
 
-#define AFS_CB_HASH_COUNT (PAGE_SIZE / sizeof(struct list_head))
+/*
+ * use-rtnetlink.c
+ */
+extern int afs_get_ipv4_interfaces(struct afs_interface *, size_t, bool);
+extern int afs_get_MAC_address(u8 [6]);
 
-extern struct list_head afs_cb_hash_tbl[];
-extern spinlock_t afs_cb_hash_lock;
+/*
+ * vlclient.c
+ */
+#ifdef AFS_CACHING_SUPPORT
+extern struct cachefs_index_def afs_vlocation_cache_index_def;
+#endif
 
-#define afs_cb_hash(SRV,FID) \
-       afs_cb_hash_tbl[((unsigned long)(SRV) + \
-                       (FID)->vid + (FID)->vnode + (FID)->unique) % \
-                       AFS_CB_HASH_COUNT]
+extern int afs_vl_get_entry_by_name(struct in_addr *, struct key *,
+                                   const char *, struct afs_cache_vlocation *,
+                                   const struct afs_wait_mode *);
+extern int afs_vl_get_entry_by_id(struct in_addr *, struct key *,
+                                 afs_volid_t, afs_voltype_t,
+                                 struct afs_cache_vlocation *,
+                                 const struct afs_wait_mode *);
 
 /*
- * proc.c
+ * vlocation.c
  */
-extern int afs_proc_init(void);
-extern void afs_proc_cleanup(void);
-extern int afs_proc_cell_setup(struct afs_cell *cell);
-extern void afs_proc_cell_remove(struct afs_cell *cell);
+#define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0)
+
+extern int __init afs_vlocation_update_init(void);
+extern struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *,
+                                                 struct key *,
+                                                 const char *, size_t);
+extern void afs_put_vlocation(struct afs_vlocation *);
+extern void __exit afs_vlocation_purge(void);
+
+/*
+ * vnode.c
+ */
+#ifdef AFS_CACHING_SUPPORT
+extern struct cachefs_index_def afs_vnode_cache_index_def;
+#endif
+
+extern struct afs_timer_ops afs_vnode_cb_timed_out_ops;
+
+static inline struct afs_vnode *AFS_FS_I(struct inode *inode)
+{
+       return container_of(inode, struct afs_vnode, vfs_inode);
+}
+
+static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
+{
+       return &vnode->vfs_inode;
+}
+
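The two inlines above are the standard embedded-inode pattern: the VFS's struct inode lives inside struct afs_vnode, so container_of() recovers the filesystem-private record from the inode pointer the VFS hands in, and AFS_VNODE_TO_I() goes back the other way. A small usage sketch (the function itself is hypothetical, not part of the patch):

/* Hypothetical example: mark a vnode deleted given only the VFS inode. */
static void afs_example_mark_deleted(struct inode *inode)
{
        struct afs_vnode *vnode = AFS_FS_I(inode);

        set_bit(AFS_VNODE_DELETED, &vnode->flags);
        ASSERTCMP(AFS_VNODE_TO_I(vnode), ==, inode);
}
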
+extern void afs_vnode_finalise_status_update(struct afs_vnode *,
+                                            struct afs_server *);
+extern int afs_vnode_fetch_status(struct afs_vnode *, struct afs_vnode *,
+                                 struct key *);
+extern int afs_vnode_fetch_data(struct afs_vnode *, struct key *,
+                               off_t, size_t, struct page *);
+extern int afs_vnode_create(struct afs_vnode *, struct key *, const char *,
+                           umode_t, struct afs_fid *, struct afs_file_status *,
+                           struct afs_callback *, struct afs_server **);
+extern int afs_vnode_remove(struct afs_vnode *, struct key *, const char *,
+                           bool);
+extern int afs_vnode_link(struct afs_vnode *, struct afs_vnode *, struct key *,
+                         const char *);
+extern int afs_vnode_symlink(struct afs_vnode *, struct key *, const char *,
+                            const char *, struct afs_fid *,
+                            struct afs_file_status *, struct afs_server **);
+extern int afs_vnode_rename(struct afs_vnode *, struct afs_vnode *,
+                           struct key *, const char *, const char *);
+
+/*
+ * volume.c
+ */
+#ifdef AFS_CACHING_SUPPORT
+extern struct cachefs_index_def afs_volume_cache_index_def;
+#endif
+
+#define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
+
+extern void afs_put_volume(struct afs_volume *);
+extern struct afs_volume *afs_volume_lookup(struct afs_mount_params *);
+extern struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *);
+extern int afs_volume_release_fileserver(struct afs_vnode *,
+                                        struct afs_server *, int);
+
+/*****************************************************************************/
+/*
+ * debug tracing
+ */
+extern unsigned afs_debug;
+
+#define dbgprintk(FMT,...) \
+       printk("[%x%-6.6s] "FMT"\n", smp_processor_id(), current->comm ,##__VA_ARGS__)
+
+/* make sure we maintain the format strings, even when debugging is disabled */
+static inline __attribute__((format(printf,1,2)))
+void _dbprintk(const char *fmt, ...)
+{
+}
+
+#define kenter(FMT,...)        dbgprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__)
+#define kleave(FMT,...)        dbgprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__)
+#define kdebug(FMT,...)        dbgprintk("    "FMT ,##__VA_ARGS__)
+
+
+#if defined(__KDEBUG)
+#define _enter(FMT,...)        kenter(FMT,##__VA_ARGS__)
+#define _leave(FMT,...)        kleave(FMT,##__VA_ARGS__)
+#define _debug(FMT,...)        kdebug(FMT,##__VA_ARGS__)
+
+#elif defined(CONFIG_AFS_DEBUG)
+#define AFS_DEBUG_KENTER       0x01
+#define AFS_DEBUG_KLEAVE       0x02
+#define AFS_DEBUG_KDEBUG       0x04
+
+#define _enter(FMT,...)                                        \
+do {                                                   \
+       if (unlikely(afs_debug & AFS_DEBUG_KENTER))     \
+               kenter(FMT,##__VA_ARGS__);              \
+} while (0)
+
+#define _leave(FMT,...)                                        \
+do {                                                   \
+       if (unlikely(afs_debug & AFS_DEBUG_KLEAVE))     \
+               kleave(FMT,##__VA_ARGS__);              \
+} while (0)
+
+#define _debug(FMT,...)                                        \
+do {                                                   \
+       if (unlikely(afs_debug & AFS_DEBUG_KDEBUG))     \
+               kdebug(FMT,##__VA_ARGS__);              \
+} while (0)
+
+#else
+#define _enter(FMT,...)        _dbprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__)
+#define _leave(FMT,...)        _dbprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__)
+#define _debug(FMT,...)        _dbprintk("    "FMT ,##__VA_ARGS__)
+#endif
+
+/*
+ * debug assertion checking
+ */
+#if 1 // defined(__KDEBUGALL)
+
+#define ASSERT(X)                                              \
+do {                                                           \
+       if (unlikely(!(X))) {                                   \
+               printk(KERN_ERR "\n");                          \
+               printk(KERN_ERR "AFS: Assertion failed\n");     \
+               BUG();                                          \
+       }                                                       \
+} while(0)
+
+#define ASSERTCMP(X, OP, Y)                                            \
+do {                                                                   \
+       if (unlikely(!((X) OP (Y)))) {                                  \
+               printk(KERN_ERR "\n");                                  \
+               printk(KERN_ERR "AFS: Assertion failed\n");             \
+               printk(KERN_ERR "%lu " #OP " %lu is false\n",           \
+                      (unsigned long)(X), (unsigned long)(Y));         \
+               printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n",       \
+                      (unsigned long)(X), (unsigned long)(Y));         \
+               BUG();                                                  \
+       }                                                               \
+} while(0)
+
+#define ASSERTIF(C, X)                                         \
+do {                                                           \
+       if (unlikely((C) && !(X))) {                            \
+               printk(KERN_ERR "\n");                          \
+               printk(KERN_ERR "AFS: Assertion failed\n");     \
+               BUG();                                          \
+       }                                                       \
+} while(0)
+
+#define ASSERTIFCMP(C, X, OP, Y)                                       \
+do {                                                                   \
+       if (unlikely((C) && !((X) OP (Y)))) {                           \
+               printk(KERN_ERR "\n");                                  \
+               printk(KERN_ERR "AFS: Assertion failed\n");             \
+               printk(KERN_ERR "%lu " #OP " %lu is false\n",           \
+                      (unsigned long)(X), (unsigned long)(Y));         \
+               printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n",       \
+                      (unsigned long)(X), (unsigned long)(Y));         \
+               BUG();                                                  \
+       }                                                               \
+} while(0)
+
+#else
+
+#define ASSERT(X)                              \
+do {                                           \
+} while(0)
+
+#define ASSERTCMP(X, OP, Y)                    \
+do {                                           \
+} while(0)
+
+#define ASSERTIF(C, X)                         \
+do {                                           \
+} while(0)
+
+#define ASSERTIFCMP(C, X, OP, Y)               \
+do {                                           \
+} while(0)
 
-#endif /* AFS_INTERNAL_H */
+#endif /* __KDEBUGALL */
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c
deleted file mode 100644 (file)
index 615df24..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-/* kafsasyncd.c: AFS asynchronous operation daemon
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- *
- * The AFS async daemon is used to the following:
- * - probe "dead" servers to see whether they've come back to life yet.
- * - probe "live" servers that we haven't talked to for a while to see if they are better
- *   candidates for serving than what we're currently using
- * - poll volume location servers to keep up to date volume location lists
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/freezer.h>
-#include "cell.h"
-#include "server.h"
-#include "volume.h"
-#include "kafsasyncd.h"
-#include "kafstimod.h"
-#include <rxrpc/call.h>
-#include <asm/errno.h>
-#include "internal.h"
-
-static DECLARE_COMPLETION(kafsasyncd_alive);
-static DECLARE_COMPLETION(kafsasyncd_dead);
-static DECLARE_WAIT_QUEUE_HEAD(kafsasyncd_sleepq);
-static struct task_struct *kafsasyncd_task;
-static int kafsasyncd_die;
-
-static int kafsasyncd(void *arg);
-
-static LIST_HEAD(kafsasyncd_async_attnq);
-static LIST_HEAD(kafsasyncd_async_busyq);
-static DEFINE_SPINLOCK(kafsasyncd_async_lock);
-
-static void kafsasyncd_null_call_attn_func(struct rxrpc_call *call)
-{
-}
-
-static void kafsasyncd_null_call_error_func(struct rxrpc_call *call)
-{
-}
-
-/*****************************************************************************/
-/*
- * start the async daemon
- */
-int afs_kafsasyncd_start(void)
-{
-       int ret;
-
-       ret = kernel_thread(kafsasyncd, NULL, 0);
-       if (ret < 0)
-               return ret;
-
-       wait_for_completion(&kafsasyncd_alive);
-
-       return ret;
-} /* end afs_kafsasyncd_start() */
-
-/*****************************************************************************/
-/*
- * stop the async daemon
- */
-void afs_kafsasyncd_stop(void)
-{
-       /* get rid of my daemon */
-       kafsasyncd_die = 1;
-       wake_up(&kafsasyncd_sleepq);
-       wait_for_completion(&kafsasyncd_dead);
-
-} /* end afs_kafsasyncd_stop() */
-
-/*****************************************************************************/
-/*
- * probing daemon
- */
-static int kafsasyncd(void *arg)
-{
-       struct afs_async_op *op;
-       int die;
-
-       DECLARE_WAITQUEUE(myself, current);
-
-       kafsasyncd_task = current;
-
-       printk("kAFS: Started kafsasyncd %d\n", current->pid);
-
-       daemonize("kafsasyncd");
-
-       complete(&kafsasyncd_alive);
-
-       /* loop around looking for things to attend to */
-       do {
-               set_current_state(TASK_INTERRUPTIBLE);
-               add_wait_queue(&kafsasyncd_sleepq, &myself);
-
-               for (;;) {
-                       if (!list_empty(&kafsasyncd_async_attnq) ||
-                           signal_pending(current) ||
-                           kafsasyncd_die)
-                               break;
-
-                       schedule();
-                       set_current_state(TASK_INTERRUPTIBLE);
-               }
-
-               remove_wait_queue(&kafsasyncd_sleepq, &myself);
-               set_current_state(TASK_RUNNING);
-
-               try_to_freeze();
-
-               /* discard pending signals */
-               afs_discard_my_signals();
-
-               die = kafsasyncd_die;
-
-               /* deal with the next asynchronous operation requiring
-                * attention */
-               if (!list_empty(&kafsasyncd_async_attnq)) {
-                       struct afs_async_op *op;
-
-                       _debug("@@@ Begin Asynchronous Operation");
-
-                       op = NULL;
-                       spin_lock(&kafsasyncd_async_lock);
-
-                       if (!list_empty(&kafsasyncd_async_attnq)) {
-                               op = list_entry(kafsasyncd_async_attnq.next,
-                                               struct afs_async_op, link);
-                               list_move_tail(&op->link,
-                                             &kafsasyncd_async_busyq);
-                       }
-
-                       spin_unlock(&kafsasyncd_async_lock);
-
-                       _debug("@@@ Operation %p {%p}\n",
-                              op, op ? op->ops : NULL);
-
-                       if (op)
-                               op->ops->attend(op);
-
-                       _debug("@@@ End Asynchronous Operation");
-               }
-
-       } while(!die);
-
-       /* need to kill all outstanding asynchronous operations before
-        * exiting */
-       kafsasyncd_task = NULL;
-       spin_lock(&kafsasyncd_async_lock);
-
-       /* fold the busy and attention queues together */
-       list_splice_init(&kafsasyncd_async_busyq,
-                        &kafsasyncd_async_attnq);
-
-       /* dequeue kafsasyncd from all their wait queues */
-       list_for_each_entry(op, &kafsasyncd_async_attnq, link) {
-               op->call->app_attn_func = kafsasyncd_null_call_attn_func;
-               op->call->app_error_func = kafsasyncd_null_call_error_func;
-               remove_wait_queue(&op->call->waitq, &op->waiter);
-       }
-
-       spin_unlock(&kafsasyncd_async_lock);
-
-       /* abort all the operations */
-       while (!list_empty(&kafsasyncd_async_attnq)) {
-               op = list_entry(kafsasyncd_async_attnq.next, struct afs_async_op, link);
-               list_del_init(&op->link);
-
-               rxrpc_call_abort(op->call, -EIO);
-               rxrpc_put_call(op->call);
-               op->call = NULL;
-
-               op->ops->discard(op);
-       }
-
-       /* and that's all */
-       _leave("");
-       complete_and_exit(&kafsasyncd_dead, 0);
-
-} /* end kafsasyncd() */
-
-/*****************************************************************************/
-/*
- * begin an operation
- * - place operation on busy queue
- */
-void afs_kafsasyncd_begin_op(struct afs_async_op *op)
-{
-       _enter("");
-
-       spin_lock(&kafsasyncd_async_lock);
-
-       init_waitqueue_entry(&op->waiter, kafsasyncd_task);
-       add_wait_queue(&op->call->waitq, &op->waiter);
-
-       list_move_tail(&op->link, &kafsasyncd_async_busyq);
-
-       spin_unlock(&kafsasyncd_async_lock);
-
-       _leave("");
-} /* end afs_kafsasyncd_begin_op() */
-
-/*****************************************************************************/
-/*
- * request attention for an operation
- * - move to attention queue
- */
-void afs_kafsasyncd_attend_op(struct afs_async_op *op)
-{
-       _enter("");
-
-       spin_lock(&kafsasyncd_async_lock);
-
-       list_move_tail(&op->link, &kafsasyncd_async_attnq);
-
-       spin_unlock(&kafsasyncd_async_lock);
-
-       wake_up(&kafsasyncd_sleepq);
-
-       _leave("");
-} /* end afs_kafsasyncd_attend_op() */
-
-/*****************************************************************************/
-/*
- * terminate an operation
- * - remove from either queue
- */
-void afs_kafsasyncd_terminate_op(struct afs_async_op *op)
-{
-       _enter("");
-
-       spin_lock(&kafsasyncd_async_lock);
-
-       if (!list_empty(&op->link)) {
-               list_del_init(&op->link);
-               remove_wait_queue(&op->call->waitq, &op->waiter);
-       }
-
-       spin_unlock(&kafsasyncd_async_lock);
-
-       wake_up(&kafsasyncd_sleepq);
-
-       _leave("");
-} /* end afs_kafsasyncd_terminate_op() */
diff --git a/fs/afs/kafsasyncd.h b/fs/afs/kafsasyncd.h
deleted file mode 100644 (file)
index 791803f..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/* kafsasyncd.h: AFS asynchronous operation daemon
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_KAFSASYNCD_H
-#define _LINUX_AFS_KAFSASYNCD_H
-
-#include "types.h"
-
-struct afs_async_op;
-
-struct afs_async_op_ops {
-       void (*attend)(struct afs_async_op *op);
-       void (*discard)(struct afs_async_op *op);
-};
-
-/*****************************************************************************/
-/*
- * asynchronous operation record
- */
-struct afs_async_op
-{
-       struct list_head                link;
-       struct afs_server               *server;        /* server being contacted */
-       struct rxrpc_call               *call;          /* RxRPC call performing op */
-       wait_queue_t                    waiter;         /* wait queue for kafsasyncd */
-       const struct afs_async_op_ops   *ops;           /* operations */
-};
-
-static inline void afs_async_op_init(struct afs_async_op *op,
-                                    const struct afs_async_op_ops *ops)
-{
-       INIT_LIST_HEAD(&op->link);
-       op->call = NULL;
-       op->ops = ops;
-}
-
-extern int afs_kafsasyncd_start(void);
-extern void afs_kafsasyncd_stop(void);
-
-extern void afs_kafsasyncd_begin_op(struct afs_async_op *op);
-extern void afs_kafsasyncd_attend_op(struct afs_async_op *op);
-extern void afs_kafsasyncd_terminate_op(struct afs_async_op *op);
-
-#endif /* _LINUX_AFS_KAFSASYNCD_H */
diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c
deleted file mode 100644 (file)
index 694344e..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-/* kafstimod.c: AFS timeout daemon
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/freezer.h>
-#include "cell.h"
-#include "volume.h"
-#include "kafstimod.h"
-#include <asm/errno.h>
-#include "internal.h"
-
-static DECLARE_COMPLETION(kafstimod_alive);
-static DECLARE_COMPLETION(kafstimod_dead);
-static DECLARE_WAIT_QUEUE_HEAD(kafstimod_sleepq);
-static int kafstimod_die;
-
-static LIST_HEAD(kafstimod_list);
-static DEFINE_SPINLOCK(kafstimod_lock);
-
-static int kafstimod(void *arg);
-
-/*****************************************************************************/
-/*
- * start the timeout daemon
- */
-int afs_kafstimod_start(void)
-{
-       int ret;
-
-       ret = kernel_thread(kafstimod, NULL, 0);
-       if (ret < 0)
-               return ret;
-
-       wait_for_completion(&kafstimod_alive);
-
-       return ret;
-} /* end afs_kafstimod_start() */
-
-/*****************************************************************************/
-/*
- * stop the timeout daemon
- */
-void afs_kafstimod_stop(void)
-{
-       /* get rid of my daemon */
-       kafstimod_die = 1;
-       wake_up(&kafstimod_sleepq);
-       wait_for_completion(&kafstimod_dead);
-
-} /* end afs_kafstimod_stop() */
-
-/*****************************************************************************/
-/*
- * timeout processing daemon
- */
-static int kafstimod(void *arg)
-{
-       struct afs_timer *timer;
-
-       DECLARE_WAITQUEUE(myself, current);
-
-       printk("kAFS: Started kafstimod %d\n", current->pid);
-
-       daemonize("kafstimod");
-
-       complete(&kafstimod_alive);
-
-       /* loop around looking for things to attend to */
- loop:
-       set_current_state(TASK_INTERRUPTIBLE);
-       add_wait_queue(&kafstimod_sleepq, &myself);
-
-       for (;;) {
-               unsigned long jif;
-               signed long timeout;
-
-               /* deal with the server being asked to die */
-               if (kafstimod_die) {
-                       remove_wait_queue(&kafstimod_sleepq, &myself);
-                       _leave("");
-                       complete_and_exit(&kafstimod_dead, 0);
-               }
-
-               try_to_freeze();
-
-               /* discard pending signals */
-               afs_discard_my_signals();
-
-               /* work out the time to elapse before the next event */
-               spin_lock(&kafstimod_lock);
-               if (list_empty(&kafstimod_list)) {
-                       timeout = MAX_SCHEDULE_TIMEOUT;
-               }
-               else {
-                       timer = list_entry(kafstimod_list.next,
-                                          struct afs_timer, link);
-                       timeout = timer->timo_jif;
-                       jif = jiffies;
-
-                       if (time_before_eq((unsigned long) timeout, jif))
-                               goto immediate;
-
-                       else {
-                               timeout = (long) timeout - (long) jiffies;
-                       }
-               }
-               spin_unlock(&kafstimod_lock);
-
-               schedule_timeout(timeout);
-
-               set_current_state(TASK_INTERRUPTIBLE);
-       }
-
-       /* the thing on the front of the queue needs processing
-        * - we come here with the lock held and timer pointing to the expired
-        *   entry
-        */
- immediate:
-       remove_wait_queue(&kafstimod_sleepq, &myself);
-       set_current_state(TASK_RUNNING);
-
-       _debug("@@@ Begin Timeout of %p", timer);
-
-       /* dequeue the timer */
-       list_del_init(&timer->link);
-       spin_unlock(&kafstimod_lock);
-
-       /* call the timeout function */
-       timer->ops->timed_out(timer);
-
-       _debug("@@@ End Timeout");
-       goto loop;
-
-} /* end kafstimod() */
-
-/*****************************************************************************/
-/*
- * (re-)queue a timer
- */
-void afs_kafstimod_add_timer(struct afs_timer *timer, unsigned long timeout)
-{
-       struct afs_timer *ptimer;
-       struct list_head *_p;
-
-       _enter("%p,%lu", timer, timeout);
-
-       spin_lock(&kafstimod_lock);
-
-       list_del(&timer->link);
-
-       /* the timer was deferred or reset - put it back in the queue at the
-        * right place */
-       timer->timo_jif = jiffies + timeout;
-
-       list_for_each(_p, &kafstimod_list) {
-               ptimer = list_entry(_p, struct afs_timer, link);
-               if (time_before(timer->timo_jif, ptimer->timo_jif))
-                       break;
-       }
-
-       list_add_tail(&timer->link, _p); /* insert before stopping point */
-
-       spin_unlock(&kafstimod_lock);
-
-       wake_up(&kafstimod_sleepq);
-
-       _leave("");
-} /* end afs_kafstimod_add_timer() */
-
-/*****************************************************************************/
-/*
- * dequeue a timer
- * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
- */
-int afs_kafstimod_del_timer(struct afs_timer *timer)
-{
-       int ret = 0;
-
-       _enter("%p", timer);
-
-       spin_lock(&kafstimod_lock);
-
-       if (list_empty(&timer->link))
-               ret = -ENOENT;
-       else
-               list_del_init(&timer->link);
-
-       spin_unlock(&kafstimod_lock);
-
-       wake_up(&kafstimod_sleepq);
-
-       _leave(" = %d", ret);
-       return ret;
-} /* end afs_kafstimod_del_timer() */
diff --git a/fs/afs/kafstimod.h b/fs/afs/kafstimod.h
deleted file mode 100644 (file)
index e312f1a..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/* kafstimod.h: AFS timeout daemon
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_KAFSTIMOD_H
-#define _LINUX_AFS_KAFSTIMOD_H
-
-#include "types.h"
-
-struct afs_timer;
-
-struct afs_timer_ops {
-       /* called when the front of the timer queue has timed out */
-       void (*timed_out)(struct afs_timer *timer);
-};
-
-/*****************************************************************************/
-/*
- * AFS timer/timeout record
- */
-struct afs_timer
-{
-       struct list_head                link;           /* link in timer queue */
-       unsigned long                   timo_jif;       /* timeout time */
-       const struct afs_timer_ops      *ops;           /* timeout expiry function */
-};
-
-static inline void afs_timer_init(struct afs_timer *timer,
-                                 const struct afs_timer_ops *ops)
-{
-       INIT_LIST_HEAD(&timer->link);
-       timer->ops = ops;
-}
-
-extern int afs_kafstimod_start(void);
-extern void afs_kafstimod_stop(void);
-
-extern void afs_kafstimod_add_timer(struct afs_timer *timer,
-                                   unsigned long timeout);
-extern int afs_kafstimod_del_timer(struct afs_timer *timer);
-
-#endif /* _LINUX_AFS_KAFSTIMOD_H */
diff --git a/fs/afs/main.c b/fs/afs/main.c
index f2704ba..40c2704 100644 (file)
@@ -1,4 +1,4 @@
-/* main.c: AFS client file system
+/* AFS client file system
  *
  * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/completion.h>
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/call.h>
-#include <rxrpc/peer.h>
-#include "cache.h"
-#include "cell.h"
-#include "server.h"
-#include "fsclient.h"
-#include "cmservice.h"
-#include "kafstimod.h"
-#include "kafsasyncd.h"
 #include "internal.h"
 
-struct rxrpc_transport *afs_transport;
-
-static int afs_adding_peer(struct rxrpc_peer *peer);
-static void afs_discarding_peer(struct rxrpc_peer *peer);
-
-
 MODULE_DESCRIPTION("AFS Client File System");
 MODULE_AUTHOR("Red Hat, Inc.");
 MODULE_LICENSE("GPL");
 
+unsigned afs_debug;
+module_param_named(debug, afs_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(afs_debug, "AFS debugging mask");
+
 static char *rootcell;
 
 module_param(rootcell, charp, 0);
 MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
 
-
-static struct rxrpc_peer_ops afs_peer_ops = {
-       .adding         = afs_adding_peer,
-       .discarding     = afs_discarding_peer,
-};
-
-struct list_head afs_cb_hash_tbl[AFS_CB_HASH_COUNT];
-DEFINE_SPINLOCK(afs_cb_hash_lock);
-
 #ifdef AFS_CACHING_SUPPORT
 static struct cachefs_netfs_operations afs_cache_ops = {
        .get_page_cookie        = afs_cache_get_page_cookie,
@@ -62,20 +40,63 @@ struct cachefs_netfs afs_cache_netfs = {
 };
 #endif
 
-/*****************************************************************************/
+struct afs_uuid afs_uuid;
+
+/*
+ * get a client UUID
+ */
+static int __init afs_get_client_UUID(void)
+{
+       struct timespec ts;
+       u64 uuidtime;
+       u16 clockseq;
+       int ret;
+
+       /* read the MAC address of one of the external interfaces and construct
+        * a UUID from it */
+       ret = afs_get_MAC_address(afs_uuid.node);
+       if (ret < 0)
+               return ret;
+
+       getnstimeofday(&ts);
+       uuidtime = (u64) ts.tv_sec * 1000 * 1000 * 10;
+       uuidtime += ts.tv_nsec / 100;
+       uuidtime += AFS_UUID_TO_UNIX_TIME;
+       afs_uuid.time_low = uuidtime;
+       afs_uuid.time_mid = uuidtime >> 32;
+       afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
+       afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
+
+       get_random_bytes(&clockseq, 2);
+       afs_uuid.clock_seq_low = clockseq;
+       afs_uuid.clock_seq_hi_and_reserved =
+               (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
+       afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
+
+       _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+              afs_uuid.time_low,
+              afs_uuid.time_mid,
+              afs_uuid.time_hi_and_version,
+              afs_uuid.clock_seq_hi_and_reserved,
+              afs_uuid.clock_seq_low,
+              afs_uuid.node[0], afs_uuid.node[1], afs_uuid.node[2],
+              afs_uuid.node[3], afs_uuid.node[4], afs_uuid.node[5]);
+
+       return 0;
+}
+
 /*
  * initialise the AFS client FS module
  */
 static int __init afs_init(void)
 {
-       int loop, ret;
+       int ret;
 
        printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");
 
-       /* initialise the callback hash table */
-       spin_lock_init(&afs_cb_hash_lock);
-       for (loop = AFS_CB_HASH_COUNT - 1; loop >= 0; loop--)
-               INIT_LIST_HEAD(&afs_cb_hash_tbl[loop]);
+       ret = afs_get_client_UUID();
+       if (ret < 0)
+               return ret;
 
        /* register the /proc stuff */
        ret = afs_proc_init();
@@ -86,12 +107,6 @@ static int __init afs_init(void)
        /* we want to be able to cache */
        ret = cachefs_register_netfs(&afs_cache_netfs,
                                     &afs_cache_cell_index_def);
-       if (ret < 0)
-               goto error;
-#endif
-
-#ifdef CONFIG_KEYS_TURNED_OFF
-       ret = afs_key_register();
        if (ret < 0)
                goto error_cache;
 #endif
@@ -99,58 +114,50 @@ static int __init afs_init(void)
        /* initialise the cell DB */
        ret = afs_cell_init(rootcell);
        if (ret < 0)
-               goto error_keys;
+               goto error_cell_init;
 
-       /* start the timeout daemon */
-       ret = afs_kafstimod_start();
+       /* initialise the VL update process */
+       ret = afs_vlocation_update_init();
        if (ret < 0)
-               goto error_keys;
+               goto error_vl_update_init;
 
-       /* start the async operation daemon */
-       ret = afs_kafsasyncd_start();
-       if (ret < 0)
-               goto error_kafstimod;
+       /* initialise the callback update process */
+       ret = afs_callback_update_init();
 
        /* create the RxRPC transport */
-       ret = rxrpc_create_transport(7001, &afs_transport);
+       ret = afs_open_socket();
        if (ret < 0)
-               goto error_kafsasyncd;
-
-       afs_transport->peer_ops = &afs_peer_ops;
+               goto error_open_socket;
 
        /* register the filesystems */
        ret = afs_fs_init();
        if (ret < 0)
-               goto error_transport;
+               goto error_fs;
 
        return ret;
 
- error_transport:
-       rxrpc_put_transport(afs_transport);
- error_kafsasyncd:
-       afs_kafsasyncd_stop();
- error_kafstimod:
-       afs_kafstimod_stop();
- error_keys:
-#ifdef CONFIG_KEYS_TURNED_OFF
-       afs_key_unregister();
- error_cache:
-#endif
+error_fs:
+       afs_close_socket();
+error_open_socket:
+error_vl_update_init:
+error_cell_init:
 #ifdef AFS_CACHING_SUPPORT
        cachefs_unregister_netfs(&afs_cache_netfs);
- error:
+error_cache:
 #endif
+       afs_callback_update_kill();
+       afs_vlocation_purge();
        afs_cell_purge();
        afs_proc_cleanup();
        printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
        return ret;
-} /* end afs_init() */
+}
 
 /* XXX late_initcall is kludgy, but the only alternative seems to create
  * a transport upon the first mount, which is worse. Or is it?
  */
 late_initcall(afs_init);       /* must be called after net/ to create socket */
-/*****************************************************************************/
+
 /*
  * clean up on module removal
  */
@@ -159,127 +166,16 @@ static void __exit afs_exit(void)
        printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n");
 
        afs_fs_exit();
-       rxrpc_put_transport(afs_transport);
-       afs_kafstimod_stop();
-       afs_kafsasyncd_stop();
+       afs_close_socket();
+       afs_purge_servers();
+       afs_callback_update_kill();
+       afs_vlocation_purge();
+       flush_scheduled_work();
        afs_cell_purge();
-#ifdef CONFIG_KEYS_TURNED_OFF
-       afs_key_unregister();
-#endif
 #ifdef AFS_CACHING_SUPPORT
        cachefs_unregister_netfs(&afs_cache_netfs);
 #endif
        afs_proc_cleanup();
-
-} /* end afs_exit() */
-
-module_exit(afs_exit);
-
-/*****************************************************************************/
-/*
- * notification that new peer record is being added
- * - called from krxsecd
- * - return an error to induce an abort
- * - mustn't sleep (caller holds an rwlock)
- */
-static int afs_adding_peer(struct rxrpc_peer *peer)
-{
-       struct afs_server *server;
-       int ret;
-
-       _debug("kAFS: Adding new peer %08x\n", ntohl(peer->addr.s_addr));
-
-       /* determine which server the peer resides in (if any) */
-       ret = afs_server_find_by_peer(peer, &server);
-       if (ret < 0)
-               return ret; /* none that we recognise, so abort */
-
-       _debug("Server %p{u=%d}\n", server, atomic_read(&server->usage));
-
-       _debug("Cell %p{u=%d}\n",
-              server->cell, atomic_read(&server->cell->usage));
-
-       /* cross-point the structs under a global lock */
-       spin_lock(&afs_server_peer_lock);
-       peer->user = server;
-       server->peer = peer;
-       spin_unlock(&afs_server_peer_lock);
-
-       afs_put_server(server);
-
-       return 0;
-} /* end afs_adding_peer() */
-
-/*****************************************************************************/
-/*
- * notification that a peer record is being discarded
- * - called from krxiod or krxsecd
- */
-static void afs_discarding_peer(struct rxrpc_peer *peer)
-{
-       struct afs_server *server;
-
-       _enter("%p",peer);
-
-       _debug("Discarding peer %08x (rtt=%lu.%lumS)\n",
-              ntohl(peer->addr.s_addr),
-              (long) (peer->rtt / 1000),
-              (long) (peer->rtt % 1000));
-
-       /* uncross-point the structs under a global lock */
-       spin_lock(&afs_server_peer_lock);
-       server = peer->user;
-       if (server) {
-               peer->user = NULL;
-               server->peer = NULL;
-       }
-       spin_unlock(&afs_server_peer_lock);
-
-       _leave("");
-
-} /* end afs_discarding_peer() */
-
-/*****************************************************************************/
-/*
- * clear the dead space between task_struct and kernel stack
- * - called by supplying -finstrument-functions to gcc
- */
-#if 0
-void __cyg_profile_func_enter (void *this_fn, void *call_site)
-__attribute__((no_instrument_function));
-
-void __cyg_profile_func_enter (void *this_fn, void *call_site)
-{
-       asm volatile("  movl    %%esp,%%edi     \n"
-                    "  andl    %0,%%edi        \n"
-                    "  addl    %1,%%edi        \n"
-                    "  movl    %%esp,%%ecx     \n"
-                    "  subl    %%edi,%%ecx     \n"
-                    "  shrl    $2,%%ecx        \n"
-                    "  movl    $0xedededed,%%eax     \n"
-                    "  rep stosl               \n"
-                    :
-                    : "i"(~(THREAD_SIZE - 1)), "i"(sizeof(struct thread_info))
-                    : "eax", "ecx", "edi", "memory", "cc"
-                    );
 }
 
-void __cyg_profile_func_exit(void *this_fn, void *call_site)
-__attribute__((no_instrument_function));
-
-void __cyg_profile_func_exit(void *this_fn, void *call_site)
-{
-       asm volatile("  movl    %%esp,%%edi     \n"
-                    "  andl    %0,%%edi        \n"
-                    "  addl    %1,%%edi        \n"
-                    "  movl    %%esp,%%ecx     \n"
-                    "  subl    %%edi,%%ecx     \n"
-                    "  shrl    $2,%%ecx        \n"
-                    "  movl    $0xdadadada,%%eax     \n"
-                    "  rep stosl               \n"
-                    :
-                    : "i"(~(THREAD_SIZE - 1)), "i"(sizeof(struct thread_info))
-                    : "eax", "ecx", "edi", "memory", "cc"
-                    );
-}
-#endif
+module_exit(afs_exit);
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index e4fce66..cdb9792 100644 (file)
@@ -1,6 +1,6 @@
-/* misc.c: miscellaneous bits
+/* miscellaneous bits
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/errno.h>
-#include "errors.h"
 #include "internal.h"
+#include "afs_fs.h"
 
-/*****************************************************************************/
 /*
  * convert an AFS abort code to a Linux error number
  */
-int afs_abort_to_error(int abortcode)
+int afs_abort_to_error(u32 abort_code)
 {
-       switch (abortcode) {
+       switch (abort_code) {
+       case 13:                return -EACCES;
+       case 30:                return -EROFS;
        case VSALVAGE:          return -EIO;
        case VNOVNODE:          return -ENOENT;
-       case VNOVOL:            return -ENXIO;
+       case VNOVOL:            return -ENOMEDIUM;
        case VVOLEXISTS:        return -EEXIST;
        case VNOSERVICE:        return -EIO;
        case VOFFLINE:          return -ENOENT;
@@ -33,7 +34,24 @@ int afs_abort_to_error(int abortcode)
        case VOVERQUOTA:        return -EDQUOT;
        case VBUSY:             return -EBUSY;
        case VMOVED:            return -ENXIO;
-       default:                return -EIO;
+       case 0x2f6df0c:         return -EACCES;
+       case 0x2f6df0f:         return -EBUSY;
+       case 0x2f6df10:         return -EEXIST;
+       case 0x2f6df11:         return -EXDEV;
+       case 0x2f6df13:         return -ENOTDIR;
+       case 0x2f6df14:         return -EISDIR;
+       case 0x2f6df15:         return -EINVAL;
+       case 0x2f6df1a:         return -EFBIG;
+       case 0x2f6df1b:         return -ENOSPC;
+       case 0x2f6df1d:         return -EROFS;
+       case 0x2f6df1e:         return -EMLINK;
+       case 0x2f6df20:         return -EDOM;
+       case 0x2f6df21:         return -ERANGE;
+       case 0x2f6df22:         return -EDEADLK;
+       case 0x2f6df23:         return -ENAMETOOLONG;
+       case 0x2f6df24:         return -ENOLCK;
+       case 0x2f6df26:         return -ENOTEMPTY;
+       case 0x2f6df78:         return -EDQUOT;
+       default:                return -EREMOTEIO;
        }
-
-} /* end afs_abort_to_error() */
+}
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 68495f0..b905ae3 100644 (file)
@@ -1,4 +1,4 @@
-/* mntpt.c: mountpoint management
+/* mountpoint management
  *
  * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/mnt_namespace.h>
-#include "super.h"
-#include "cell.h"
-#include "volume.h"
-#include "vnode.h"
 #include "internal.h"
 
 
@@ -30,6 +26,7 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir,
                                       struct nameidata *nd);
 static int afs_mntpt_open(struct inode *inode, struct file *file);
 static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd);
+static void afs_mntpt_expiry_timed_out(struct work_struct *work);
 
 const struct file_operations afs_mntpt_file_operations = {
        .open           = afs_mntpt_open,
@@ -43,24 +40,19 @@ const struct inode_operations afs_mntpt_inode_operations = {
 };
 
 static LIST_HEAD(afs_vfsmounts);
+static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
 
-static void afs_mntpt_expiry_timed_out(struct afs_timer *timer);
+unsigned long afs_mntpt_expiry_timeout = 10 * 60;
 
-struct afs_timer_ops afs_mntpt_expiry_timer_ops = {
-       .timed_out      = afs_mntpt_expiry_timed_out,
-};
-
-struct afs_timer afs_mntpt_expiry_timer;
-
-unsigned long afs_mntpt_expiry_timeout = 20;
-
-/*****************************************************************************/
 /*
  * check a symbolic link to see whether it actually encodes a mountpoint
  * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
  */
-int afs_mntpt_check_symlink(struct afs_vnode *vnode)
+int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
 {
+       struct file file = {
+               .private_data = key,
+       };
        struct page *page;
        size_t size;
        char *buf;
@@ -69,7 +61,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode)
        _enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique);
 
        /* read the contents of the symlink into the pagecache */
-       page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, NULL);
+       page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, &file);
        if (IS_ERR(page)) {
                ret = PTR_ERR(page);
                goto out;
@@ -85,7 +77,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode)
 
        /* examine the symlink's contents */
        size = vnode->status.size;
-       _debug("symlink to %*.*s", size, (int) size, buf);
+       _debug("symlink to %*.*s", (int) size, (int) size, buf);
 
        if (size > 2 &&
            (buf[0] == '%' || buf[0] == '#') &&
@@ -93,22 +85,20 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode)
            ) {
                _debug("symlink is a mountpoint");
                spin_lock(&vnode->lock);
-               vnode->flags |= AFS_VNODE_MOUNTPOINT;
+               set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
                spin_unlock(&vnode->lock);
        }
 
        ret = 0;
 
- out_free:
+out_free:
        kunmap(page);
        page_cache_release(page);
- out:
+out:
        _leave(" = %d", ret);
        return ret;
+}
 
-} /* end afs_mntpt_check_symlink() */
-
-/*****************************************************************************/
 /*
  * no valid lookup procedure on this sort of dir
  */
@@ -116,7 +106,7 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir,
                                       struct dentry *dentry,
                                       struct nameidata *nd)
 {
-       kenter("%p,%p{%p{%s},%s}",
+       _enter("%p,%p{%p{%s},%s}",
               dir,
               dentry,
               dentry->d_parent,
@@ -125,15 +115,14 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir,
               dentry->d_name.name);
 
        return ERR_PTR(-EREMOTE);
-} /* end afs_mntpt_lookup() */
+}
 
-/*****************************************************************************/
 /*
  * no valid open procedure on this sort of dir
  */
 static int afs_mntpt_open(struct inode *inode, struct file *file)
 {
-       kenter("%p,%p{%p{%s},%s}",
+       _enter("%p,%p{%p{%s},%s}",
               inode, file,
               file->f_path.dentry->d_parent,
               file->f_path.dentry->d_parent ?
@@ -142,9 +131,8 @@ static int afs_mntpt_open(struct inode *inode, struct file *file)
               file->f_path.dentry->d_name.name);
 
        return -EREMOTE;
-} /* end afs_mntpt_open() */
+}
 
-/*****************************************************************************/
 /*
  * create a vfsmount to be automounted
  */
@@ -157,7 +145,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
        char *buf, *devname = NULL, *options = NULL;
        int ret;
 
-       kenter("{%s}", mntpt->d_name.name);
+       _enter("{%s}", mntpt->d_name.name);
 
        BUG_ON(!mntpt->d_inode);
 
@@ -201,79 +189,108 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
                strcat(options, ",rwpath");
 
        /* try and do the mount */
-       kdebug("--- attempting mount %s -o %s ---", devname, options);
+       _debug("--- attempting mount %s -o %s ---", devname, options);
        mnt = vfs_kern_mount(&afs_fs_type, 0, devname, options);
-       kdebug("--- mount result %p ---", mnt);
+       _debug("--- mount result %p ---", mnt);
 
        free_page((unsigned long) devname);
        free_page((unsigned long) options);
-       kleave(" = %p", mnt);
+       _leave(" = %p", mnt);
        return mnt;
 
- error:
+error:
        if (page)
                page_cache_release(page);
        if (devname)
                free_page((unsigned long) devname);
        if (options)
                free_page((unsigned long) options);
-       kleave(" = %d", ret);
+       _leave(" = %d", ret);
        return ERR_PTR(ret);
-} /* end afs_mntpt_do_automount() */
+}
 
-/*****************************************************************************/
 /*
  * follow a link from a mountpoint directory, thus causing it to be mounted
  */
 static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        struct vfsmount *newmnt;
-       struct dentry *old_dentry;
        int err;
 
-       kenter("%p{%s},{%s:%p{%s}}",
+       _enter("%p{%s},{%s:%p{%s},}",
               dentry,
               dentry->d_name.name,
               nd->mnt->mnt_devname,
               dentry,
               nd->dentry->d_name.name);
 
-       newmnt = afs_mntpt_do_automount(dentry);
+       dput(nd->dentry);
+       nd->dentry = dget(dentry);
+
+       newmnt = afs_mntpt_do_automount(nd->dentry);
        if (IS_ERR(newmnt)) {
                path_release(nd);
                return (void *)newmnt;
        }
 
-       old_dentry = nd->dentry;
-       nd->dentry = dentry;
-       err = do_add_mount(newmnt, nd, 0, &afs_vfsmounts);
-       nd->dentry = old_dentry;
-
-       path_release(nd);
-
-       if (!err) {
-               mntget(newmnt);
+       mntget(newmnt);
+       err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts);
+       switch (err) {
+       case 0:
+               mntput(nd->mnt);
+               dput(nd->dentry);
                nd->mnt = newmnt;
-               dget(newmnt->mnt_root);
-               nd->dentry = newmnt->mnt_root;
+               nd->dentry = dget(newmnt->mnt_root);
+               schedule_delayed_work(&afs_mntpt_expiry_timer,
+                                     afs_mntpt_expiry_timeout * HZ);
+               break;
+       case -EBUSY:
+               /* someone else made a mount here whilst we were busy */
+               while (d_mountpoint(nd->dentry) &&
+                      follow_down(&nd->mnt, &nd->dentry))
+                       ;
+               err = 0;
+       default:
+               mntput(newmnt);
+               break;
        }
 
-       kleave(" = %d", err);
+       _leave(" = %d", err);
        return ERR_PTR(err);
-} /* end afs_mntpt_follow_link() */
+}
 
-/*****************************************************************************/
 /*
  * handle mountpoint expiry timer going off
  */
-static void afs_mntpt_expiry_timed_out(struct afs_timer *timer)
+static void afs_mntpt_expiry_timed_out(struct work_struct *work)
 {
-       kenter("");
+       _enter("");
 
-       mark_mounts_for_expiry(&afs_vfsmounts);
+       if (!list_empty(&afs_vfsmounts)) {
+               mark_mounts_for_expiry(&afs_vfsmounts);
+               schedule_delayed_work(&afs_mntpt_expiry_timer,
+                                     afs_mntpt_expiry_timeout * HZ);
+       }
+
+       _leave("");
+}
 
-       afs_kafstimod_add_timer(&afs_mntpt_expiry_timer,
-                               afs_mntpt_expiry_timeout * HZ);
+/*
+ * kill the AFS mountpoint timer if it's still running
+ */
+void afs_mntpt_kill_timer(void)
+{
+       _enter("");
 
-       kleave("");
-} /* end afs_mntpt_expiry_timed_out() */
+       ASSERT(list_empty(&afs_vfsmounts));
+       cancel_delayed_work(&afs_mntpt_expiry_timer);
+       flush_scheduled_work();
+}
+
+/*
+ * begin unmount by attempting to remove all automounted mountpoints we added
+ */
+void afs_umount_begin(struct vfsmount *vfsmnt, int flags)
+{
+       shrink_submounts(vfsmnt, &afs_vfsmounts);
+}
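
[Editorial aside, not part of the commit] The mntpt.c hunks above hand automounted vfsmounts to do_add_mount() with MNT_SHRINKABLE, treat -EBUSY as "someone mounted here first" and follow_down() onto that mount instead, and replace the old afs_timer-based expiry with a self-rearming delayed_work item. Below is a minimal sketch of that expiry pattern; the example_* names and the timeout value are hypothetical.

#include <linux/workqueue.h>
#include <linux/mount.h>
#include <linux/list.h>
#include <linux/jiffies.h>

#define EXAMPLE_EXPIRY_TIMEOUT	(10 * 60)	/* seconds; hypothetical value */

static void example_expiry_timed_out(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_expiry_timer, example_expiry_timed_out);
static LIST_HEAD(example_vfsmounts);	/* automounted mounts we added */

/* expire unused automounts, then rearm while any mounts remain */
static void example_expiry_timed_out(struct work_struct *work)
{
	if (!list_empty(&example_vfsmounts)) {
		mark_mounts_for_expiry(&example_vfsmounts);
		schedule_delayed_work(&example_expiry_timer,
				      EXAMPLE_EXPIRY_TIMEOUT * HZ);
	}
}

/* module-unload path, called once the mount list is known to be empty */
static void example_kill_expiry_timer(void)
{
	cancel_delayed_work(&example_expiry_timer);
	flush_scheduled_work();
}

The patch follows the same shape: the timer is first scheduled when do_add_mount() succeeds, rearms itself from the work handler while afs_vfsmounts is non-empty, and is cancelled and flushed by afs_mntpt_kill_timer() at unload time.
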
diff --git a/fs/afs/mount.h b/fs/afs/mount.h
deleted file mode 100644 (file)
index 9d2f46e..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/* mount.h: mount parameters
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_MOUNT_H
-#define _LINUX_AFS_MOUNT_H
-
-struct afs_mountdata {
-       const char              *volume;        /* name of volume */
-       const char              *cell;          /* name of cell containing volume */
-       const char              *cache;         /* name of cache block device */
-       size_t                  nservers;       /* number of server addresses listed */
-       uint32_t                servers[10];    /* IP addresses of servers in this cell */
-};
-
-#endif /* _LINUX_AFS_MOUNT_H */
index ae6b85b..d5601f6 100644 (file)
@@ -1,4 +1,4 @@
-/* proc.c: /proc interface for AFS
+/* /proc interface for AFS
  *
  * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -13,8 +13,6 @@
 #include <linux/module.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include "cell.h"
-#include "volume.h"
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -130,7 +128,6 @@ static const struct file_operations afs_proc_cell_servers_fops = {
        .release        = afs_proc_cell_servers_release,
 };
 
-/*****************************************************************************/
 /*
  * initialise the /proc/fs/afs/ directory
  */
@@ -142,47 +139,43 @@ int afs_proc_init(void)
 
        proc_afs = proc_mkdir("fs/afs", NULL);
        if (!proc_afs)
-               goto error;
+               goto error_dir;
        proc_afs->owner = THIS_MODULE;
 
        p = create_proc_entry("cells", 0, proc_afs);
        if (!p)
-               goto error_proc;
+               goto error_cells;
        p->proc_fops = &afs_proc_cells_fops;
        p->owner = THIS_MODULE;
 
        p = create_proc_entry("rootcell", 0, proc_afs);
        if (!p)
-               goto error_cells;
+               goto error_rootcell;
        p->proc_fops = &afs_proc_rootcell_fops;
        p->owner = THIS_MODULE;
 
        _leave(" = 0");
        return 0;
 
- error_cells:
+error_rootcell:
        remove_proc_entry("cells", proc_afs);
- error_proc:
+error_cells:
        remove_proc_entry("fs/afs", NULL);
- error:
+error_dir:
        _leave(" = -ENOMEM");
        return -ENOMEM;
+}
 
-} /* end afs_proc_init() */
-
-/*****************************************************************************/
 /*
  * clean up the /proc/fs/afs/ directory
  */
 void afs_proc_cleanup(void)
 {
+       remove_proc_entry("rootcell", proc_afs);
        remove_proc_entry("cells", proc_afs);
-
        remove_proc_entry("fs/afs", NULL);
+}
 
-} /* end afs_proc_cleanup() */
-
-/*****************************************************************************/
 /*
  * open "/proc/fs/afs/cells" which provides a summary of extant cells
  */
@@ -199,9 +192,8 @@ static int afs_proc_cells_open(struct inode *inode, struct file *file)
        m->private = PDE(inode)->data;
 
        return 0;
-} /* end afs_proc_cells_open() */
+}
 
-/*****************************************************************************/
 /*
  * set up the iterator to start reading from the cells list and return the
  * first item
@@ -225,9 +217,8 @@ static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
                        break;
 
        return _p != &afs_proc_cells ? _p : NULL;
-} /* end afs_proc_cells_start() */
+}
 
-/*****************************************************************************/
 /*
  * move to next cell in cells list
  */
@@ -241,19 +232,16 @@ static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos)
        _p = v == (void *) 1 ? afs_proc_cells.next : _p->next;
 
        return _p != &afs_proc_cells ? _p : NULL;
-} /* end afs_proc_cells_next() */
+}
 
-/*****************************************************************************/
 /*
  * clean up after reading from the cells list
  */
 static void afs_proc_cells_stop(struct seq_file *p, void *v)
 {
        up_read(&afs_proc_cells_sem);
+}
 
-} /* end afs_proc_cells_stop() */
-
-/*****************************************************************************/
 /*
  * display a header line followed by a load of cell lines
  */
@@ -261,19 +249,18 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
 {
        struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
 
-       /* display header on line 1 */
        if (v == (void *) 1) {
+               /* display header on line 1 */
                seq_puts(m, "USE NAME\n");
                return 0;
        }
 
        /* display one cell per line on subsequent lines */
-       seq_printf(m, "%3d %s\n", atomic_read(&cell->usage), cell->name);
-
+       seq_printf(m, "%3d %s\n",
+                  atomic_read(&cell->usage), cell->name);
        return 0;
-} /* end afs_proc_cells_show() */
+}
 
-/*****************************************************************************/
 /*
  * handle writes to /proc/fs/afs/cells
  * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]"
@@ -326,30 +313,32 @@ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
 
        if (strcmp(kbuf, "add") == 0) {
                struct afs_cell *cell;
-               ret = afs_cell_create(name, args, &cell);
-               if (ret < 0)
+
+               cell = afs_cell_create(name, args);
+               if (IS_ERR(cell)) {
+                       ret = PTR_ERR(cell);
                        goto done;
+               }
 
+               afs_put_cell(cell);
                printk("kAFS: Added new cell '%s'\n", name);
-       }
-       else {
+       } else {
                goto inval;
        }
 
        ret = size;
 
- done:
+done:
        kfree(kbuf);
        _leave(" = %d", ret);
        return ret;
 
- inval:
+inval:
        ret = -EINVAL;
        printk("kAFS: Invalid Command on /proc/fs/afs/cells file\n");
        goto done;
-} /* end afs_proc_cells_write() */
+}
 
-/*****************************************************************************/
 /*
  * Stubs for /proc/fs/afs/rootcell
  */
@@ -369,7 +358,6 @@ static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf,
        return 0;
 }
 
-/*****************************************************************************/
 /*
  * handle writes to /proc/fs/afs/rootcell
  * - to initialize rootcell: echo "cell.name:192.168.231.14"
@@ -407,14 +395,13 @@ static ssize_t afs_proc_rootcell_write(struct file *file,
        if (ret >= 0)
                ret = size;     /* consume everything, always */
 
- infault:
+infault:
        kfree(kbuf);
- nomem:
+nomem:
        _leave(" = %d", ret);
        return ret;
-} /* end afs_proc_rootcell_write() */
+}
 
-/*****************************************************************************/
 /*
  * initialise /proc/fs/afs/<cell>/
  */
@@ -426,25 +413,25 @@ int afs_proc_cell_setup(struct afs_cell *cell)
 
        cell->proc_dir = proc_mkdir(cell->name, proc_afs);
        if (!cell->proc_dir)
-               return -ENOMEM;
+               goto error_dir;
 
        p = create_proc_entry("servers", 0, cell->proc_dir);
        if (!p)
-               goto error_proc;
+               goto error_servers;
        p->proc_fops = &afs_proc_cell_servers_fops;
        p->owner = THIS_MODULE;
        p->data = cell;
 
        p = create_proc_entry("vlservers", 0, cell->proc_dir);
        if (!p)
-               goto error_servers;
+               goto error_vlservers;
        p->proc_fops = &afs_proc_cell_vlservers_fops;
        p->owner = THIS_MODULE;
        p->data = cell;
 
        p = create_proc_entry("volumes", 0, cell->proc_dir);
        if (!p)
-               goto error_vlservers;
+               goto error_volumes;
        p->proc_fops = &afs_proc_cell_volumes_fops;
        p->owner = THIS_MODULE;
        p->data = cell;
@@ -452,17 +439,17 @@ int afs_proc_cell_setup(struct afs_cell *cell)
        _leave(" = 0");
        return 0;
 
- error_vlservers:
+error_volumes:
        remove_proc_entry("vlservers", cell->proc_dir);
- error_servers:
+error_vlservers:
        remove_proc_entry("servers", cell->proc_dir);
- error_proc:
+error_servers:
        remove_proc_entry(cell->name, proc_afs);
+error_dir:
        _leave(" = -ENOMEM");
        return -ENOMEM;
-} /* end afs_proc_cell_setup() */
+}
 
-/*****************************************************************************/
 /*
  * remove /proc/fs/afs/<cell>/
  */
@@ -476,9 +463,8 @@ void afs_proc_cell_remove(struct afs_cell *cell)
        remove_proc_entry(cell->name, proc_afs);
 
        _leave("");
-} /* end afs_proc_cell_remove() */
+}
 
-/*****************************************************************************/
 /*
  * open "/proc/fs/afs/<cell>/volumes" which provides a summary of extant cells
  */
@@ -488,7 +474,7 @@ static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        int ret;
 
-       cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data);
+       cell = PDE(inode)->data;
        if (!cell)
                return -ENOENT;
 
@@ -500,25 +486,16 @@ static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file)
        m->private = cell;
 
        return 0;
-} /* end afs_proc_cell_volumes_open() */
+}
 
-/*****************************************************************************/
 /*
  * close the file and release the ref to the cell
  */
 static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file)
 {
-       struct afs_cell *cell = PDE(inode)->data;
-       int ret;
-
-       ret = seq_release(inode,file);
-
-       afs_put_cell(cell);
-
-       return ret;
-} /* end afs_proc_cell_volumes_release() */
+       return seq_release(inode, file);
+}
 
-/*****************************************************************************/
 /*
  * set up the iterator to start reading from the cells list and return the
  * first item
@@ -545,9 +522,8 @@ static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
                        break;
 
        return _p != &cell->vl_list ? _p : NULL;
-} /* end afs_proc_cell_volumes_start() */
+}
 
-/*****************************************************************************/
 /*
  * move to next cell in cells list
  */
@@ -562,12 +538,11 @@ static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v,
        (*_pos)++;
 
        _p = v;
-       _p = v == (void *) 1 ? cell->vl_list.next : _p->next;
+       _p = (v == (void *) 1) ? cell->vl_list.next : _p->next;
 
-       return _p != &cell->vl_list ? _p : NULL;
-} /* end afs_proc_cell_volumes_next() */
+       return (_p != &cell->vl_list) ? _p : NULL;
+}
 
-/*****************************************************************************/
 /*
  * clean up after reading from the cells list
  */
@@ -576,10 +551,18 @@ static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v)
        struct afs_cell *cell = p->private;
 
        up_read(&cell->vl_sem);
+}
 
-} /* end afs_proc_cell_volumes_stop() */
+const char afs_vlocation_states[][4] = {
+       [AFS_VL_NEW]                    = "New",
+       [AFS_VL_CREATING]               = "Crt",
+       [AFS_VL_VALID]                  = "Val",
+       [AFS_VL_NO_VOLUME]              = "NoV",
+       [AFS_VL_UPDATING]               = "Upd",
+       [AFS_VL_VOLUME_DELETED]         = "Del",
+       [AFS_VL_UNCERTAIN]              = "Unc",
+};
 
-/*****************************************************************************/
 /*
  * display a header line followed by a load of volume lines
  */
@@ -590,23 +573,22 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
 
        /* display header on line 1 */
        if (v == (void *) 1) {
-               seq_puts(m, "USE VLID[0]  VLID[1]  VLID[2]  NAME\n");
+               seq_puts(m, "USE STT VLID[0]  VLID[1]  VLID[2]  NAME\n");
                return 0;
        }
 
        /* display one cell per line on subsequent lines */
-       seq_printf(m, "%3d %08x %08x %08x %s\n",
+       seq_printf(m, "%3d %s %08x %08x %08x %s\n",
                   atomic_read(&vlocation->usage),
+                  afs_vlocation_states[vlocation->state],
                   vlocation->vldb.vid[0],
                   vlocation->vldb.vid[1],
                   vlocation->vldb.vid[2],
-                  vlocation->vldb.name
-                  );
+                  vlocation->vldb.name);
 
        return 0;
-} /* end afs_proc_cell_volumes_show() */
+}
 
-/*****************************************************************************/
 /*
  * open "/proc/fs/afs/<cell>/vlservers" which provides a list of volume
  * location server
@@ -617,11 +599,11 @@ static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        int ret;
 
-       cell = afs_get_cell_maybe((struct afs_cell**)&PDE(inode)->data);
+       cell = PDE(inode)->data;
        if (!cell)
                return -ENOENT;
 
-       ret = seq_open(file,&afs_proc_cell_vlservers_ops);
+       ret = seq_open(file, &afs_proc_cell_vlservers_ops);
        if (ret<0)
                return ret;
 
@@ -629,26 +611,17 @@ static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file)
        m->private = cell;
 
        return 0;
-} /* end afs_proc_cell_vlservers_open() */
+}
 
-/*****************************************************************************/
 /*
  * close the file and release the ref to the cell
  */
 static int afs_proc_cell_vlservers_release(struct inode *inode,
                                           struct file *file)
 {
-       struct afs_cell *cell = PDE(inode)->data;
-       int ret;
-
-       ret = seq_release(inode,file);
-
-       afs_put_cell(cell);
-
-       return ret;
-} /* end afs_proc_cell_vlservers_release() */
+       return seq_release(inode, file);
+}
 
-/*****************************************************************************/
 /*
  * set up the iterator to start reading from the cells list and return the
  * first item
@@ -672,9 +645,8 @@ static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
                return NULL;
 
        return &cell->vl_addrs[pos];
-} /* end afs_proc_cell_vlservers_start() */
+}
 
-/*****************************************************************************/
 /*
  * move to next cell in cells list
  */
@@ -692,9 +664,8 @@ static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v,
                return NULL;
 
        return &cell->vl_addrs[pos];
-} /* end afs_proc_cell_vlservers_next() */
+}
 
-/*****************************************************************************/
 /*
  * clean up after reading from the cells list
  */
@@ -703,10 +674,8 @@ static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v)
        struct afs_cell *cell = p->private;
 
        up_read(&cell->vl_sem);
+}
 
-} /* end afs_proc_cell_vlservers_stop() */
-
-/*****************************************************************************/
 /*
  * display a header line followed by a load of volume lines
  */
@@ -722,11 +691,9 @@ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
 
        /* display one cell per line on subsequent lines */
        seq_printf(m, "%u.%u.%u.%u\n", NIPQUAD(addr->s_addr));
-
        return 0;
-} /* end afs_proc_cell_vlservers_show() */
+}
 
-/*****************************************************************************/
 /*
  * open "/proc/fs/afs/<cell>/servers" which provides a summary of active
  * servers
@@ -737,7 +704,7 @@ static int afs_proc_cell_servers_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        int ret;
 
-       cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data);
+       cell = PDE(inode)->data;
        if (!cell)
                return -ENOENT;
 
@@ -747,34 +714,24 @@ static int afs_proc_cell_servers_open(struct inode *inode, struct file *file)
 
        m = file->private_data;
        m->private = cell;
-
        return 0;
-} /* end afs_proc_cell_servers_open() */
+}
 
-/*****************************************************************************/
 /*
  * close the file and release the ref to the cell
  */
 static int afs_proc_cell_servers_release(struct inode *inode,
                                         struct file *file)
 {
-       struct afs_cell *cell = PDE(inode)->data;
-       int ret;
-
-       ret = seq_release(inode, file);
-
-       afs_put_cell(cell);
-
-       return ret;
-} /* end afs_proc_cell_servers_release() */
+       return seq_release(inode, file);
+}
 
-/*****************************************************************************/
 /*
  * set up the iterator to start reading from the cells list and return the
  * first item
  */
 static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos)
-       __acquires(m->private->sv_lock)
+       __acquires(m->private->servers_lock)
 {
        struct list_head *_p;
        struct afs_cell *cell = m->private;
@@ -783,7 +740,7 @@ static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos)
        _enter("cell=%p pos=%Ld", cell, *_pos);
 
        /* lock the list against modification */
-       read_lock(&cell->sv_lock);
+       read_lock(&cell->servers_lock);
 
        /* allow for the header line */
        if (!pos)
@@ -791,14 +748,13 @@ static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos)
        pos--;
 
        /* find the n'th element in the list */
-       list_for_each(_p, &cell->sv_list)
+       list_for_each(_p, &cell->servers)
                if (!pos--)
                        break;
 
-       return _p != &cell->sv_list ? _p : NULL;
-} /* end afs_proc_cell_servers_start() */
+       return _p != &cell->servers ? _p : NULL;
+}
 
-/*****************************************************************************/
 /*
  * move to next cell in cells list
  */
@@ -813,25 +769,22 @@ static void *afs_proc_cell_servers_next(struct seq_file *p, void *v,
        (*_pos)++;
 
        _p = v;
-       _p = v == (void *) 1 ? cell->sv_list.next : _p->next;
+       _p = v == (void *) 1 ? cell->servers.next : _p->next;
 
-       return _p != &cell->sv_list ? _p : NULL;
-} /* end afs_proc_cell_servers_next() */
+       return _p != &cell->servers ? _p : NULL;
+}
 
-/*****************************************************************************/
 /*
  * clean up after reading from the cells list
  */
 static void afs_proc_cell_servers_stop(struct seq_file *p, void *v)
-       __releases(p->private->sv_lock)
+       __releases(p->private->servers_lock)
 {
        struct afs_cell *cell = p->private;
 
-       read_unlock(&cell->sv_lock);
-
-} /* end afs_proc_cell_servers_stop() */
+       read_unlock(&cell->servers_lock);
+}
 
-/*****************************************************************************/
 /*
  * display a header line followed by a load of volume lines
  */
@@ -849,10 +802,7 @@ static int afs_proc_cell_servers_show(struct seq_file *m, void *v)
        /* display one cell per line on subsequent lines */
        sprintf(ipaddr, "%u.%u.%u.%u", NIPQUAD(server->addr));
        seq_printf(m, "%3d %-15.15s %5d\n",
-                  atomic_read(&server->usage),
-                  ipaddr,
-                  server->fs_state
-                  );
+                  atomic_read(&server->usage), ipaddr, server->fs_state);
 
        return 0;
-} /* end afs_proc_cell_servers_show() */
+}
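
[Editorial aside, not part of the commit] The proc.c hunks above mainly do two things: the open and release routines now use PDE(inode)->data directly instead of taking and dropping a cell reference per open (presumably because a cell now outlives its proc entries), and the goto labels are renamed so that each one names the step that failed and falls through to undo only what was created before it. A short sketch of that unwinding idiom, mirroring the shape of the new afs_proc_init(); "fs/example" and the entry names are hypothetical stand-ins.

#include <linux/proc_fs.h>
#include <linux/errno.h>

static struct proc_dir_entry *example_dir;

static int example_proc_init(void)
{
	struct proc_dir_entry *p;

	example_dir = proc_mkdir("fs/example", NULL);
	if (!example_dir)
		goto error_dir;

	p = create_proc_entry("cells", 0, example_dir);
	if (!p)
		goto error_cells;

	p = create_proc_entry("rootcell", 0, example_dir);
	if (!p)
		goto error_rootcell;

	return 0;

	/* each label names the allocation that failed; control falls
	 * through, removing only the entries that were created */
error_rootcell:
	remove_proc_entry("cells", example_dir);
error_cells:
	remove_proc_entry("fs/example", NULL);
error_dir:
	return -ENOMEM;
}
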
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
new file mode 100644 (file)
index 0000000..e7b0473
--- /dev/null
@@ -0,0 +1,782 @@
+/* Maintain an RxRPC server socket to do AFS communications through
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include <rxrpc/packet.h>
+#include "internal.h"
+#include "afs_cm.h"
+
+static struct socket *afs_socket; /* my RxRPC socket */
+static struct workqueue_struct *afs_async_calls;
+static atomic_t afs_outstanding_calls;
+static atomic_t afs_outstanding_skbs;
+
+static void afs_wake_up_call_waiter(struct afs_call *);
+static int afs_wait_for_call_to_complete(struct afs_call *);
+static void afs_wake_up_async_call(struct afs_call *);
+static int afs_dont_wait_for_call_to_complete(struct afs_call *);
+static void afs_process_async_call(struct work_struct *);
+static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
+static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
+
+/* synchronous call management */
+const struct afs_wait_mode afs_sync_call = {
+       .rx_wakeup      = afs_wake_up_call_waiter,
+       .wait           = afs_wait_for_call_to_complete,
+};
+
+/* asynchronous call management */
+const struct afs_wait_mode afs_async_call = {
+       .rx_wakeup      = afs_wake_up_async_call,
+       .wait           = afs_dont_wait_for_call_to_complete,
+};
+
+/* asynchronous incoming call management */
+static const struct afs_wait_mode afs_async_incoming_call = {
+       .rx_wakeup      = afs_wake_up_async_call,
+};
+
+/* asynchronous incoming call initial processing */
+static const struct afs_call_type afs_RXCMxxxx = {
+       .name           = "CB.xxxx",
+       .deliver        = afs_deliver_cm_op_id,
+       .abort_to_error = afs_abort_to_error,
+};
+
+static void afs_collect_incoming_call(struct work_struct *);
+
+static struct sk_buff_head afs_incoming_calls;
+static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
+
+/*
+ * open an RxRPC socket and bind it to be a server for callback notifications
+ * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
+ */
+int afs_open_socket(void)
+{
+       struct sockaddr_rxrpc srx;
+       struct socket *socket;
+       int ret;
+
+       _enter("");
+
+       skb_queue_head_init(&afs_incoming_calls);
+
+       afs_async_calls = create_singlethread_workqueue("kafsd");
+       if (!afs_async_calls) {
+               _leave(" = -ENOMEM [wq]");
+               return -ENOMEM;
+       }
+
+       ret = sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
+       if (ret < 0) {
+               destroy_workqueue(afs_async_calls);
+               _leave(" = %d [socket]", ret);
+               return ret;
+       }
+
+       socket->sk->sk_allocation = GFP_NOFS;
+
+       /* bind the callback manager's address to make this a server socket */
+       srx.srx_family                  = AF_RXRPC;
+       srx.srx_service                 = CM_SERVICE;
+       srx.transport_type              = SOCK_DGRAM;
+       srx.transport_len               = sizeof(srx.transport.sin);
+       srx.transport.sin.sin_family    = AF_INET;
+       srx.transport.sin.sin_port      = htons(AFS_CM_PORT);
+       memset(&srx.transport.sin.sin_addr, 0,
+              sizeof(srx.transport.sin.sin_addr));
+
+       ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
+       if (ret < 0) {
+               sock_release(socket);
+               _leave(" = %d [bind]", ret);
+               return ret;
+       }
+
+       rxrpc_kernel_intercept_rx_messages(socket, afs_rx_interceptor);
+
+       afs_socket = socket;
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * close the RxRPC socket AFS was using
+ */
+void afs_close_socket(void)
+{
+       _enter("");
+
+       sock_release(afs_socket);
+
+       _debug("dework");
+       destroy_workqueue(afs_async_calls);
+
+       ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0);
+       ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0);
+       _leave("");
+}
+
+/*
+ * note that the data in a socket buffer is now delivered and that the buffer
+ * should be freed
+ */
+static void afs_data_delivered(struct sk_buff *skb)
+{
+       if (!skb) {
+               _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
+               dump_stack();
+       } else {
+               _debug("DLVR %p{%u} [%d]",
+                      skb, skb->mark, atomic_read(&afs_outstanding_skbs));
+               if (atomic_dec_return(&afs_outstanding_skbs) == -1)
+                       BUG();
+               rxrpc_kernel_data_delivered(skb);
+       }
+}
+
+/*
+ * free a socket buffer
+ */
+static void afs_free_skb(struct sk_buff *skb)
+{
+       if (!skb) {
+               _debug("FREE NULL [%d]", atomic_read(&afs_outstanding_skbs));
+               dump_stack();
+       } else {
+               _debug("FREE %p{%u} [%d]",
+                      skb, skb->mark, atomic_read(&afs_outstanding_skbs));
+               if (atomic_dec_return(&afs_outstanding_skbs) == -1)
+                       BUG();
+               rxrpc_kernel_free_skb(skb);
+       }
+}
+
+/*
+ * free a call
+ */
+static void afs_free_call(struct afs_call *call)
+{
+       _debug("DONE %p{%s} [%d]",
+              call, call->type->name, atomic_read(&afs_outstanding_calls));
+       if (atomic_dec_return(&afs_outstanding_calls) == -1)
+               BUG();
+
+       ASSERTCMP(call->rxcall, ==, NULL);
+       ASSERT(!work_pending(&call->async_work));
+       ASSERT(skb_queue_empty(&call->rx_queue));
+       ASSERT(call->type->name != NULL);
+
+       kfree(call->request);
+       kfree(call);
+}
+
+/*
+ * allocate a call with flat request and reply buffers
+ */
+struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
+                                    size_t request_size, size_t reply_size)
+{
+       struct afs_call *call;
+
+       call = kzalloc(sizeof(*call), GFP_NOFS);
+       if (!call)
+               goto nomem_call;
+
+       _debug("CALL %p{%s} [%d]",
+              call, type->name, atomic_read(&afs_outstanding_calls));
+       atomic_inc(&afs_outstanding_calls);
+
+       call->type = type;
+       call->request_size = request_size;
+       call->reply_max = reply_size;
+
+       if (request_size) {
+               call->request = kmalloc(request_size, GFP_NOFS);
+               if (!call->request)
+                       goto nomem_free;
+       }
+
+       if (reply_size) {
+               call->buffer = kmalloc(reply_size, GFP_NOFS);
+               if (!call->buffer)
+                       goto nomem_free;
+       }
+
+       init_waitqueue_head(&call->waitq);
+       skb_queue_head_init(&call->rx_queue);
+       return call;
+
+nomem_free:
+       afs_free_call(call);
+nomem_call:
+       return NULL;
+}
+
+/*
+ * clean up a call with flat buffer
+ */
+void afs_flat_call_destructor(struct afs_call *call)
+{
+       _enter("");
+
+       kfree(call->request);
+       call->request = NULL;
+       kfree(call->buffer);
+       call->buffer = NULL;
+}
+
+/*
+ * initiate a call
+ */
+int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
+                 const struct afs_wait_mode *wait_mode)
+{
+       struct sockaddr_rxrpc srx;
+       struct rxrpc_call *rxcall;
+       struct msghdr msg;
+       struct kvec iov[1];
+       int ret;
+
+       _enter("%x,{%d},", addr->s_addr, ntohs(call->port));
+
+       ASSERT(call->type != NULL);
+       ASSERT(call->type->name != NULL);
+
+       _debug("MAKE %p{%s} [%d]",
+              call, call->type->name, atomic_read(&afs_outstanding_calls));
+
+       call->wait_mode = wait_mode;
+       INIT_WORK(&call->async_work, afs_process_async_call);
+
+       memset(&srx, 0, sizeof(srx));
+       srx.srx_family = AF_RXRPC;
+       srx.srx_service = call->service_id;
+       srx.transport_type = SOCK_DGRAM;
+       srx.transport_len = sizeof(srx.transport.sin);
+       srx.transport.sin.sin_family = AF_INET;
+       srx.transport.sin.sin_port = call->port;
+       memcpy(&srx.transport.sin.sin_addr, addr, 4);
+
+       /* create a call */
+       rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
+                                        (unsigned long) call, gfp);
+       call->key = NULL;
+       if (IS_ERR(rxcall)) {
+               ret = PTR_ERR(rxcall);
+               goto error_kill_call;
+       }
+
+       call->rxcall = rxcall;
+
+       /* send the request */
+       iov[0].iov_base = call->request;
+       iov[0].iov_len  = call->request_size;
+
+       msg.msg_name            = NULL;
+       msg.msg_namelen         = 0;
+       msg.msg_iov             = (struct iovec *) iov;
+       msg.msg_iovlen          = 1;
+       msg.msg_control         = NULL;
+       msg.msg_controllen      = 0;
+       msg.msg_flags           = 0;
+
+       /* have to change the state *before* sending the last packet as RxRPC
+        * might give us the reply before it returns from sending the
+        * request */
+       call->state = AFS_CALL_AWAIT_REPLY;
+       ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size);
+       if (ret < 0)
+               goto error_do_abort;
+
+       /* at this point, an async call may no longer exist as it may have
+        * already completed */
+       return wait_mode->wait(call);
+
+error_do_abort:
+       rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
+       rxrpc_kernel_end_call(rxcall);
+       call->rxcall = NULL;
+error_kill_call:
+       call->type->destructor(call);
+       afs_free_call(call);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * handles intercepted messages that were arriving in the socket's Rx queue
+ * - called with the socket receive queue lock held to ensure message ordering
+ * - called with softirqs disabled
+ */
+static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID,
+                              struct sk_buff *skb)
+{
+       struct afs_call *call = (struct afs_call *) user_call_ID;
+
+       _enter("%p,,%u", call, skb->mark);
+
+       _debug("ICPT %p{%u} [%d]",
+              skb, skb->mark, atomic_read(&afs_outstanding_skbs));
+
+       ASSERTCMP(sk, ==, afs_socket->sk);
+       atomic_inc(&afs_outstanding_skbs);
+
+       if (!call) {
+               /* it's an incoming call for our callback service */
+               skb_queue_tail(&afs_incoming_calls, skb);
+               schedule_work(&afs_collect_incoming_call_work);
+       } else {
+               /* route the messages directly to the appropriate call */
+               skb_queue_tail(&call->rx_queue, skb);
+               call->wait_mode->rx_wakeup(call);
+       }
+
+       _leave("");
+}
+
+/*
+ * deliver messages to a call
+ */
+static void afs_deliver_to_call(struct afs_call *call)
+{
+       struct sk_buff *skb;
+       bool last;
+       u32 abort_code;
+       int ret;
+
+       _enter("");
+
+       while ((call->state == AFS_CALL_AWAIT_REPLY ||
+               call->state == AFS_CALL_AWAIT_OP_ID ||
+               call->state == AFS_CALL_AWAIT_REQUEST ||
+               call->state == AFS_CALL_AWAIT_ACK) &&
+              (skb = skb_dequeue(&call->rx_queue))) {
+               switch (skb->mark) {
+               case RXRPC_SKB_MARK_DATA:
+                       _debug("Rcv DATA");
+                       last = rxrpc_kernel_is_data_last(skb);
+                       ret = call->type->deliver(call, skb, last);
+                       switch (ret) {
+                       case 0:
+                               if (last &&
+                                   call->state == AFS_CALL_AWAIT_REPLY)
+                                       call->state = AFS_CALL_COMPLETE;
+                               break;
+                       case -ENOTCONN:
+                               abort_code = RX_CALL_DEAD;
+                               goto do_abort;
+                       case -ENOTSUPP:
+                               abort_code = RX_INVALID_OPERATION;
+                               goto do_abort;
+                       default:
+                               abort_code = RXGEN_CC_UNMARSHAL;
+                               if (call->state != AFS_CALL_AWAIT_REPLY)
+                                       abort_code = RXGEN_SS_UNMARSHAL;
+                       do_abort:
+                               rxrpc_kernel_abort_call(call->rxcall,
+                                                       abort_code);
+                               call->error = ret;
+                               call->state = AFS_CALL_ERROR;
+                               break;
+                       }
+                       afs_data_delivered(skb);
+                       skb = NULL;
+                       continue;
+               case RXRPC_SKB_MARK_FINAL_ACK:
+                       _debug("Rcv ACK");
+                       call->state = AFS_CALL_COMPLETE;
+                       break;
+               case RXRPC_SKB_MARK_BUSY:
+                       _debug("Rcv BUSY");
+                       call->error = -EBUSY;
+                       call->state = AFS_CALL_BUSY;
+                       break;
+               case RXRPC_SKB_MARK_REMOTE_ABORT:
+                       abort_code = rxrpc_kernel_get_abort_code(skb);
+                       call->error = call->type->abort_to_error(abort_code);
+                       call->state = AFS_CALL_ABORTED;
+                       _debug("Rcv ABORT %u -> %d", abort_code, call->error);
+                       break;
+               case RXRPC_SKB_MARK_NET_ERROR:
+                       call->error = -rxrpc_kernel_get_error_number(skb);
+                       call->state = AFS_CALL_ERROR;
+                       _debug("Rcv NET ERROR %d", call->error);
+                       break;
+               case RXRPC_SKB_MARK_LOCAL_ERROR:
+                       call->error = -rxrpc_kernel_get_error_number(skb);
+                       call->state = AFS_CALL_ERROR;
+                       _debug("Rcv LOCAL ERROR %d", call->error);
+                       break;
+               default:
+                       BUG();
+                       break;
+               }
+
+               afs_free_skb(skb);
+       }
+
+       /* make sure the queue is empty if the call is done with (we might have
+        * aborted the call early because of an unmarshalling error) */
+       if (call->state >= AFS_CALL_COMPLETE) {
+               while ((skb = skb_dequeue(&call->rx_queue)))
+                       afs_free_skb(skb);
+               if (call->incoming) {
+                       rxrpc_kernel_end_call(call->rxcall);
+                       call->rxcall = NULL;
+                       call->type->destructor(call);
+                       afs_free_call(call);
+               }
+       }
+
+       _leave("");
+}
+
+/*
+ * wait synchronously for a call to complete
+ */
+static int afs_wait_for_call_to_complete(struct afs_call *call)
+{
+       struct sk_buff *skb;
+       int ret;
+
+       DECLARE_WAITQUEUE(myself, current);
+
+       _enter("");
+
+       add_wait_queue(&call->waitq, &myself);
+       for (;;) {
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               /* deliver any messages that are in the queue */
+               if (!skb_queue_empty(&call->rx_queue)) {
+                       __set_current_state(TASK_RUNNING);
+                       afs_deliver_to_call(call);
+                       continue;
+               }
+
+               ret = call->error;
+               if (call->state >= AFS_CALL_COMPLETE)
+                       break;
+               ret = -EINTR;
+               if (signal_pending(current))
+                       break;
+               schedule();
+       }
+
+       remove_wait_queue(&call->waitq, &myself);
+       __set_current_state(TASK_RUNNING);
+
+       /* kill the call */
+       if (call->state < AFS_CALL_COMPLETE) {
+               _debug("call incomplete");
+               rxrpc_kernel_abort_call(call->rxcall, RX_CALL_DEAD);
+               while ((skb = skb_dequeue(&call->rx_queue)))
+                       afs_free_skb(skb);
+       }
+
+       _debug("call complete");
+       rxrpc_kernel_end_call(call->rxcall);
+       call->rxcall = NULL;
+       call->type->destructor(call);
+       afs_free_call(call);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * wake up a waiting call
+ */
+static void afs_wake_up_call_waiter(struct afs_call *call)
+{
+       wake_up(&call->waitq);
+}
+
+/*
+ * wake up an asynchronous call
+ */
+static void afs_wake_up_async_call(struct afs_call *call)
+{
+       _enter("");
+       queue_work(afs_async_calls, &call->async_work);
+}
+
+/*
+ * put a call into asynchronous mode
+ * - mustn't touch the call descriptor as the call may have completed by the
+ *   time we get here
+ */
+static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
+{
+       _enter("");
+       return -EINPROGRESS;
+}
+
+/*
+ * delete an asynchronous call
+ */
+static void afs_delete_async_call(struct work_struct *work)
+{
+       struct afs_call *call =
+               container_of(work, struct afs_call, async_work);
+
+       _enter("");
+
+       afs_free_call(call);
+
+       _leave("");
+}
+
+/*
+ * perform processing on an asynchronous call
+ * - on a multiple-thread workqueue this work item may try to run on several
+ *   CPUs at the same time
+ */
+static void afs_process_async_call(struct work_struct *work)
+{
+       struct afs_call *call =
+               container_of(work, struct afs_call, async_work);
+
+       _enter("");
+
+       if (!skb_queue_empty(&call->rx_queue))
+               afs_deliver_to_call(call);
+
+       if (call->state >= AFS_CALL_COMPLETE && call->wait_mode) {
+               if (call->wait_mode->async_complete)
+                       call->wait_mode->async_complete(call->reply,
+                                                       call->error);
+               call->reply = NULL;
+
+               /* kill the call */
+               rxrpc_kernel_end_call(call->rxcall);
+               call->rxcall = NULL;
+               if (call->type->destructor)
+                       call->type->destructor(call);
+
+               /* we can't just delete the call because the work item may be
+                * queued */
+               PREPARE_WORK(&call->async_work, afs_delete_async_call);
+               queue_work(afs_async_calls, &call->async_work);
+       }
+
+       _leave("");
+}
+
+/*
+ * empty a socket buffer into a flat reply buffer
+ */
+void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
+{
+       size_t len = skb->len;
+
+       if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0)
+               BUG();
+       call->reply_size += len;
+}
+
+/*
+ * accept the backlog of incoming calls
+ */
+static void afs_collect_incoming_call(struct work_struct *work)
+{
+       struct rxrpc_call *rxcall;
+       struct afs_call *call = NULL;
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(&afs_incoming_calls))) {
+               _debug("new call");
+
+               /* don't need the notification */
+               afs_free_skb(skb);
+
+               if (!call) {
+                       call = kzalloc(sizeof(struct afs_call), GFP_KERNEL);
+                       if (!call) {
+                               rxrpc_kernel_reject_call(afs_socket);
+                               return;
+                       }
+
+                       INIT_WORK(&call->async_work, afs_process_async_call);
+                       call->wait_mode = &afs_async_incoming_call;
+                       call->type = &afs_RXCMxxxx;
+                       init_waitqueue_head(&call->waitq);
+                       skb_queue_head_init(&call->rx_queue);
+                       call->state = AFS_CALL_AWAIT_OP_ID;
+
+                       _debug("CALL %p{%s} [%d]",
+                              call, call->type->name,
+                              atomic_read(&afs_outstanding_calls));
+                       atomic_inc(&afs_outstanding_calls);
+               }
+
+               rxcall = rxrpc_kernel_accept_call(afs_socket,
+                                                 (unsigned long) call);
+               if (!IS_ERR(rxcall)) {
+                       call->rxcall = rxcall;
+                       call = NULL;
+               }
+       }
+
+       if (call)
+               afs_free_call(call);
+}
+
+/*
+ * grab the operation ID from an incoming cache manager call
+ */
+static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
+                               bool last)
+{
+       size_t len = skb->len;
+       void *oibuf = (void *) &call->operation_ID;
+
+       _enter("{%u},{%zu},%d", call->offset, len, last);
+
+       ASSERTCMP(call->offset, <, 4);
+
+       /* the operation ID forms the first four bytes of the request data */
+       len = min_t(size_t, len, 4 - call->offset);
+       if (skb_copy_bits(skb, 0, oibuf + call->offset, len) < 0)
+               BUG();
+       if (!pskb_pull(skb, len))
+               BUG();
+       call->offset += len;
+
+       if (call->offset < 4) {
+               if (last) {
+                       _leave(" = -EBADMSG [op ID short]");
+                       return -EBADMSG;
+               }
+               _leave(" = 0 [incomplete]");
+               return 0;
+       }
+
+       call->state = AFS_CALL_AWAIT_REQUEST;
+
+       /* ask the cache manager to route the call (it'll change the call type
+        * if successful) */
+       if (!afs_cm_incoming_call(call))
+               return -ENOTSUPP;
+
+       /* pass responsibility for the remainder of this message off to the
+        * cache manager op */
+       return call->type->deliver(call, skb, last);
+}
+
+/*
+ * send an empty reply
+ */
+void afs_send_empty_reply(struct afs_call *call)
+{
+       struct msghdr msg;
+       struct iovec iov[1];
+
+       _enter("");
+
+       iov[0].iov_base         = NULL;
+       iov[0].iov_len          = 0;
+       msg.msg_name            = NULL;
+       msg.msg_namelen         = 0;
+       msg.msg_iov             = iov;
+       msg.msg_iovlen          = 0;
+       msg.msg_control         = NULL;
+       msg.msg_controllen      = 0;
+       msg.msg_flags           = 0;
+
+       call->state = AFS_CALL_AWAIT_ACK;
+       switch (rxrpc_kernel_send_data(call->rxcall, &msg, 0)) {
+       case 0:
+               _leave(" [replied]");
+               return;
+
+       case -ENOMEM:
+               _debug("oom");
+               rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
+       default:
+               rxrpc_kernel_end_call(call->rxcall);
+               call->rxcall = NULL;
+               call->type->destructor(call);
+               afs_free_call(call);
+               _leave(" [error]");
+               return;
+       }
+}
+
+/*
+ * send a simple reply
+ */
+void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
+{
+       struct msghdr msg;
+       struct iovec iov[1];
+
+       _enter("");
+
+       iov[0].iov_base         = (void *) buf;
+       iov[0].iov_len          = len;
+       msg.msg_name            = NULL;
+       msg.msg_namelen         = 0;
+       msg.msg_iov             = iov;
+       msg.msg_iovlen          = 1;
+       msg.msg_control         = NULL;
+       msg.msg_controllen      = 0;
+       msg.msg_flags           = 0;
+
+       call->state = AFS_CALL_AWAIT_ACK;
+       switch (rxrpc_kernel_send_data(call->rxcall, &msg, len)) {
+       case 0:
+               _leave(" [replied]");
+               return;
+
+       case -ENOMEM:
+               _debug("oom");
+               rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
+       default:
+               rxrpc_kernel_end_call(call->rxcall);
+               call->rxcall = NULL;
+               call->type->destructor(call);
+               afs_free_call(call);
+               _leave(" [error]");
+               return;
+       }
+}
+
+/*
+ * extract a piece of data from the received data socket buffers
+ */
+int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
+                    bool last, void *buf, size_t count)
+{
+       size_t len = skb->len;
+
+       _enter("{%u},{%zu},%d,,%zu", call->offset, len, last, count);
+
+       ASSERTCMP(call->offset, <, count);
+
+       len = min_t(size_t, len, count - call->offset);
+       if (skb_copy_bits(skb, 0, buf + call->offset, len) < 0 ||
+           !pskb_pull(skb, len))
+               BUG();
+       call->offset += len;
+
+       if (call->offset < count) {
+               if (last) {
+                       _leave(" = -EBADMSG [%d < %lu]", call->offset, count);
+                       return -EBADMSG;
+               }
+               _leave(" = -EAGAIN");
+               return -EAGAIN;
+       }
+       return 0;
+}
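
[Editorial aside, not part of the commit] rxrpc.c above gives the rest of the client two building blocks: afs_make_call(), which sends a flat request buffer and then defers to an afs_wait_mode (synchronous wait or asynchronous work-queue processing), and afs_extract_data(), which lets a deliver routine accumulate a fixed-size piece of reply across several socket buffers. A hedged sketch of how a deliver routine might drive afs_extract_data(); the function name is hypothetical, and the real per-operation deliver routines live in other files of the AFS client.

#include <linux/skbuff.h>
#include "internal.h"	/* struct afs_call, afs_extract_data() */

static int example_deliver_fixed_reply(struct afs_call *call,
				       struct sk_buff *skb, bool last)
{
	int ret;

	/* copies into call->buffer at call->offset; returns -EAGAIN until
	 * reply_max bytes have arrived, or -EBADMSG if the data ends short */
	ret = afs_extract_data(call, skb, last, call->buffer, call->reply_max);
	switch (ret) {
	case -EAGAIN:
		return 0;	/* wait for the next DATA packet */
	case 0:
		break;		/* whole reply buffered; decode it below */
	default:
		return ret;	/* afs_deliver_to_call() aborts the call */
	}

	/* ... unmarshal the reply from call->buffer here ... */
	return 0;
}
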
diff --git a/fs/afs/security.c b/fs/afs/security.c
new file mode 100644 (file)
index 0000000..f9f424d
--- /dev/null
@@ -0,0 +1,356 @@
+/* AFS security handling
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/ctype.h>
+#include <keys/rxrpc-type.h>
+#include "internal.h"
+
+/*
+ * get a key
+ */
+struct key *afs_request_key(struct afs_cell *cell)
+{
+       struct key *key;
+
+       _enter("{%x}", key_serial(cell->anonymous_key));
+
+       _debug("key %s", cell->anonymous_key->description);
+       key = request_key(&key_type_rxrpc, cell->anonymous_key->description,
+                         NULL);
+       if (IS_ERR(key)) {
+               if (PTR_ERR(key) != -ENOKEY) {
+                       _leave(" = %ld", PTR_ERR(key));
+                       return key;
+               }
+
+               /* act as anonymous user */
+               _leave(" = {%x} [anon]", key_serial(cell->anonymous_key));
+               return key_get(cell->anonymous_key);
+       } else {
+               /* act as authorised user */
+               _leave(" = {%x} [auth]", key_serial(key));
+               return key;
+       }
+}
+
+/*
+ * dispose of a permits list
+ */
+void afs_zap_permits(struct rcu_head *rcu)
+{
+       struct afs_permits *permits =
+               container_of(rcu, struct afs_permits, rcu);
+       int loop;
+
+       _enter("{%d}", permits->count);
+
+       for (loop = permits->count - 1; loop >= 0; loop--)
+               key_put(permits->permits[loop].key);
+       kfree(permits);
+}
+
+/*
+ * dispose of a permits list in which all the key pointers have been copied
+ */
+static void afs_dispose_of_permits(struct rcu_head *rcu)
+{
+       struct afs_permits *permits =
+               container_of(rcu, struct afs_permits, rcu);
+
+       _enter("{%d}", permits->count);
+
+       kfree(permits);
+}
+
+/*
+ * get the authorising vnode - this is the specified inode itself if it's a
+ * directory or it's the parent directory if the specified inode is a file or
+ * symlink
+ * - the caller must release the ref on the inode
+ */
+static struct afs_vnode *afs_get_auth_inode(struct afs_vnode *vnode,
+                                           struct key *key)
+{
+       struct afs_vnode *auth_vnode;
+       struct inode *auth_inode;
+
+       _enter("");
+
+       if (S_ISDIR(vnode->vfs_inode.i_mode)) {
+               auth_inode = igrab(&vnode->vfs_inode);
+               ASSERT(auth_inode != NULL);
+       } else {
+               auth_inode = afs_iget(vnode->vfs_inode.i_sb, key,
+                                     &vnode->status.parent, NULL, NULL);
+               if (IS_ERR(auth_inode))
+                       return ERR_PTR(PTR_ERR(auth_inode));
+       }
+
+       auth_vnode = AFS_FS_I(auth_inode);
+       _leave(" = {%x}", auth_vnode->fid.vnode);
+       return auth_vnode;
+}
+
+/*
+ * clear the permit cache on a directory vnode
+ */
+void afs_clear_permits(struct afs_vnode *vnode)
+{
+       struct afs_permits *permits;
+
+       _enter("{%x}", vnode->fid.vnode);
+
+       mutex_lock(&vnode->permits_lock);
+       permits = vnode->permits;
+       rcu_assign_pointer(vnode->permits, NULL);
+       mutex_unlock(&vnode->permits_lock);
+
+       if (permits)
+               call_rcu(&permits->rcu, afs_zap_permits);
+       _leave("");
+}
+
+/*
+ * add the result obtained for a vnode to its or its parent directory's cache
+ * for the key used to access it
+ */
+void afs_cache_permit(struct afs_vnode *vnode, struct key *key, long acl_order)
+{
+       struct afs_permits *permits, *xpermits;
+       struct afs_permit *permit;
+       struct afs_vnode *auth_vnode;
+       int count, loop;
+
+       _enter("{%x},%x,%lx", vnode->fid.vnode, key_serial(key), acl_order);
+
+       auth_vnode = afs_get_auth_inode(vnode, key);
+       if (IS_ERR(auth_vnode)) {
+               _leave(" [get error %ld]", PTR_ERR(auth_vnode));
+               return;
+       }
+
+       mutex_lock(&auth_vnode->permits_lock);
+
+       /* guard against a rename being detected whilst we waited for the
+        * lock */
+       if (memcmp(&auth_vnode->fid, &vnode->status.parent,
+                  sizeof(struct afs_fid)) != 0) {
+               _debug("renamed");
+               goto out_unlock;
+       }
+
+       /* have to be careful as the directory's callback may be broken between
+        * us receiving the status we're trying to cache and us getting the
+        * lock to update the cache for the status */
+       if (auth_vnode->acl_order - acl_order > 0) {
+               _debug("ACL changed?");
+               goto out_unlock;
+       }
+
+       /* always update the anonymous mask */
+       _debug("anon access %x", vnode->status.anon_access);
+       auth_vnode->status.anon_access = vnode->status.anon_access;
+       if (key == vnode->volume->cell->anonymous_key)
+               goto out_unlock;
+
+       xpermits = auth_vnode->permits;
+       count = 0;
+       if (xpermits) {
+               /* see if the permit is already in the list
+                * - if it is then we just amend the list
+                */
+               count = xpermits->count;
+               permit = xpermits->permits;
+               for (loop = count; loop > 0; loop--) {
+                       if (permit->key == key) {
+                               permit->access_mask =
+                                       vnode->status.caller_access;
+                               goto out_unlock;
+                       }
+                       permit++;
+               }
+       }
+
+       permits = kmalloc(sizeof(*permits) + sizeof(*permit) * (count + 1),
+                         GFP_NOFS);
+       if (!permits)
+               goto out_unlock;
+
+       memcpy(permits->permits, xpermits->permits,
+              count * sizeof(struct afs_permit));
+
+       _debug("key %x access %x",
+              key_serial(key), vnode->status.caller_access);
+       permits->permits[count].access_mask = vnode->status.caller_access;
+       permits->permits[count].key = key_get(key);
+       permits->count = count + 1;
+
+       rcu_assign_pointer(auth_vnode->permits, permits);
+       if (xpermits)
+               call_rcu(&xpermits->rcu, afs_dispose_of_permits);
+
+out_unlock:
+       mutex_unlock(&auth_vnode->permits_lock);
+       iput(&auth_vnode->vfs_inode);
+       _leave("");
+}
+
+/*
+ * check with the fileserver to see if the directory or parent directory is
+ * permitted to be accessed with this authorisation, and if so, what access it
+ * is granted
+ */
+static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
+                           afs_access_t *_access)
+{
+       struct afs_permits *permits;
+       struct afs_permit *permit;
+       struct afs_vnode *auth_vnode;
+       bool valid;
+       int loop, ret;
+
+       _enter("");
+
+       auth_vnode = afs_get_auth_inode(vnode, key);
+       if (IS_ERR(auth_vnode)) {
+               *_access = 0;
+               _leave(" = %ld", PTR_ERR(auth_vnode));
+               return PTR_ERR(auth_vnode);
+       }
+
+       ASSERT(S_ISDIR(auth_vnode->vfs_inode.i_mode));
+
+       /* check the permits to see if we've got one yet */
+       if (key == auth_vnode->volume->cell->anonymous_key) {
+               _debug("anon");
+               *_access = auth_vnode->status.anon_access;
+               valid = true;
+       } else {
+               valid = false;
+               rcu_read_lock();
+               permits = rcu_dereference(auth_vnode->permits);
+               if (permits) {
+                       permit = permits->permits;
+                       for (loop = permits->count; loop > 0; loop--) {
+                               if (permit->key == key) {
+                                       _debug("found in cache");
+                                       *_access = permit->access_mask;
+                                       valid = true;
+                                       break;
+                               }
+                               permit++;
+                       }
+               }
+               rcu_read_unlock();
+       }
+
+       if (!valid) {
+               /* check the status on the file we're actually interested in
+                * (the post-processing will cache the result on auth_vnode) */
+               _debug("no valid permit");
+
+               set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
+               ret = afs_vnode_fetch_status(vnode, auth_vnode, key);
+               if (ret < 0) {
+                       iput(&auth_vnode->vfs_inode);
+                       *_access = 0;
+                       _leave(" = %d", ret);
+                       return ret;
+               }
+       }
+
+       *_access = vnode->status.caller_access;
+       iput(&auth_vnode->vfs_inode);
+       _leave(" = 0 [access %x]", *_access);
+       return 0;
+}
+
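
The read side in afs_check_permit() is the matching half of the idiom: the array pointer is sampled once under rcu_read_lock() with rcu_dereference(), walked without taking any spinlock or mutex, and never written to. Continuing the illustrative int_set structure from the sketch above (add <linux/types.h> for bool):

/* Return true if 'value' is in the currently-published set. */
static bool int_set_contains(struct int_set **slot, int value)
{
	struct int_set *set;
	bool found = false;
	int i;

	rcu_read_lock();
	set = rcu_dereference(*slot);
	if (set) {
		for (i = 0; i < set->count; i++) {
			if (set->entry[i] == value) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}
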
+/*
+ * check the permissions on an AFS file
+ * - AFS ACLs are attached to directories only, and a file is controlled by its
+ *   parent directory's ACL
+ */
+int afs_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+       struct afs_vnode *vnode = AFS_FS_I(inode);
+       afs_access_t access;
+       struct key *key;
+       int ret;
+
+       _enter("{{%x:%x},%lx},%x,",
+              vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask);
+
+       key = afs_request_key(vnode->volume->cell);
+       if (IS_ERR(key)) {
+               _leave(" = %ld [key]", PTR_ERR(key));
+               return PTR_ERR(key);
+       }
+
+       /* if the promise has expired, we need to check the server again */
+       if (!vnode->cb_promised) {
+               _debug("not promised");
+               ret = afs_vnode_fetch_status(vnode, NULL, key);
+               if (ret < 0)
+                       goto error;
+               _debug("new promise [fl=%lx]", vnode->flags);
+       }
+
+       /* check the permits to see if we've got one yet */
+       ret = afs_check_permit(vnode, key, &access);
+       if (ret < 0)
+               goto error;
+
+       /* interpret the access mask */
+       _debug("REQ %x ACC %x on %s",
+              mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file");
+
+       if (S_ISDIR(inode->i_mode)) {
+               if (mask & MAY_EXEC) {
+                       if (!(access & AFS_ACE_LOOKUP))
+                               goto permission_denied;
+               } else if (mask & MAY_READ) {
+                       if (!(access & AFS_ACE_READ))
+                               goto permission_denied;
+               } else if (mask & MAY_WRITE) {
+                       if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */
+                                       AFS_ACE_INSERT | /* create, mkdir, symlink, rename to */
+                                       AFS_ACE_WRITE))) /* chmod */
+                               goto permission_denied;
+               } else {
+                       BUG();
+               }
+       } else {
+               if (!(access & AFS_ACE_LOOKUP))
+                       goto permission_denied;
+               if (mask & (MAY_EXEC | MAY_READ)) {
+                       if (!(access & AFS_ACE_READ))
+                               goto permission_denied;
+               } else if (mask & MAY_WRITE) {
+                       if (!(access & AFS_ACE_WRITE))
+                               goto permission_denied;
+               }
+       }
+
+       key_put(key);
+       ret = generic_permission(inode, mask, NULL);
+       _leave(" = %d", ret);
+       return ret;
+
+permission_denied:
+       ret = -EACCES;
+error:
+       key_put(key);
+       _leave(" = %d", ret);
+       return ret;
+}
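
The mask interpretation in afs_permission() is the crux of AFS's model: the ACL lives on the directory, so operations on a directory check that directory's own ACE bits, while operations on a file check the bits granted by its parent directory. A compact, standalone restatement of that decision table in plain C; the MAY_* and ACE_* bit values here are placeholders standing in for the kernel's definitions, not copied from them:

#include <stdbool.h>

#define MAY_EXEC   0x1		/* placeholder values for illustration only */
#define MAY_WRITE  0x2
#define MAY_READ   0x4

#define ACE_READ   0x01		/* AFS 'r' */
#define ACE_WRITE  0x02		/* AFS 'w' */
#define ACE_INSERT 0x04		/* AFS 'i' */
#define ACE_LOOKUP 0x08		/* AFS 'l' */
#define ACE_DELETE 0x10		/* AFS 'd' */

/* Mirror of the checks above: 'access' is what the governing directory's ACL
 * grants the caller; 'mask' is what the VFS is asking to do. */
static bool afs_mask_permitted(bool is_dir, int mask, unsigned int access)
{
	if (is_dir) {
		if (mask & MAY_EXEC)	/* traverse/look up names in the dir */
			return access & ACE_LOOKUP;
		if (mask & MAY_READ)	/* enumerate the dir's entries */
			return access & ACE_READ;
		if (mask & MAY_WRITE)	/* create, unlink, rename, chmod */
			return access & (ACE_DELETE | ACE_INSERT | ACE_WRITE);
		return true;
	}

	/* a file is invisible without 'l' on its parent directory */
	if (!(access & ACE_LOOKUP))
		return false;
	if (mask & (MAY_EXEC | MAY_READ))
		return access & ACE_READ;
	if (mask & MAY_WRITE)
		return access & ACE_WRITE;
	return true;
}

In the kernel function the result is additionally filtered through generic_permission(), so the UNIX mode bits still apply on top of whatever the ACL grants.
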
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 44aff81..96bb23b 100644 (file)
@@ -1,6 +1,6 @@
-/* server.c: AFS server record management
+/* AFS server record management
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/connection.h>
-#include "volume.h"
-#include "cell.h"
-#include "server.h"
-#include "transport.h"
-#include "vlclient.h"
-#include "kafstimod.h"
 #include "internal.h"
 
-DEFINE_SPINLOCK(afs_server_peer_lock);
+unsigned afs_server_timeout = 10;      /* server timeout in seconds */
 
-#define FS_SERVICE_ID          1       /* AFS Volume Location Service ID */
-#define VL_SERVICE_ID          52      /* AFS Volume Location Service ID */
+static void afs_reap_server(struct work_struct *);
 
-static void __afs_server_timeout(struct afs_timer *timer)
+/* tree of all the servers, indexed by IP address */
+static struct rb_root afs_servers = RB_ROOT;
+static DEFINE_RWLOCK(afs_servers_lock);
+
+/* LRU list of all the servers not currently in use */
+static LIST_HEAD(afs_server_graveyard);
+static DEFINE_SPINLOCK(afs_server_graveyard_lock);
+static DECLARE_DELAYED_WORK(afs_server_reaper, afs_reap_server);
+
+/*
+ * install a server record in the master tree
+ */
+static int afs_install_server(struct afs_server *server)
 {
-       struct afs_server *server =
-               list_entry(timer, struct afs_server, timeout);
+       struct afs_server *xserver;
+       struct rb_node **pp, *p;
+       int ret;
 
-       _debug("SERVER TIMEOUT [%p{u=%d}]",
-              server, atomic_read(&server->usage));
+       _enter("%p", server);
 
-       afs_server_do_timeout(server);
-}
+       write_lock(&afs_servers_lock);
+
+       ret = -EEXIST;
+       pp = &afs_servers.rb_node;
+       p = NULL;
+       while (*pp) {
+               p = *pp;
+               _debug("- consider %p", p);
+               xserver = rb_entry(p, struct afs_server, master_rb);
+               if (server->addr.s_addr < xserver->addr.s_addr)
+                       pp = &(*pp)->rb_left;
+               else if (server->addr.s_addr > xserver->addr.s_addr)
+                       pp = &(*pp)->rb_right;
+               else
+                       goto error;
+       }
 
-static const struct afs_timer_ops afs_server_timer_ops = {
-       .timed_out      = __afs_server_timeout,
-};
+       rb_link_node(&server->master_rb, p, pp);
+       rb_insert_color(&server->master_rb, &afs_servers);
+       ret = 0;
+
+error:
+       write_unlock(&afs_servers_lock);
+       return ret;
+}
 
-/*****************************************************************************/
 /*
- * lookup a server record in a cell
- * - TODO: search the cell's server list
+ * allocate a new server record
  */
-int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr,
-                     struct afs_server **_server)
+static struct afs_server *afs_alloc_server(struct afs_cell *cell,
+                                          const struct in_addr *addr)
 {
-       struct afs_server *server, *active, *zombie;
-       int loop;
+       struct afs_server *server;
 
-       _enter("%p,%08x,", cell, ntohl(addr->s_addr));
+       _enter("");
 
-       /* allocate and initialise a server record */
        server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
-       if (!server) {
-               _leave(" = -ENOMEM");
-               return -ENOMEM;
+       if (server) {
+               atomic_set(&server->usage, 1);
+               server->cell = cell;
+
+               INIT_LIST_HEAD(&server->link);
+               INIT_LIST_HEAD(&server->grave);
+               init_rwsem(&server->sem);
+               spin_lock_init(&server->fs_lock);
+               server->fs_vnodes = RB_ROOT;
+               server->cb_promises = RB_ROOT;
+               spin_lock_init(&server->cb_lock);
+               init_waitqueue_head(&server->cb_break_waitq);
+               INIT_DELAYED_WORK(&server->cb_break_work,
+                                 afs_dispatch_give_up_callbacks);
+
+               memcpy(&server->addr, addr, sizeof(struct in_addr));
+               server->addr.s_addr = addr->s_addr;
        }
 
-       atomic_set(&server->usage, 1);
-
-       INIT_LIST_HEAD(&server->link);
-       init_rwsem(&server->sem);
-       INIT_LIST_HEAD(&server->fs_callq);
-       spin_lock_init(&server->fs_lock);
-       INIT_LIST_HEAD(&server->cb_promises);
-       spin_lock_init(&server->cb_lock);
-
-       for (loop = 0; loop < AFS_SERVER_CONN_LIST_SIZE; loop++)
-               server->fs_conn_cnt[loop] = 4;
+       _leave(" = %p{%d}", server, atomic_read(&server->usage));
+       return server;
+}
 
-       memcpy(&server->addr, addr, sizeof(struct in_addr));
-       server->addr.s_addr = addr->s_addr;
+/*
+ * get an FS-server record for a cell
+ */
+struct afs_server *afs_lookup_server(struct afs_cell *cell,
+                                    const struct in_addr *addr)
+{
+       struct afs_server *server, *candidate;
 
-       afs_timer_init(&server->timeout, &afs_server_timer_ops);
+       _enter("%p,"NIPQUAD_FMT, cell, NIPQUAD(addr->s_addr));
 
-       /* add to the cell */
-       write_lock(&cell->sv_lock);
+       /* quick scan of the list to see if we already have the server */
+       read_lock(&cell->servers_lock);
 
-       /* check the active list */
-       list_for_each_entry(active, &cell->sv_list, link) {
-               if (active->addr.s_addr == addr->s_addr)
-                       goto use_active_server;
+       list_for_each_entry(server, &cell->servers, link) {
+               if (server->addr.s_addr == addr->s_addr)
+                       goto found_server_quickly;
        }
+       read_unlock(&cell->servers_lock);
 
-       /* check the inactive list */
-       spin_lock(&cell->sv_gylock);
-       list_for_each_entry(zombie, &cell->sv_graveyard, link) {
-               if (zombie->addr.s_addr == addr->s_addr)
-                       goto resurrect_server;
+       candidate = afs_alloc_server(cell, addr);
+       if (!candidate) {
+               _leave(" = -ENOMEM");
+               return ERR_PTR(-ENOMEM);
        }
-       spin_unlock(&cell->sv_gylock);
 
-       afs_get_cell(cell);
-       server->cell = cell;
-       list_add_tail(&server->link, &cell->sv_list);
+       write_lock(&cell->servers_lock);
 
-       write_unlock(&cell->sv_lock);
+       /* check the cell's server list again */
+       list_for_each_entry(server, &cell->servers, link) {
+               if (server->addr.s_addr == addr->s_addr)
+                       goto found_server;
+       }
 
-       *_server = server;
-       _leave(" = 0 (%p)", server);
-       return 0;
+       _debug("new");
+       server = candidate;
+       if (afs_install_server(server) < 0)
+               goto server_in_two_cells;
 
-       /* found a matching active server */
- use_active_server:
-       _debug("active server");
-       afs_get_server(active);
-       write_unlock(&cell->sv_lock);
+       afs_get_cell(cell);
+       list_add_tail(&server->link, &cell->servers);
+
+       write_unlock(&cell->servers_lock);
+       _leave(" = %p{%d}", server, atomic_read(&server->usage));
+       return server;
+
+       /* found a matching server quickly */
+found_server_quickly:
+       _debug("found quickly");
+       afs_get_server(server);
+       read_unlock(&cell->servers_lock);
+no_longer_unused:
+       if (!list_empty(&server->grave)) {
+               spin_lock(&afs_server_graveyard_lock);
+               list_del_init(&server->grave);
+               spin_unlock(&afs_server_graveyard_lock);
+       }
+       _leave(" = %p{%d}", server, atomic_read(&server->usage));
+       return server;
+
+       /* found a matching server on the second pass */
+found_server:
+       _debug("found");
+       afs_get_server(server);
+       write_unlock(&cell->servers_lock);
+       kfree(candidate);
+       goto no_longer_unused;
+
+       /* found a server that seems to be in two cells */
+server_in_two_cells:
+       write_unlock(&cell->servers_lock);
+       kfree(candidate);
+       printk(KERN_NOTICE "kAFS:"
+              " Server "NIPQUAD_FMT" appears to be in two cells\n",
+              NIPQUAD(*addr));
+       _leave(" = -EEXIST");
+       return ERR_PTR(-EEXIST);
+}
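
afs_lookup_server() is an instance of the usual optimistic find-or-create pattern: scan under the read lock, allocate a candidate with no locks held (the allocation may sleep), then retake the write lock and scan again before installing, discarding the candidate if another thread installed the same key in the meantime. A generic sketch of that shape with hypothetical types (the real function additionally indexes the record in the global rb-tree and takes a reference on the cell):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/err.h>

struct item {
	struct list_head	link;
	atomic_t		usage;
	u32			key;
};

static struct item *find_or_create(struct list_head *list, rwlock_t *lock,
				   u32 key)
{
	struct item *it, *candidate;

	/* fast path: hope the record already exists */
	read_lock(lock);
	list_for_each_entry(it, list, link) {
		if (it->key == key) {
			atomic_inc(&it->usage);
			read_unlock(lock);
			return it;
		}
	}
	read_unlock(lock);

	/* slow path: build a candidate outside the lock, then recheck */
	candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
	if (!candidate)
		return ERR_PTR(-ENOMEM);
	candidate->key = key;
	atomic_set(&candidate->usage, 1);

	write_lock(lock);
	list_for_each_entry(it, list, link) {
		if (it->key == key) {
			/* lost the race: use the winner, discard ours */
			atomic_inc(&it->usage);
			write_unlock(lock);
			kfree(candidate);
			return it;
		}
	}
	list_add_tail(&candidate->link, list);
	write_unlock(lock);
	return candidate;
}
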
 
-       kfree(server);
+/*
+ * look up a server by its IP address
+ */
+struct afs_server *afs_find_server(const struct in_addr *_addr)
+{
+       struct afs_server *server = NULL;
+       struct rb_node *p;
+       struct in_addr addr = *_addr;
 
-       *_server = active;
-       _leave(" = 0 (%p)", active);
-       return 0;
+       _enter(NIPQUAD_FMT, NIPQUAD(addr.s_addr));
 
-       /* found a matching server in the graveyard, so resurrect it and
-        * dispose of the new record */
- resurrect_server:
-       _debug("resurrecting server");
+       read_lock(&afs_servers_lock);
 
-       list_move_tail(&zombie->link, &cell->sv_list);
-       afs_get_server(zombie);
-       afs_kafstimod_del_timer(&zombie->timeout);
-       spin_unlock(&cell->sv_gylock);
-       write_unlock(&cell->sv_lock);
+       p = afs_servers.rb_node;
+       while (p) {
+               server = rb_entry(p, struct afs_server, master_rb);
 
-       kfree(server);
+               _debug("- consider %p", p);
 
-       *_server = zombie;
-       _leave(" = 0 (%p)", zombie);
-       return 0;
+               if (addr.s_addr < server->addr.s_addr) {
+                       p = p->rb_left;
+               } else if (addr.s_addr > server->addr.s_addr) {
+                       p = p->rb_right;
+               } else {
+                       afs_get_server(server);
+                       goto found;
+               }
+       }
 
-} /* end afs_server_lookup() */
+       server = NULL;
+found:
+       read_unlock(&afs_servers_lock);
+       ASSERTIFCMP(server, server->addr.s_addr, ==, addr.s_addr);
+       _leave(" = %p", server);
+       return server;
+}
 
-/*****************************************************************************/
 /*
  * destroy a server record
  * - removes from the cell list
  */
 void afs_put_server(struct afs_server *server)
 {
-       struct afs_cell *cell;
-
        if (!server)
                return;
 
-       _enter("%p", server);
-
-       cell = server->cell;
+       _enter("%p{%d}", server, atomic_read(&server->usage));
 
-       /* sanity check */
-       BUG_ON(atomic_read(&server->usage) <= 0);
+       _debug("PUT SERVER %d", atomic_read(&server->usage));
 
-       /* to prevent a race, the decrement and the dequeue must be effectively
-        * atomic */
-       write_lock(&cell->sv_lock);
+       ASSERTCMP(atomic_read(&server->usage), >, 0);
 
        if (likely(!atomic_dec_and_test(&server->usage))) {
-               write_unlock(&cell->sv_lock);
                _leave("");
                return;
        }
 
-       spin_lock(&cell->sv_gylock);
-       list_move_tail(&server->link, &cell->sv_graveyard);
+       afs_flush_callback_breaks(server);
 
-       /* time out in 10 secs */
-       afs_kafstimod_add_timer(&server->timeout, 10 * HZ);
-
-       spin_unlock(&cell->sv_gylock);
-       write_unlock(&cell->sv_lock);
-
-       _leave(" [killed]");
-} /* end afs_put_server() */
+       spin_lock(&afs_server_graveyard_lock);
+       if (atomic_read(&server->usage) == 0) {
+               list_move_tail(&server->grave, &afs_server_graveyard);
+               server->time_of_death = get_seconds();
+               schedule_delayed_work(&afs_server_reaper,
+                                     afs_server_timeout * HZ);
+       }
+       spin_unlock(&afs_server_graveyard_lock);
+       _leave(" [dead]");
+}
 
-/*****************************************************************************/
 /*
- * timeout server record
- * - removes from the cell's graveyard if the usage count is zero
+ * destroy a dead server
  */
-void afs_server_do_timeout(struct afs_server *server)
+static void afs_destroy_server(struct afs_server *server)
 {
-       struct rxrpc_peer *peer;
-       struct afs_cell *cell;
-       int loop;
-
        _enter("%p", server);
 
-       cell = server->cell;
-
-       BUG_ON(atomic_read(&server->usage) < 0);
-
-       /* remove from graveyard if still dead */
-       spin_lock(&cell->vl_gylock);
-       if (atomic_read(&server->usage) == 0)
-               list_del_init(&server->link);
-       else
-               server = NULL;
-       spin_unlock(&cell->vl_gylock);
-
-       if (!server) {
-               _leave("");
-               return; /* resurrected */
-       }
-
-       /* we can now destroy it properly */
-       afs_put_cell(cell);
-
-       /* uncross-point the structs under a global lock */
-       spin_lock(&afs_server_peer_lock);
-       peer = server->peer;
-       if (peer) {
-               server->peer = NULL;
-               peer->user = NULL;
-       }
-       spin_unlock(&afs_server_peer_lock);
-
-       /* finish cleaning up the server */
-       for (loop = AFS_SERVER_CONN_LIST_SIZE - 1; loop >= 0; loop--)
-               if (server->fs_conn[loop])
-                       rxrpc_put_connection(server->fs_conn[loop]);
-
-       if (server->vlserver)
-               rxrpc_put_connection(server->vlserver);
+       ASSERTCMP(server->fs_vnodes.rb_node, ==, NULL);
+       ASSERTCMP(server->cb_promises.rb_node, ==, NULL);
+       ASSERTCMP(server->cb_break_head, ==, server->cb_break_tail);
+       ASSERTCMP(atomic_read(&server->cb_break_n), ==, 0);
 
+       afs_put_cell(server->cell);
        kfree(server);
+}
 
-       _leave(" [destroyed]");
-} /* end afs_server_do_timeout() */
-
-/*****************************************************************************/
 /*
- * get a callslot on a connection to the fileserver on the specified server
+ * reap dead server records
  */
-int afs_server_request_callslot(struct afs_server *server,
-                               struct afs_server_callslot *callslot)
+static void afs_reap_server(struct work_struct *work)
 {
-       struct afs_server_callslot *pcallslot;
-       struct rxrpc_connection *conn;
-       int nconn, ret;
-
-       _enter("%p,",server);
-
-       INIT_LIST_HEAD(&callslot->link);
-       callslot->task = current;
-       callslot->conn = NULL;
-       callslot->nconn = -1;
-       callslot->ready = 0;
-
-       ret = 0;
-       conn = NULL;
-
-       /* get hold of a callslot first */
-       spin_lock(&server->fs_lock);
-
-       /* resurrect the server if it's death timeout has expired */
-       if (server->fs_state) {
-               if (time_before(jiffies, server->fs_dead_jif)) {
-                       ret = server->fs_state;
-                       spin_unlock(&server->fs_lock);
-                       _leave(" = %d [still dead]", ret);
-                       return ret;
+       LIST_HEAD(corpses);
+       struct afs_server *server;
+       unsigned long delay, expiry;
+       time_t now;
+
+       now = get_seconds();
+       spin_lock(&afs_server_graveyard_lock);
+
+       while (!list_empty(&afs_server_graveyard)) {
+               server = list_entry(afs_server_graveyard.next,
+                                   struct afs_server, grave);
+
+               /* the queue is ordered most dead first */
+               expiry = server->time_of_death + afs_server_timeout;
+               if (expiry > now) {
+                       delay = (expiry - now) * HZ;
+                       if (!schedule_delayed_work(&afs_server_reaper, delay)) {
+                               cancel_delayed_work(&afs_server_reaper);
+                               schedule_delayed_work(&afs_server_reaper,
+                                                     delay);
+                       }
+                       break;
                }
 
-               server->fs_state = 0;
-       }
-
-       /* try and find a connection that has spare callslots */
-       for (nconn = 0; nconn < AFS_SERVER_CONN_LIST_SIZE; nconn++) {
-               if (server->fs_conn_cnt[nconn] > 0) {
-                       server->fs_conn_cnt[nconn]--;
-                       spin_unlock(&server->fs_lock);
-                       callslot->nconn = nconn;
-                       goto obtained_slot;
+               write_lock(&server->cell->servers_lock);
+               write_lock(&afs_servers_lock);
+               if (atomic_read(&server->usage) > 0) {
+                       list_del_init(&server->grave);
+               } else {
+                       list_move_tail(&server->grave, &corpses);
+                       list_del_init(&server->link);
+                       rb_erase(&server->master_rb, &afs_servers);
                }
+               write_unlock(&afs_servers_lock);
+               write_unlock(&server->cell->servers_lock);
        }
 
-       /* none were available - wait interruptibly for one to become
-        * available */
-       set_current_state(TASK_INTERRUPTIBLE);
-       list_add_tail(&callslot->link, &server->fs_callq);
-       spin_unlock(&server->fs_lock);
-
-       while (!callslot->ready && !signal_pending(current)) {
-               schedule();
-               set_current_state(TASK_INTERRUPTIBLE);
-       }
-
-       set_current_state(TASK_RUNNING);
-
-       /* even if we were interrupted we may still be queued */
-       if (!callslot->ready) {
-               spin_lock(&server->fs_lock);
-               list_del_init(&callslot->link);
-               spin_unlock(&server->fs_lock);
-       }
-
-       nconn = callslot->nconn;
+       spin_unlock(&afs_server_graveyard_lock);
 
-       /* if interrupted, we must release any slot we also got before
-        * returning an error */
-       if (signal_pending(current)) {
-               ret = -EINTR;
-               goto error_release;
+       /* now reap the corpses we've extracted */
+       while (!list_empty(&corpses)) {
+               server = list_entry(corpses.next, struct afs_server, grave);
+               list_del(&server->grave);
+               afs_destroy_server(server);
        }
+}
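
Server records are not freed on the final afs_put_server(); they sit on a graveyard list stamped with a time of death, and this delayed-work reaper destroys whatever has been dead for longer than afs_server_timeout, rescheduling itself for the next entry to expire. Note the re-check of the usage count under the graveyard lock: afs_lookup_server() can resurrect a record (its list_del_init() on ->grave) between the count hitting zero and the reaper reaching it. A condensed sketch of the lifecycle with hypothetical names, using the same APIs of this era (get_seconds(), the shared workqueue):

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/time.h>

#define ITEM_TIMEOUT	10		/* seconds spent on the graveyard */

struct item {
	struct list_head	grave;
	atomic_t		usage;
	time_t			time_of_death;
};

static LIST_HEAD(item_graveyard);
static DEFINE_SPINLOCK(item_graveyard_lock);
static void item_reap(struct work_struct *work);
static DECLARE_DELAYED_WORK(item_reaper, item_reap);

static void put_item(struct item *item)
{
	if (!atomic_dec_and_test(&item->usage))
		return;
	spin_lock(&item_graveyard_lock);
	if (atomic_read(&item->usage) == 0) {	/* not resurrected meanwhile */
		list_move_tail(&item->grave, &item_graveyard);
		item->time_of_death = get_seconds();
		schedule_delayed_work(&item_reaper, ITEM_TIMEOUT * HZ);
	}
	spin_unlock(&item_graveyard_lock);
}

static void item_reap(struct work_struct *work)
{
	struct item *item;
	time_t now = get_seconds();

	spin_lock(&item_graveyard_lock);
	while (!list_empty(&item_graveyard)) {
		item = list_entry(item_graveyard.next, struct item, grave);
		if (item->time_of_death + ITEM_TIMEOUT > now) {
			/* oldest corpse not ripe yet: come back when it is */
			schedule_delayed_work(&item_reaper,
				(item->time_of_death + ITEM_TIMEOUT - now) * HZ);
			break;
		}
		list_del_init(&item->grave);
		/* unhash and free the item (outside the lock in real code) */
	}
	spin_unlock(&item_graveyard_lock);
}

The cancel_delayed_work()/reschedule dance in the original handles the case where a reaper run is already queued for a later time than the newly computed delay; this simplified sketch omits it because a plain schedule_delayed_work() is a no-op only when an earlier run is already pending.
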
 
-       /* if we were woken up with an error, then pass that error back to the
-        * called */
-       if (nconn < 0) {
-               _leave(" = %d", callslot->errno);
-               return callslot->errno;
-       }
-
-       /* were we given a connection directly? */
-       if (callslot->conn) {
-               /* yes - use it */
-               _leave(" = 0 (nc=%d)", nconn);
-               return 0;
-       }
-
-       /* got a callslot, but no connection */
- obtained_slot:
-
-       /* need to get hold of the RxRPC connection */
-       down_write(&server->sem);
-
-       /* quick check to see if there's an outstanding error */
-       ret = server->fs_state;
-       if (ret)
-               goto error_release_upw;
-
-       if (server->fs_conn[nconn]) {
-               /* reuse an existing connection */
-               rxrpc_get_connection(server->fs_conn[nconn]);
-               callslot->conn = server->fs_conn[nconn];
-       }
-       else {
-               /* create a new connection */
-               ret = rxrpc_create_connection(afs_transport,
-                                             htons(7000),
-                                             server->addr.s_addr,
-                                             FS_SERVICE_ID,
-                                             NULL,
-                                             &server->fs_conn[nconn]);
-
-               if (ret < 0)
-                       goto error_release_upw;
-
-               callslot->conn = server->fs_conn[0];
-               rxrpc_get_connection(callslot->conn);
-       }
-
-       up_write(&server->sem);
-
-       _leave(" = 0");
-       return 0;
-
-       /* handle an error occurring */
- error_release_upw:
-       up_write(&server->sem);
-
- error_release:
-       /* either release the callslot or pass it along to another deserving
-        * task */
-       spin_lock(&server->fs_lock);
-
-       if (nconn < 0) {
-               /* no callslot allocated */
-       }
-       else if (list_empty(&server->fs_callq)) {
-               /* no one waiting */
-               server->fs_conn_cnt[nconn]++;
-               spin_unlock(&server->fs_lock);
-       }
-       else {
-               /* someone's waiting - dequeue them and wake them up */
-               pcallslot = list_entry(server->fs_callq.next,
-                                      struct afs_server_callslot, link);
-               list_del_init(&pcallslot->link);
-
-               pcallslot->errno = server->fs_state;
-               if (!pcallslot->errno) {
-                       /* pass them out callslot details */
-                       callslot->conn = xchg(&pcallslot->conn,
-                                             callslot->conn);
-                       pcallslot->nconn = nconn;
-                       callslot->nconn = nconn = -1;
-               }
-               pcallslot->ready = 1;
-               wake_up_process(pcallslot->task);
-               spin_unlock(&server->fs_lock);
-       }
-
-       rxrpc_put_connection(callslot->conn);
-       callslot->conn = NULL;
-
-       _leave(" = %d", ret);
-       return ret;
-
-} /* end afs_server_request_callslot() */
-
-/*****************************************************************************/
-/*
- * release a callslot back to the server
- * - transfers the RxRPC connection to the next pending callslot if possible
- */
-void afs_server_release_callslot(struct afs_server *server,
-                                struct afs_server_callslot *callslot)
-{
-       struct afs_server_callslot *pcallslot;
-
-       _enter("{ad=%08x,cnt=%u},{%d}",
-              ntohl(server->addr.s_addr),
-              server->fs_conn_cnt[callslot->nconn],
-              callslot->nconn);
-
-       BUG_ON(callslot->nconn < 0);
-
-       spin_lock(&server->fs_lock);
-
-       if (list_empty(&server->fs_callq)) {
-               /* no one waiting */
-               server->fs_conn_cnt[callslot->nconn]++;
-               spin_unlock(&server->fs_lock);
-       }
-       else {
-               /* someone's waiting - dequeue them and wake them up */
-               pcallslot = list_entry(server->fs_callq.next,
-                                      struct afs_server_callslot, link);
-               list_del_init(&pcallslot->link);
-
-               pcallslot->errno = server->fs_state;
-               if (!pcallslot->errno) {
-                       /* pass them out callslot details */
-                       callslot->conn = xchg(&pcallslot->conn, callslot->conn);
-                       pcallslot->nconn = callslot->nconn;
-                       callslot->nconn = -1;
-               }
-
-               pcallslot->ready = 1;
-               wake_up_process(pcallslot->task);
-               spin_unlock(&server->fs_lock);
-       }
-
-       rxrpc_put_connection(callslot->conn);
-
-       _leave("");
-} /* end afs_server_release_callslot() */
-
-/*****************************************************************************/
 /*
- * get a handle to a connection to the vlserver (volume location) on the
- * specified server
+ * discard all the server records for rmmod
  */
-int afs_server_get_vlconn(struct afs_server *server,
-                         struct rxrpc_connection **_conn)
+void __exit afs_purge_servers(void)
 {
-       struct rxrpc_connection *conn;
-       int ret;
-
-       _enter("%p,", server);
-
-       ret = 0;
-       conn = NULL;
-       down_read(&server->sem);
-
-       if (server->vlserver) {
-               /* reuse an existing connection */
-               rxrpc_get_connection(server->vlserver);
-               conn = server->vlserver;
-               up_read(&server->sem);
-       }
-       else {
-               /* create a new connection */
-               up_read(&server->sem);
-               down_write(&server->sem);
-               if (!server->vlserver) {
-                       ret = rxrpc_create_connection(afs_transport,
-                                                     htons(7003),
-                                                     server->addr.s_addr,
-                                                     VL_SERVICE_ID,
-                                                     NULL,
-                                                     &server->vlserver);
-               }
-               if (ret == 0) {
-                       rxrpc_get_connection(server->vlserver);
-                       conn = server->vlserver;
-               }
-               up_write(&server->sem);
-       }
-
-       *_conn = conn;
-       _leave(" = %d", ret);
-       return ret;
-} /* end afs_server_get_vlconn() */
+       afs_server_timeout = 0;
+       cancel_delayed_work(&afs_server_reaper);
+       schedule_delayed_work(&afs_server_reaper, 0);
+}
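
Forcing afs_server_timeout to zero makes every graveyard entry immediately ripe, and re-queuing the reaper with a zero delay gets them all destroyed on the next workqueue pass. Queuing alone does not wait for that pass to complete, so the module-exit path still has to flush the workqueue before the code is unloaded; a hedged sketch of how a caller might pair with this function (the flush_scheduled_work() call is an assumption about the surrounding exit code, not something visible in this hunk):

/* fragment of a module-exit function */
static void __exit example_afs_exit(void)
{
	afs_purge_servers();		/* queue one final, immediate reap */
	flush_scheduled_work();		/* wait for the reaper to finish */
	/* ... unregister the filesystem, destroy caches, etc. ... */
}
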
diff --git a/fs/afs/server.h b/fs/afs/server.h
deleted file mode 100644 (file)
index c3d2411..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/* server.h: AFS server record
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_SERVER_H
-#define _LINUX_AFS_SERVER_H
-
-#include "types.h"
-#include "kafstimod.h"
-#include <rxrpc/peer.h>
-#include <linux/rwsem.h>
-
-extern spinlock_t afs_server_peer_lock;
-
-/*****************************************************************************/
-/*
- * AFS server record
- */
-struct afs_server
-{
-       atomic_t                usage;
-       struct afs_cell         *cell;          /* cell in which server resides */
-       struct list_head        link;           /* link in cell's server list */
-       struct rw_semaphore     sem;            /* access lock */
-       struct afs_timer        timeout;        /* graveyard timeout */
-       struct in_addr          addr;           /* server address */
-       struct rxrpc_peer       *peer;          /* peer record for this server */
-       struct rxrpc_connection *vlserver;      /* connection to the volume location service */
-
-       /* file service access */
-#define AFS_SERVER_CONN_LIST_SIZE 2
-       struct rxrpc_connection *fs_conn[AFS_SERVER_CONN_LIST_SIZE]; /* FS connections */
-       unsigned                fs_conn_cnt[AFS_SERVER_CONN_LIST_SIZE]; /* per conn call count */
-       struct list_head        fs_callq;       /* queue of processes waiting to make a call */
-       spinlock_t              fs_lock;        /* access lock */
-       int                     fs_state;       /* 0 or reason FS currently marked dead (-errno) */
-       unsigned                fs_rtt;         /* FS round trip time */
-       unsigned long           fs_act_jif;     /* time at which last activity occurred */
-       unsigned long           fs_dead_jif;    /* time at which no longer to be considered dead */
-
-       /* callback promise management */
-       struct list_head        cb_promises;    /* as yet unbroken promises from this server */
-       spinlock_t              cb_lock;        /* access lock */
-};
-
-extern int afs_server_lookup(struct afs_cell *cell,
-                            const struct in_addr *addr,
-                            struct afs_server **_server);
-
-#define afs_get_server(S) do { atomic_inc(&(S)->usage); } while(0)
-
-extern void afs_put_server(struct afs_server *server);
-extern void afs_server_do_timeout(struct afs_server *server);
-
-extern int afs_server_find_by_peer(const struct rxrpc_peer *peer,
-                                  struct afs_server **_server);
-
-extern int afs_server_get_vlconn(struct afs_server *server,
-                                struct rxrpc_connection **_conn);
-
-static inline
-struct afs_server *afs_server_get_from_peer(struct rxrpc_peer *peer)
-{
-       struct afs_server *server;
-
-       spin_lock(&afs_server_peer_lock);
-       server = peer->user;
-       if (server)
-               afs_get_server(server);
-       spin_unlock(&afs_server_peer_lock);
-
-       return server;
-}
-
-/*****************************************************************************/
-/*
- * AFS server callslot grant record
- */
-struct afs_server_callslot
-{
-       struct list_head        link;           /* link in server's list */
-       struct task_struct      *task;          /* process waiting to make call */
-       struct rxrpc_connection *conn;          /* connection to use (or NULL on error) */
-       short                   nconn;          /* connection slot number (-1 on error) */
-       char                    ready;          /* T when ready */
-       int                     errno;          /* error number if nconn==-1 */
-};
-
-extern int afs_server_request_callslot(struct afs_server *server,
-                                      struct afs_server_callslot *callslot);
-
-extern void afs_server_release_callslot(struct afs_server *server,
-                                       struct afs_server_callslot *callslot);
-
-#endif /* _LINUX_AFS_SERVER_H */
diff --git a/fs/afs/super.c b/fs/afs/super.c
index eb7e323..cebd03c 100644 (file)
@@ -1,5 +1,6 @@
-/*
- * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+/* AFS superblock handling
+ *
+ * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
  *
  * This software may be freely redistributed under the terms of the
  * GNU General Public License.
@@ -9,7 +10,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  * Authors: David Howells <dhowells@redhat.com>
- *          David Woodhouse <dwmw2@cambridge.redhat.com>
+ *          David Woodhouse <dwmw2@redhat.com>
  *
  */
 
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
-#include "vnode.h"
-#include "volume.h"
-#include "cell.h"
-#include "cmservice.h"
-#include "fsclient.h"
-#include "super.h"
 #include "internal.h"
 
 #define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
 
-struct afs_mount_params {
-       int                     rwpath;
-       struct afs_cell         *default_cell;
-       struct afs_volume       *volume;
-};
-
 static void afs_i_init_once(void *foo, struct kmem_cache *cachep,
                            unsigned long flags);
 
@@ -62,13 +51,13 @@ static const struct super_operations afs_super_ops = {
        .drop_inode     = generic_delete_inode,
        .destroy_inode  = afs_destroy_inode,
        .clear_inode    = afs_clear_inode,
+       .umount_begin   = afs_umount_begin,
        .put_super      = afs_put_super,
 };
 
 static struct kmem_cache *afs_inode_cachep;
 static atomic_t afs_count_active_inodes;
 
-/*****************************************************************************/
 /*
  * initialise the filesystem
  */
@@ -78,8 +67,6 @@ int __init afs_fs_init(void)
 
        _enter("");
 
-       afs_timer_init(&afs_mntpt_expiry_timer, &afs_mntpt_expiry_timer_ops);
-
        /* create ourselves an inode cache */
        atomic_set(&afs_count_active_inodes, 0);
 
@@ -99,20 +86,22 @@ int __init afs_fs_init(void)
        ret = register_filesystem(&afs_fs_type);
        if (ret < 0) {
                kmem_cache_destroy(afs_inode_cachep);
-               kleave(" = %d", ret);
+               _leave(" = %d", ret);
                return ret;
        }
 
-       kleave(" = 0");
+       _leave(" = 0");
        return 0;
-} /* end afs_fs_init() */
+}
 
-/*****************************************************************************/
 /*
  * clean up the filesystem
  */
 void __exit afs_fs_exit(void)
 {
+       _enter("");
+
+       afs_mntpt_kill_timer();
        unregister_filesystem(&afs_fs_type);
 
        if (atomic_read(&afs_count_active_inodes) != 0) {
@@ -122,10 +111,9 @@ void __exit afs_fs_exit(void)
        }
 
        kmem_cache_destroy(afs_inode_cachep);
+       _leave("");
+}
 
-} /* end afs_fs_exit() */
-
-/*****************************************************************************/
 /*
  * check that an argument has a value
  */
@@ -136,9 +124,8 @@ static int want_arg(char **_value, const char *option)
                return 0;
        }
        return 1;
-} /* end want_arg() */
+}
 
-/*****************************************************************************/
 /*
  * check that there's no subsequent value
  */
@@ -150,18 +137,17 @@ static int want_no_value(char *const *_value, const char *option)
                return 0;
        }
        return 1;
-} /* end want_no_value() */
+}
 
-/*****************************************************************************/
 /*
  * parse the mount options
  * - this function has been shamelessly adapted from the ext3 fs which
  *   shamelessly adapted it from the msdos fs
  */
-static int afs_super_parse_options(struct afs_mount_params *params,
-                                  char *options,
-                                  const char **devname)
+static int afs_parse_options(struct afs_mount_params *params,
+                            char *options, const char **devname)
 {
+       struct afs_cell *cell;
        char *key, *value;
        int ret;
 
@@ -170,51 +156,135 @@ static int afs_super_parse_options(struct afs_mount_params *params,
        options[PAGE_SIZE - 1] = 0;
 
        ret = 0;
-       while ((key = strsep(&options, ",")) != 0)
-       {
+       while ((key = strsep(&options, ","))) {
                value = strchr(key, '=');
                if (value)
                        *value++ = 0;
 
-               printk("kAFS: KEY: %s, VAL:%s\n", key, value ?: "-");
+               _debug("kAFS: KEY: %s, VAL:%s", key, value ?: "-");
 
                if (strcmp(key, "rwpath") == 0) {
                        if (!want_no_value(&value, "rwpath"))
                                return -EINVAL;
                        params->rwpath = 1;
-                       continue;
-               }
-               else if (strcmp(key, "vol") == 0) {
+               } else if (strcmp(key, "vol") == 0) {
                        if (!want_arg(&value, "vol"))
                                return -EINVAL;
                        *devname = value;
-                       continue;
-               }
-               else if (strcmp(key, "cell") == 0) {
+               } else if (strcmp(key, "cell") == 0) {
                        if (!want_arg(&value, "cell"))
                                return -EINVAL;
-                       afs_put_cell(params->default_cell);
-                       ret = afs_cell_lookup(value,
-                                             strlen(value),
-                                             &params->default_cell);
-                       if (ret < 0)
-                               return -EINVAL;
-                       continue;
+                       cell = afs_cell_lookup(value, strlen(value));
+                       if (IS_ERR(cell))
+                               return PTR_ERR(cell);
+                       afs_put_cell(params->cell);
+                       params->cell = cell;
+               } else {
+                       printk("kAFS: Unknown mount option: '%s'\n",  key);
+                       ret = -EINVAL;
+                       goto error;
                }
-
-               printk("kAFS: Unknown mount option: '%s'\n",  key);
-               ret = -EINVAL;
-               goto error;
        }
 
        ret = 0;
-
- error:
+error:
        _leave(" = %d", ret);
        return ret;
-} /* end afs_super_parse_options() */
+}
+
+/*
+ * parse a device name to get cell name, volume name, volume type and R/W
+ * selector
+ * - this can be one of the following:
+ *     "%[cell:]volume[.]"             R/W volume
+ *     "#[cell:]volume[.]"             R/O or R/W volume (rwpath=0),
+ *                                      or R/W (rwpath=1) volume
+ *     "%[cell:]volume.readonly"       R/O volume
+ *     "#[cell:]volume.readonly"       R/O volume
+ *     "%[cell:]volume.backup"         Backup volume
+ *     "#[cell:]volume.backup"         Backup volume
+ */
+static int afs_parse_device_name(struct afs_mount_params *params,
+                                const char *name)
+{
+       struct afs_cell *cell;
+       const char *cellname, *suffix;
+       int cellnamesz;
+
+       _enter(",%s", name);
+
+       if (!name) {
+               printk(KERN_ERR "kAFS: no volume name specified\n");
+               return -EINVAL;
+       }
+
+       if ((name[0] != '%' && name[0] != '#') || !name[1]) {
+               printk(KERN_ERR "kAFS: unparsable volume name\n");
+               return -EINVAL;
+       }
+
+       /* determine the type of volume we're looking for */
+       params->type = AFSVL_ROVOL;
+       params->force = false;
+       if (params->rwpath || name[0] == '%') {
+               params->type = AFSVL_RWVOL;
+               params->force = true;
+       }
+       name++;
+
+       /* split the cell name out if there is one */
+       params->volname = strchr(name, ':');
+       if (params->volname) {
+               cellname = name;
+               cellnamesz = params->volname - name;
+               params->volname++;
+       } else {
+               params->volname = name;
+               cellname = NULL;
+               cellnamesz = 0;
+       }
+
+       /* the volume type is further affected by a possible suffix */
+       suffix = strrchr(params->volname, '.');
+       if (suffix) {
+               if (strcmp(suffix, ".readonly") == 0) {
+                       params->type = AFSVL_ROVOL;
+                       params->force = true;
+               } else if (strcmp(suffix, ".backup") == 0) {
+                       params->type = AFSVL_BACKVOL;
+                       params->force = true;
+               } else if (suffix[1] == 0) {
+               } else {
+                       suffix = NULL;
+               }
+       }
+
+       params->volnamesz = suffix ?
+               suffix - params->volname : strlen(params->volname);
+
+       _debug("cell %*.*s [%p]",
+              cellnamesz, cellnamesz, cellname ?: "", params->cell);
+
+       /* lookup the cell record */
+       if (cellname || !params->cell) {
+               cell = afs_cell_lookup(cellname, cellnamesz);
+               if (IS_ERR(cell)) {
+                       printk(KERN_ERR "kAFS: unable to lookup cell '%s'\n",
+                              cellname ?: "");
+                       return PTR_ERR(cell);
+               }
+               afs_put_cell(params->cell);
+               params->cell = cell;
+       }
+
+       _debug("CELL:%s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s",
+              params->cell->name, params->cell,
+              params->volnamesz, params->volnamesz, params->volname,
+              suffix ?: "-", params->type, params->force ? " FORCE" : "");
+
+       return 0;
+}
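
Worked examples of the parser above may help; the cell name example.com is hypothetical, and the rwpath flag comes from the mount options handled earlier (e.g. a mount such as mount -t afs "#example.com:root.cell." /afs -o rwpath):

/*
 *   "%example.com:root.cell"        -> cell "example.com", volume "root.cell",
 *                                      type AFSVL_RWVOL, force=true
 *   "#root.cell"                    -> cell from the cell= mount option (or
 *                                      the client's default cell), volume
 *                                      "root.cell", type AFSVL_ROVOL unless
 *                                      rwpath was given (then RWVOL, forced)
 *   "#example.com:root.cell.backup" -> cell "example.com", volume "root.cell",
 *                                      type AFSVL_BACKVOL, force=true
 *   "#example.com:root.cell."       -> as the plain '#' form, with the
 *                                      trailing dot stripped from the
 *                                      volume name
 */
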
 
-/*****************************************************************************/
 /*
  * check a superblock to see if it's the one we're looking for
  */
@@ -224,13 +294,12 @@ static int afs_test_super(struct super_block *sb, void *data)
        struct afs_super_info *as = sb->s_fs_info;
 
        return as->volume == params->volume;
-} /* end afs_test_super() */
+}
 
-/*****************************************************************************/
 /*
  * fill in the superblock
  */
-static int afs_fill_super(struct super_block *sb, void *data, int silent)
+static int afs_fill_super(struct super_block *sb, void *data)
 {
        struct afs_mount_params *params = data;
        struct afs_super_info *as = NULL;
@@ -239,7 +308,7 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent)
        struct inode *inode = NULL;
        int ret;
 
-       kenter("");
+       _enter("");
 
        /* allocate a superblock info record */
        as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
@@ -262,9 +331,9 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent)
        fid.vid         = as->volume->vid;
        fid.vnode       = 1;
        fid.unique      = 1;
-       ret = afs_iget(sb, &fid, &inode);
-       if (ret < 0)
-               goto error;
+       inode = afs_iget(sb, params->key, &fid, NULL, NULL);
+       if (IS_ERR(inode))
+               goto error_inode;
 
        ret = -ENOMEM;
        root = d_alloc_root(inode);
@@ -273,21 +342,23 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_root = root;
 
-       kleave(" = 0");
+       _leave(" = 0");
        return 0;
 
- error:
+error_inode:
+       ret = PTR_ERR(inode);
+       inode = NULL;
+error:
        iput(inode);
        afs_put_volume(as->volume);
        kfree(as);
 
        sb->s_fs_info = NULL;
 
-       kleave(" = %d", ret);
+       _leave(" = %d", ret);
        return ret;
-} /* end afs_fill_super() */
+}
 
-/*****************************************************************************/
 /*
  * get an AFS superblock
  * - TODO: don't use get_sb_nodev(), but rather call sget() directly
@@ -300,69 +371,80 @@ static int afs_get_sb(struct file_system_type *fs_type,
 {
        struct afs_mount_params params;
        struct super_block *sb;
+       struct afs_volume *vol;
+       struct key *key;
        int ret;
 
        _enter(",,%s,%p", dev_name, options);
 
        memset(&params, 0, sizeof(params));
 
-       /* start the cache manager */
-       ret = afscm_start();
-       if (ret < 0) {
-               _leave(" = %d", ret);
-               return ret;
-       }
-
-       /* parse the options */
+       /* parse the options and device name */
        if (options) {
-               ret = afs_super_parse_options(&params, options, &dev_name);
+               ret = afs_parse_options(&params, options, &dev_name);
                if (ret < 0)
                        goto error;
-               if (!dev_name) {
-                       printk("kAFS: no volume name specified\n");
-                       ret = -EINVAL;
-                       goto error;
-               }
        }
 
-       /* parse the device name */
-       ret = afs_volume_lookup(dev_name,
-                               params.default_cell,
-                               params.rwpath,
-                               &params.volume);
+
+       ret = afs_parse_device_name(&params, dev_name);
        if (ret < 0)
                goto error;
 
-       /* allocate a deviceless superblock */
-       sb = sget(fs_type, afs_test_super, set_anon_super, &params);
-       if (IS_ERR(sb))
+       /* try and do the mount securely */
+       key = afs_request_key(params.cell);
+       if (IS_ERR(key)) {
+               _leave(" = %ld [key]", PTR_ERR(key));
+               ret = PTR_ERR(key);
                goto error;
+       }
+       params.key = key;
 
-       sb->s_flags = flags;
+       /* parse the device name */
+       vol = afs_volume_lookup(&params);
+       if (IS_ERR(vol)) {
+               ret = PTR_ERR(vol);
+               goto error;
+       }
+       params.volume = vol;
 
-       ret = afs_fill_super(sb, &params, flags & MS_SILENT ? 1 : 0);
-       if (ret < 0) {
-               up_write(&sb->s_umount);
-               deactivate_super(sb);
+       /* allocate a deviceless superblock */
+       sb = sget(fs_type, afs_test_super, set_anon_super, &params);
+       if (IS_ERR(sb)) {
+               ret = PTR_ERR(sb);
                goto error;
        }
-       sb->s_flags |= MS_ACTIVE;
-       simple_set_mnt(mnt, sb);
 
+       if (!sb->s_root) {
+               /* initial superblock/root creation */
+               _debug("create");
+               sb->s_flags = flags;
+               ret = afs_fill_super(sb, &params);
+               if (ret < 0) {
+                       up_write(&sb->s_umount);
+                       deactivate_super(sb);
+                       goto error;
+               }
+               sb->s_flags |= MS_ACTIVE;
+       } else {
+               _debug("reuse");
+               ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
+       }
+
+       simple_set_mnt(mnt, sb);
        afs_put_volume(params.volume);
-       afs_put_cell(params.default_cell);
-       _leave(" = 0 [%p]", 0, sb);
+       afs_put_cell(params.cell);
+       _leave(" = 0 [%p]", sb);
        return 0;
 
- error:
+error:
        afs_put_volume(params.volume);
-       afs_put_cell(params.default_cell);
-       afscm_stop();
+       afs_put_cell(params.cell);
+       key_put(params.key);
        _leave(" = %d", ret);
        return ret;
-} /* end afs_get_sb() */
+}
 
-/*****************************************************************************/
 /*
  * finish the unmounting process on the superblock
  */
@@ -373,35 +455,30 @@ static void afs_put_super(struct super_block *sb)
        _enter("");
 
        afs_put_volume(as->volume);
-       afscm_stop();
 
        _leave("");
-} /* end afs_put_super() */
+}
 
-/*****************************************************************************/
 /*
  * initialise an inode cache slab element prior to any use
  */
 static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
                            unsigned long flags)
 {
-       struct afs_vnode *vnode = (struct afs_vnode *) _vnode;
+       struct afs_vnode *vnode = _vnode;
 
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR) {
                memset(vnode, 0, sizeof(*vnode));
                inode_init_once(&vnode->vfs_inode);
                init_waitqueue_head(&vnode->update_waitq);
+               mutex_init(&vnode->permits_lock);
+               mutex_init(&vnode->validate_lock);
                spin_lock_init(&vnode->lock);
-               INIT_LIST_HEAD(&vnode->cb_link);
-               INIT_LIST_HEAD(&vnode->cb_hash_link);
-               afs_timer_init(&vnode->cb_timeout,
-                              &afs_vnode_cb_timed_out_ops);
+               INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
        }
+}
 
-} /* end afs_i_init_once() */
-
-/*****************************************************************************/
 /*
  * allocate an AFS inode struct from our slab cache
  */
@@ -409,8 +486,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
 {
        struct afs_vnode *vnode;
 
-       vnode = (struct afs_vnode *)
-               kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
+       vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
        if (!vnode)
                return NULL;
 
@@ -421,21 +497,25 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
 
        vnode->volume           = NULL;
        vnode->update_cnt       = 0;
-       vnode->flags            = 0;
+       vnode->flags            = 1 << AFS_VNODE_UNSET;
+       vnode->cb_promised      = false;
 
        return &vnode->vfs_inode;
-} /* end afs_alloc_inode() */
+}
 
-/*****************************************************************************/
 /*
  * destroy an AFS inode struct
  */
 static void afs_destroy_inode(struct inode *inode)
 {
+       struct afs_vnode *vnode = AFS_FS_I(inode);
+
        _enter("{%lu}", inode->i_ino);
 
-       kmem_cache_free(afs_inode_cachep, AFS_FS_I(inode));
+       _debug("DESTROY INODE %p", inode);
 
-       atomic_dec(&afs_count_active_inodes);
+       ASSERTCMP(vnode->server, ==, NULL);
 
-} /* end afs_destroy_inode() */
+       kmem_cache_free(afs_inode_cachep, vnode);
+       atomic_dec(&afs_count_active_inodes);
+}
diff --git a/fs/afs/super.h b/fs/afs/super.h
deleted file mode 100644 (file)
index 32de8cc..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/* super.h: AFS filesystem internal private data
- *
- * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
- *
- * This software may be freely redistributed under the terms of the
- * GNU General Public License.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
- *          David Howells <dhowells@redhat.com>
- *
- */
-
-#ifndef _LINUX_AFS_SUPER_H
-#define _LINUX_AFS_SUPER_H
-
-#include <linux/fs.h>
-#include "server.h"
-
-#ifdef __KERNEL__
-
-/*****************************************************************************/
-/*
- * AFS superblock private data
- * - there's one superblock per volume
- */
-struct afs_super_info
-{
-       struct afs_volume       *volume;        /* volume record */
-       char                    rwparent;       /* T if parent is R/W AFS volume */
-};
-
-static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
-{
-       return sb->s_fs_info;
-}
-
-extern struct file_system_type afs_fs_type;
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_AFS_SUPER_H */
diff --git a/fs/afs/transport.h b/fs/afs/transport.h
deleted file mode 100644 (file)
index 7013ae6..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/* transport.h: AFS transport management
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_TRANSPORT_H
-#define _LINUX_AFS_TRANSPORT_H
-
-#include "types.h"
-#include <rxrpc/transport.h>
-
-/* the cache manager transport endpoint */
-extern struct rxrpc_transport *afs_transport;
-
-#endif /* _LINUX_AFS_TRANSPORT_H */
diff --git a/fs/afs/types.h b/fs/afs/types.h
deleted file mode 100644 (file)
index b1a2367..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/* types.h: AFS types
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_TYPES_H
-#define _LINUX_AFS_TYPES_H
-
-#ifdef __KERNEL__
-#include <rxrpc/types.h>
-#endif /* __KERNEL__ */
-
-typedef unsigned                       afs_volid_t;
-typedef unsigned                       afs_vnodeid_t;
-typedef unsigned long long             afs_dataversion_t;
-
-typedef enum {
-       AFSVL_RWVOL,                    /* read/write volume */
-       AFSVL_ROVOL,                    /* read-only volume */
-       AFSVL_BACKVOL,                  /* backup volume */
-} __attribute__((packed)) afs_voltype_t;
-
-typedef enum {
-       AFS_FTYPE_INVALID       = 0,
-       AFS_FTYPE_FILE          = 1,
-       AFS_FTYPE_DIR           = 2,
-       AFS_FTYPE_SYMLINK       = 3,
-} afs_file_type_t;
-
-#ifdef __KERNEL__
-
-struct afs_cell;
-struct afs_vnode;
-
-/*****************************************************************************/
-/*
- * AFS file identifier
- */
-struct afs_fid
-{
-       afs_volid_t     vid;            /* volume ID */
-       afs_vnodeid_t   vnode;          /* file index within volume */
-       unsigned        unique;         /* unique ID number (file index version) */
-};
-
-/*****************************************************************************/
-/*
- * AFS callback notification
- */
-typedef enum {
-       AFSCM_CB_UNTYPED        = 0,    /* no type set on CB break */
-       AFSCM_CB_EXCLUSIVE      = 1,    /* CB exclusive to CM [not implemented] */
-       AFSCM_CB_SHARED         = 2,    /* CB shared by other CM's */
-       AFSCM_CB_DROPPED        = 3,    /* CB promise cancelled by file server */
-} afs_callback_type_t;
-
-struct afs_callback
-{
-       struct afs_server       *server;        /* server that made the promise */
-       struct afs_fid          fid;            /* file identifier */
-       unsigned                version;        /* callback version */
-       unsigned                expiry;         /* time at which expires */
-       afs_callback_type_t     type;           /* type of callback */
-};
-
-#define AFSCBMAX 50
-
-/*****************************************************************************/
-/*
- * AFS volume information
- */
-struct afs_volume_info
-{
-       afs_volid_t             vid;            /* volume ID */
-       afs_voltype_t           type;           /* type of this volume */
-       afs_volid_t             type_vids[5];   /* volume ID's for possible types for this vol */
-       
-       /* list of fileservers serving this volume */
-       size_t                  nservers;       /* number of entries used in servers[] */
-       struct {
-               struct in_addr  addr;           /* fileserver address */
-       } servers[8];
-};
-
-/*****************************************************************************/
-/*
- * AFS file status information
- */
-struct afs_file_status
-{
-       unsigned                if_version;     /* interface version */
-#define AFS_FSTATUS_VERSION    1
-
-       afs_file_type_t         type;           /* file type */
-       unsigned                nlink;          /* link count */
-       size_t                  size;           /* file size */
-       afs_dataversion_t       version;        /* current data version */
-       unsigned                author;         /* author ID */
-       unsigned                owner;          /* owner ID */
-       unsigned                caller_access;  /* access rights for authenticated caller */
-       unsigned                anon_access;    /* access rights for unauthenticated caller */
-       umode_t                 mode;           /* UNIX mode */
-       struct afs_fid          parent;         /* parent file ID */
-       time_t                  mtime_client;   /* last time client changed data */
-       time_t                  mtime_server;   /* last time server changed data */
-};
-
-/*****************************************************************************/
-/*
- * AFS volume synchronisation information
- */
-struct afs_volsync
-{
-       time_t                  creation;       /* volume creation time */
-};
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_AFS_TYPES_H */
diff --git a/fs/afs/use-rtnetlink.c b/fs/afs/use-rtnetlink.c
new file mode 100644 (file)
index 0000000..82f0daa
--- /dev/null
@@ -0,0 +1,473 @@
+/* RTNETLINK client
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_addr.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <net/netlink.h>
+#include "internal.h"
+
+struct afs_rtm_desc {
+       struct socket           *nlsock;
+       struct afs_interface    *bufs;
+       u8                      *mac;
+       size_t                  nbufs;
+       size_t                  maxbufs;
+       void                    *data;
+       ssize_t                 datalen;
+       size_t                  datamax;
+       int                     msg_seq;
+       unsigned                mac_index;
+       bool                    wantloopback;
+       int (*parse)(struct afs_rtm_desc *, struct nlmsghdr *);
+};
+
+/*
+ * parse an RTM_GETADDR response
+ */
+static int afs_rtm_getaddr_parse(struct afs_rtm_desc *desc,
+                                struct nlmsghdr *nlhdr)
+{
+       struct afs_interface *this;
+       struct ifaddrmsg *ifa;
+       struct rtattr *rtattr;
+       const char *name;
+       size_t len;
+
+       ifa = (struct ifaddrmsg *) NLMSG_DATA(nlhdr);
+
+       _enter("{ix=%d,af=%d}", ifa->ifa_index, ifa->ifa_family);
+
+       if (ifa->ifa_family != AF_INET) {
+               _leave(" = 0 [family %d]", ifa->ifa_family);
+               return 0;
+       }
+       if (desc->nbufs >= desc->maxbufs) {
+               _leave(" = 0 [max %zu/%zu]", desc->nbufs, desc->maxbufs);
+               return 0;
+       }
+
+       this = &desc->bufs[desc->nbufs];
+
+       this->index = ifa->ifa_index;
+       this->netmask.s_addr = inet_make_mask(ifa->ifa_prefixlen);
+       this->mtu = 0;
+
+       rtattr = NLMSG_DATA(nlhdr) + NLMSG_ALIGN(sizeof(struct ifaddrmsg));
+       len = NLMSG_PAYLOAD(nlhdr, sizeof(struct ifaddrmsg));
+
+       name = "unknown";
+       for (; RTA_OK(rtattr, len); rtattr = RTA_NEXT(rtattr, len)) {
+               switch (rtattr->rta_type) {
+               case IFA_ADDRESS:
+                       memcpy(&this->address, RTA_DATA(rtattr), 4);
+                       break;
+               case IFA_LABEL:
+                       name = RTA_DATA(rtattr);
+                       break;
+               }
+       }
+
+       _debug("%s: "NIPQUAD_FMT"/"NIPQUAD_FMT,
+              name, NIPQUAD(this->address), NIPQUAD(this->netmask));
+
+       desc->nbufs++;
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * parse an RTM_GETLINK response for MTUs
+ */
+static int afs_rtm_getlink_if_parse(struct afs_rtm_desc *desc,
+                                   struct nlmsghdr *nlhdr)
+{
+       struct afs_interface *this;
+       struct ifinfomsg *ifi;
+       struct rtattr *rtattr;
+       const char *name;
+       size_t len, loop;
+
+       ifi = (struct ifinfomsg *) NLMSG_DATA(nlhdr);
+
+       _enter("{ix=%d}", ifi->ifi_index);
+
+       for (loop = 0; loop < desc->nbufs; loop++) {
+               this = &desc->bufs[loop];
+               if (this->index == ifi->ifi_index)
+                       goto found;
+       }
+
+       _leave(" = 0 [no match]");
+       return 0;
+
+found:
+       if (ifi->ifi_type == ARPHRD_LOOPBACK && !desc->wantloopback) {
+               _leave(" = 0 [loopback]");
+               return 0;
+       }
+
+       rtattr = NLMSG_DATA(nlhdr) + NLMSG_ALIGN(sizeof(struct ifinfomsg));
+       len = NLMSG_PAYLOAD(nlhdr, sizeof(struct ifinfomsg));
+
+       name = "unknown";
+       for (; RTA_OK(rtattr, len); rtattr = RTA_NEXT(rtattr, len)) {
+               switch (rtattr->rta_type) {
+               case IFLA_MTU:
+                       memcpy(&this->mtu, RTA_DATA(rtattr), 4);
+                       break;
+               case IFLA_IFNAME:
+                       name = RTA_DATA(rtattr);
+                       break;
+               }
+       }
+
+       _debug("%s: "NIPQUAD_FMT"/"NIPQUAD_FMT" mtu %u",
+              name, NIPQUAD(this->address), NIPQUAD(this->netmask),
+              this->mtu);
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * parse an RTM_GETLINK response for the MAC address belonging to the lowest
+ * non-internal interface
+ */
+static int afs_rtm_getlink_mac_parse(struct afs_rtm_desc *desc,
+                                    struct nlmsghdr *nlhdr)
+{
+       struct ifinfomsg *ifi;
+       struct rtattr *rtattr;
+       const char *name;
+       size_t remain, len;
+       bool set;
+
+       ifi = (struct ifinfomsg *) NLMSG_DATA(nlhdr);
+
+       _enter("{ix=%d}", ifi->ifi_index);
+
+       if (ifi->ifi_index >= desc->mac_index) {
+               _leave(" = 0 [high]");
+               return 0;
+       }
+       if (ifi->ifi_type == ARPHRD_LOOPBACK) {
+               _leave(" = 0 [loopback]");
+               return 0;
+       }
+
+       rtattr = NLMSG_DATA(nlhdr) + NLMSG_ALIGN(sizeof(struct ifinfomsg));
+       remain = NLMSG_PAYLOAD(nlhdr, sizeof(struct ifinfomsg));
+
+       name = "unknown";
+       set = false;
+       for (; RTA_OK(rtattr, remain); rtattr = RTA_NEXT(rtattr, remain)) {
+               switch (rtattr->rta_type) {
+               case IFLA_ADDRESS:
+                       len = RTA_PAYLOAD(rtattr);
+                       memcpy(desc->mac, RTA_DATA(rtattr),
+                              min_t(size_t, len, 6));
+                       desc->mac_index = ifi->ifi_index;
+                       set = true;
+                       break;
+               case IFLA_IFNAME:
+                       name = RTA_DATA(rtattr);
+                       break;
+               }
+       }
+
+       if (set)
+               _debug("%s: %02x:%02x:%02x:%02x:%02x:%02x",
+                      name,
+                      desc->mac[0], desc->mac[1], desc->mac[2],
+                      desc->mac[3], desc->mac[4], desc->mac[5]);
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * read the rtnetlink response and pass to parsing routine
+ */
+static int afs_read_rtm(struct afs_rtm_desc *desc)
+{
+       struct nlmsghdr *nlhdr, tmphdr;
+       struct msghdr msg;
+       struct kvec iov[1];
+       void *data;
+       bool last = false;
+       int len, ret, remain;
+
+       _enter("");
+
+       do {
+               /* first of all peek to see how big the packet is */
+               memset(&msg, 0, sizeof(msg));
+               iov[0].iov_base = &tmphdr;
+               iov[0].iov_len = sizeof(tmphdr);
+               len = kernel_recvmsg(desc->nlsock, &msg, iov, 1,
+                                    sizeof(tmphdr), MSG_PEEK | MSG_TRUNC);
+               if (len < 0) {
+                       _leave(" = %d [peek]", len);
+                       return len;
+               }
+               if (len == 0)
+                       continue;
+               if (len < sizeof(tmphdr) || len < NLMSG_PAYLOAD(&tmphdr, 0)) {
+                       _leave(" = -EMSGSIZE");
+                       return -EMSGSIZE;
+               }
+
+               if (desc->datamax < len) {
+                       kfree(desc->data);
+                       desc->data = NULL;
+                       data = kmalloc(len, GFP_KERNEL);
+                       if (!data)
+                               return -ENOMEM;
+                       desc->data = data;
+               }
+               desc->datamax = len;
+
+               /* read all the data from this packet */
+               iov[0].iov_base = desc->data;
+               iov[0].iov_len = desc->datamax;
+               desc->datalen = kernel_recvmsg(desc->nlsock, &msg, iov, 1,
+                                              desc->datamax, 0);
+               if (desc->datalen < 0) {
+                       _leave(" = %ld [recv]", desc->datalen);
+                       return desc->datalen;
+               }
+
+               nlhdr = desc->data;
+
+               /* check if the header is valid */
+               if (!NLMSG_OK(nlhdr, desc->datalen) ||
+                   nlhdr->nlmsg_type == NLMSG_ERROR) {
+                       _leave(" = -EIO");
+                       return -EIO;
+               }
+
+               /* see if this is the last message */
+               if (nlhdr->nlmsg_type == NLMSG_DONE ||
+                   !(nlhdr->nlmsg_flags & NLM_F_MULTI))
+                       last = true;
+
+               /* parse the bits we got this time */
+               nlmsg_for_each_msg(nlhdr, desc->data, desc->datalen, remain) {
+                       ret = desc->parse(desc, nlhdr);
+                       if (ret < 0) {
+                               _leave(" = %d [parse]", ret);
+                               return ret;
+                       }
+               }
+
+       } while (!last);
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * list the interface bound addresses to get the address and netmask
+ */
+static int afs_rtm_getaddr(struct afs_rtm_desc *desc)
+{
+       struct msghdr msg;
+       struct kvec iov[1];
+       int ret;
+
+       struct {
+               struct nlmsghdr nl_msg __attribute__((aligned(NLMSG_ALIGNTO)));
+               struct ifaddrmsg addr_msg __attribute__((aligned(NLMSG_ALIGNTO)));
+       } request;
+
+       _enter("");
+
+       memset(&request, 0, sizeof(request));
+
+       request.nl_msg.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
+       request.nl_msg.nlmsg_type = RTM_GETADDR;
+       request.nl_msg.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
+       request.nl_msg.nlmsg_seq = desc->msg_seq++;
+       request.nl_msg.nlmsg_pid = 0;
+
+       memset(&msg, 0, sizeof(msg));
+       iov[0].iov_base = &request;
+       iov[0].iov_len = sizeof(request);
+
+       ret = kernel_sendmsg(desc->nlsock, &msg, iov, 1, iov[0].iov_len);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * list the interface link statuses to get the MTUs
+ */
+static int afs_rtm_getlink(struct afs_rtm_desc *desc)
+{
+       struct msghdr msg;
+       struct kvec iov[1];
+       int ret;
+
+       struct {
+               struct nlmsghdr nl_msg __attribute__((aligned(NLMSG_ALIGNTO)));
+               struct ifinfomsg link_msg __attribute__((aligned(NLMSG_ALIGNTO)));
+       } request;
+
+       _enter("");
+
+       memset(&request, 0, sizeof(request));
+
+       request.nl_msg.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+       request.nl_msg.nlmsg_type = RTM_GETLINK;
+       request.nl_msg.nlmsg_flags = NLM_F_REQUEST | NLM_F_ROOT;
+       request.nl_msg.nlmsg_seq = desc->msg_seq++;
+       request.nl_msg.nlmsg_pid = 0;
+
+       memset(&msg, 0, sizeof(msg));
+       iov[0].iov_base = &request;
+       iov[0].iov_len = sizeof(request);
+
+       ret = kernel_sendmsg(desc->nlsock, &msg, iov, 1, iov[0].iov_len);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * cull any interface records for which there isn't an MTU value
+ */
+static void afs_cull_interfaces(struct afs_rtm_desc *desc)
+{
+       struct afs_interface *bufs = desc->bufs;
+       size_t nbufs = desc->nbufs;
+       int loop, point = 0;
+
+       _enter("{%zu}", nbufs);
+
+       for (loop = 0; loop < nbufs; loop++) {
+               if (desc->bufs[loop].mtu != 0) {
+                       if (loop != point) {
+                               ASSERTCMP(loop, >, point);
+                               bufs[point] = bufs[loop];
+                       }
+                       point++;
+               }
+       }
+
+       desc->nbufs = point;
+       _leave(" [%zu/%zu]", desc->nbufs, nbufs);
+}
+
+/*
+ * get a list of this system's interface IPv4 addresses, netmasks and MTUs
+ * - returns the number of interface records in the buffer
+ */
+int afs_get_ipv4_interfaces(struct afs_interface *bufs, size_t maxbufs,
+                           bool wantloopback)
+{
+       struct afs_rtm_desc desc;
+       int ret, loop;
+
+       _enter("");
+
+       memset(&desc, 0, sizeof(desc));
+       desc.bufs = bufs;
+       desc.maxbufs = maxbufs;
+       desc.wantloopback = wantloopback;
+
+       ret = sock_create_kern(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE,
+                              &desc.nlsock);
+       if (ret < 0) {
+               _leave(" = %d [sock]", ret);
+               return ret;
+       }
+
+       /* issue RTM_GETADDR */
+       desc.parse = afs_rtm_getaddr_parse;
+       ret = afs_rtm_getaddr(&desc);
+       if (ret < 0)
+               goto error;
+       ret = afs_read_rtm(&desc);
+       if (ret < 0)
+               goto error;
+
+       /* issue RTM_GETLINK */
+       desc.parse = afs_rtm_getlink_if_parse;
+       ret = afs_rtm_getlink(&desc);
+       if (ret < 0)
+               goto error;
+       ret = afs_read_rtm(&desc);
+       if (ret < 0)
+               goto error;
+
+       afs_cull_interfaces(&desc);
+       ret = desc.nbufs;
+
+       for (loop = 0; loop < ret; loop++)
+               _debug("[%d] "NIPQUAD_FMT"/"NIPQUAD_FMT" mtu %u",
+                      bufs[loop].index,
+                      NIPQUAD(bufs[loop].address),
+                      NIPQUAD(bufs[loop].netmask),
+                      bufs[loop].mtu);
+
+error:
+       kfree(desc.data);
+       sock_release(desc.nlsock);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * get a MAC address from a random ethernet interface that has a real one
+ * - the buffer should be 6 bytes in size
+ */
+int afs_get_MAC_address(u8 mac[6])
+{
+       struct afs_rtm_desc desc;
+       int ret;
+
+       _enter("");
+
+       memset(&desc, 0, sizeof(desc));
+       desc.mac = mac;
+       desc.mac_index = UINT_MAX;
+
+       ret = sock_create_kern(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE,
+                              &desc.nlsock);
+       if (ret < 0) {
+               _leave(" = %d [sock]", ret);
+               return ret;
+       }
+
+       /* issue RTM_GETLINK */
+       desc.parse = afs_rtm_getlink_mac_parse;
+       ret = afs_rtm_getlink(&desc);
+       if (ret < 0)
+               goto error;
+       ret = afs_read_rtm(&desc);
+       if (ret < 0)
+               goto error;
+
+       if (desc.mac_index < UINT_MAX) {
+               /* got a MAC address */
+               _debug("[%d] %02x:%02x:%02x:%02x:%02x:%02x",
+                      desc.mac_index,
+                      mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+       } else {
+               ret = -ENONET;
+       }
+
+error:
+       sock_release(desc.nlsock);
+       _leave(" = %d", ret);
+       return ret;
+}
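For reference, a minimal sketch of how a caller elsewhere in kAFS might use the two helpers added above. The function below is hypothetical and not part of this change; the index, address, netmask and mtu fields of struct afs_interface are taken from their use in use-rtnetlink.c, and the structure itself is assumed to be declared in fs/afs/internal.h alongside the two prototypes.

/* sketch only -- assumes the same #includes as use-rtnetlink.c above */
static int afs_example_show_interfaces(void)
{
	struct afs_interface bufs[8];
	u8 mac[6];
	int nifs, loop, ret;

	/* gather up to 8 IPv4 interfaces, skipping loopback */
	nifs = afs_get_ipv4_interfaces(bufs, 8, false);
	if (nifs < 0)
		return nifs;

	for (loop = 0; loop < nifs; loop++)
		printk(KERN_DEBUG "kAFS: if[%d] " NIPQUAD_FMT "/" NIPQUAD_FMT " mtu %u\n",
		       bufs[loop].index,
		       NIPQUAD(bufs[loop].address),
		       NIPQUAD(bufs[loop].netmask),
		       bufs[loop].mtu);

	/* fetch a MAC address from the lowest-indexed non-loopback interface */
	ret = afs_get_MAC_address(mac);
	if (ret < 0)
		return ret;

	printk(KERN_DEBUG "kAFS: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}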
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index 7b0e319..36c1306 100644 (file)
@@ -1,4 +1,4 @@
-/* vlclient.c: AFS Volume Location Service client
+/* AFS Volume Location Service client
  *
  * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
 
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/call.h>
-#include "server.h"
-#include "volume.h"
-#include "vlclient.h"
-#include "kafsasyncd.h"
-#include "kafstimod.h"
-#include "errors.h"
 #include "internal.h"
 
-#define VLGETENTRYBYID         503     /* AFS Get Cache Entry By ID operation ID */
-#define VLGETENTRYBYNAME       504     /* AFS Get Cache Entry By Name operation ID */
-#define VLPROBE                        514     /* AFS Probe Volume Location Service operation ID */
-
-static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call);
-static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call);
-
-/*****************************************************************************/
 /*
- * map afs VL abort codes to/from Linux error codes
- * - called with call->lock held
+ * map volume locator abort codes to error codes
  */
-static void afs_rxvl_aemap(struct rxrpc_call *call)
+static int afs_vl_abort_to_error(u32 abort_code)
 {
-       int err;
-
-       _enter("{%u,%u,%d}",
-              call->app_err_state, call->app_abort_code, call->app_errno);
-
-       switch (call->app_err_state) {
-       case RXRPC_ESTATE_LOCAL_ABORT:
-               call->app_abort_code = -call->app_errno;
-               return;
-
-       case RXRPC_ESTATE_PEER_ABORT:
-               switch (call->app_abort_code) {
-               case AFSVL_IDEXIST:             err = -EEXIST;          break;
-               case AFSVL_IO:                  err = -EREMOTEIO;       break;
-               case AFSVL_NAMEEXIST:           err = -EEXIST;          break;
-               case AFSVL_CREATEFAIL:          err = -EREMOTEIO;       break;
-               case AFSVL_NOENT:               err = -ENOMEDIUM;       break;
-               case AFSVL_EMPTY:               err = -ENOMEDIUM;       break;
-               case AFSVL_ENTDELETED:          err = -ENOMEDIUM;       break;
-               case AFSVL_BADNAME:             err = -EINVAL;          break;
-               case AFSVL_BADINDEX:            err = -EINVAL;          break;
-               case AFSVL_BADVOLTYPE:          err = -EINVAL;          break;
-               case AFSVL_BADSERVER:           err = -EINVAL;          break;
-               case AFSVL_BADPARTITION:        err = -EINVAL;          break;
-               case AFSVL_REPSFULL:            err = -EFBIG;           break;
-               case AFSVL_NOREPSERVER:         err = -ENOENT;          break;
-               case AFSVL_DUPREPSERVER:        err = -EEXIST;          break;
-               case AFSVL_RWNOTFOUND:          err = -ENOENT;          break;
-               case AFSVL_BADREFCOUNT:         err = -EINVAL;          break;
-               case AFSVL_SIZEEXCEEDED:        err = -EINVAL;          break;
-               case AFSVL_BADENTRY:            err = -EINVAL;          break;
-               case AFSVL_BADVOLIDBUMP:        err = -EINVAL;          break;
-               case AFSVL_IDALREADYHASHED:     err = -EINVAL;          break;
-               case AFSVL_ENTRYLOCKED:         err = -EBUSY;           break;
-               case AFSVL_BADVOLOPER:          err = -EBADRQC;         break;
-               case AFSVL_BADRELLOCKTYPE:      err = -EINVAL;          break;
-               case AFSVL_RERELEASE:           err = -EREMOTEIO;       break;
-               case AFSVL_BADSERVERFLAG:       err = -EINVAL;          break;
-               case AFSVL_PERM:                err = -EACCES;          break;
-               case AFSVL_NOMEM:               err = -EREMOTEIO;       break;
-               default:
-                       err = afs_abort_to_error(call->app_abort_code);
-                       break;
-               }
-               call->app_errno = err;
-               return;
-
+       _enter("%u", abort_code);
+
+       switch (abort_code) {
+       case AFSVL_IDEXIST:             return -EEXIST;
+       case AFSVL_IO:                  return -EREMOTEIO;
+       case AFSVL_NAMEEXIST:           return -EEXIST;
+       case AFSVL_CREATEFAIL:          return -EREMOTEIO;
+       case AFSVL_NOENT:               return -ENOMEDIUM;
+       case AFSVL_EMPTY:               return -ENOMEDIUM;
+       case AFSVL_ENTDELETED:          return -ENOMEDIUM;
+       case AFSVL_BADNAME:             return -EINVAL;
+       case AFSVL_BADINDEX:            return -EINVAL;
+       case AFSVL_BADVOLTYPE:          return -EINVAL;
+       case AFSVL_BADSERVER:           return -EINVAL;
+       case AFSVL_BADPARTITION:        return -EINVAL;
+       case AFSVL_REPSFULL:            return -EFBIG;
+       case AFSVL_NOREPSERVER:         return -ENOENT;
+       case AFSVL_DUPREPSERVER:        return -EEXIST;
+       case AFSVL_RWNOTFOUND:          return -ENOENT;
+       case AFSVL_BADREFCOUNT:         return -EINVAL;
+       case AFSVL_SIZEEXCEEDED:        return -EINVAL;
+       case AFSVL_BADENTRY:            return -EINVAL;
+       case AFSVL_BADVOLIDBUMP:        return -EINVAL;
+       case AFSVL_IDALREADYHASHED:     return -EINVAL;
+       case AFSVL_ENTRYLOCKED:         return -EBUSY;
+       case AFSVL_BADVOLOPER:          return -EBADRQC;
+       case AFSVL_BADRELLOCKTYPE:      return -EINVAL;
+       case AFSVL_RERELEASE:           return -EREMOTEIO;
+       case AFSVL_BADSERVERFLAG:       return -EINVAL;
+       case AFSVL_PERM:                return -EACCES;
+       case AFSVL_NOMEM:               return -EREMOTEIO;
        default:
-               return;
+               return afs_abort_to_error(abort_code);
        }
-} /* end afs_rxvl_aemap() */
+}
 
-#if 0
-/*****************************************************************************/
 /*
- * probe a volume location server to see if it is still alive -- unused
+ * deliver reply data to a VL.GetEntryByXXX call
  */
-static int afs_rxvl_probe(struct afs_server *server, int alloc_flags)
+static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call,
+                                          struct sk_buff *skb, bool last)
 {
-       struct rxrpc_connection *conn;
-       struct rxrpc_call *call;
-       struct kvec piov[1];
-       size_t sent;
-       int ret;
-       __be32 param[1];
-
-       DECLARE_WAITQUEUE(myself, current);
-
-       /* get hold of the vlserver connection */
-       ret = afs_server_get_vlconn(server, &conn);
-       if (ret < 0)
-               goto out;
-
-       /* create a call through that connection */
-       ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call);
-       if (ret < 0) {
-               printk("kAFS: Unable to create call: %d\n", ret);
-               goto out_put_conn;
-       }
-       call->app_opcode = VLPROBE;
-
-       /* we want to get event notifications from the call */
-       add_wait_queue(&call->waitq, &myself);
-
-       /* marshall the parameters */
-       param[0] = htonl(VLPROBE);
-       piov[0].iov_len = sizeof(param);
-       piov[0].iov_base = param;
-
-       /* send the parameters to the server */
-       ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET,
-                                   alloc_flags, 0, &sent);
-       if (ret < 0)
-               goto abort;
-
-       /* wait for the reply to completely arrive */
-       for (;;) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
-                   signal_pending(current))
-                       break;
-               schedule();
-       }
-       set_current_state(TASK_RUNNING);
-
-       ret = -EINTR;
-       if (signal_pending(current))
-               goto abort;
-
-       switch (call->app_call_state) {
-       case RXRPC_CSTATE_ERROR:
-               ret = call->app_errno;
-               goto out_unwait;
-
-       case RXRPC_CSTATE_CLNT_GOT_REPLY:
-               ret = 0;
-               goto out_unwait;
-
-       default:
-               BUG();
-       }
-
- abort:
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       rxrpc_call_abort(call, ret);
-       schedule();
-
- out_unwait:
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&call->waitq, &myself);
-       rxrpc_put_call(call);
- out_put_conn:
-       rxrpc_put_connection(conn);
- out:
-       return ret;
+       struct afs_cache_vlocation *entry;
+       __be32 *bp;
+       u32 tmp;
+       int loop;
 
-} /* end afs_rxvl_probe() */
-#endif
+       _enter(",,%u", last);
 
-/*****************************************************************************/
-/*
- * look up a volume location database entry by name
- */
-int afs_rxvl_get_entry_by_name(struct afs_server *server,
-                              const char *volname,
-                              unsigned volnamesz,
-                              struct afs_cache_vlocation *entry)
-{
-       DECLARE_WAITQUEUE(myself, current);
-
-       struct rxrpc_connection *conn;
-       struct rxrpc_call *call;
-       struct kvec piov[3];
-       unsigned tmp;
-       size_t sent;
-       int ret, loop;
-       __be32 *bp, param[2], zero;
-
-       _enter(",%*.*s,%u,", volnamesz, volnamesz, volname, volnamesz);
-
-       memset(entry, 0, sizeof(*entry));
-
-       /* get hold of the vlserver connection */
-       ret = afs_server_get_vlconn(server, &conn);
-       if (ret < 0)
-               goto out;
-
-       /* create a call through that connection */
-       ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call);
-       if (ret < 0) {
-               printk("kAFS: Unable to create call: %d\n", ret);
-               goto out_put_conn;
-       }
-       call->app_opcode = VLGETENTRYBYNAME;
+       afs_transfer_reply(call, skb);
+       if (!last)
+               return 0;
 
-       /* we want to get event notifications from the call */
-       add_wait_queue(&call->waitq, &myself);
+       if (call->reply_size != call->reply_max)
+               return -EBADMSG;
 
-       /* marshall the parameters */
-       piov[1].iov_len = volnamesz;
-       piov[1].iov_base = (char *) volname;
-
-       zero = 0;
-       piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
-       piov[2].iov_base = &zero;
-
-       param[0] = htonl(VLGETENTRYBYNAME);
-       param[1] = htonl(piov[1].iov_len);
-
-       piov[0].iov_len = sizeof(param);
-       piov[0].iov_base = param;
-
-       /* send the parameters to the server */
-       ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
-                                   0, &sent);
-       if (ret < 0)
-               goto abort;
-
-       /* wait for the reply to completely arrive */
-       bp = rxrpc_call_alloc_scratch(call, 384);
-
-       ret = rxrpc_call_read_data(call, bp, 384,
-                                  RXRPC_CALL_READ_BLOCK |
-                                  RXRPC_CALL_READ_ALL);
-       if (ret < 0) {
-               if (ret == -ECONNABORTED) {
-                       ret = call->app_errno;
-                       goto out_unwait;
-               }
-               goto abort;
-       }
+       /* unmarshall the reply once we've received all of it */
+       entry = call->reply;
+       bp = call->buffer;
 
-       /* unmarshall the reply */
        for (loop = 0; loop < 64; loop++)
                entry->name[loop] = ntohl(*bp++);
+       entry->name[loop] = 0;
        bp++; /* final NUL */
 
        bp++; /* type */
@@ -264,6 +93,7 @@ int afs_rxvl_get_entry_by_name(struct afs_server *server,
 
        for (loop = 0; loop < 8; loop++) {
                tmp = ntohl(*bp++);
+               entry->srvtmask[loop] = 0;
                if (tmp & AFS_VLSF_RWVOL)
                        entry->srvtmask[loop] |= AFS_VOL_VTM_RW;
                if (tmp & AFS_VLSF_ROVOL)
@@ -279,417 +109,110 @@ int afs_rxvl_get_entry_by_name(struct afs_server *server,
        bp++; /* clone ID */
 
        tmp = ntohl(*bp++); /* flags */
+       entry->vidmask = 0;
        if (tmp & AFS_VLF_RWEXISTS)
                entry->vidmask |= AFS_VOL_VTM_RW;
        if (tmp & AFS_VLF_ROEXISTS)
                entry->vidmask |= AFS_VOL_VTM_RO;
        if (tmp & AFS_VLF_BACKEXISTS)
                entry->vidmask |= AFS_VOL_VTM_BAK;
-
-       ret = -ENOMEDIUM;
        if (!entry->vidmask)
-               goto abort;
-
-       /* success */
-       entry->rtime = get_seconds();
-       ret = 0;
-
- out_unwait:
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&call->waitq, &myself);
-       rxrpc_put_call(call);
- out_put_conn:
-       rxrpc_put_connection(conn);
- out:
-       _leave(" = %d", ret);
-       return ret;
-
- abort:
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       rxrpc_call_abort(call, ret);
-       schedule();
-       goto out_unwait;
-} /* end afs_rxvl_get_entry_by_name() */
-
-/*****************************************************************************/
-/*
- * look up a volume location database entry by ID
- */
-int afs_rxvl_get_entry_by_id(struct afs_server *server,
-                            afs_volid_t volid,
-                            afs_voltype_t voltype,
-                            struct afs_cache_vlocation *entry)
-{
-       DECLARE_WAITQUEUE(myself, current);
-
-       struct rxrpc_connection *conn;
-       struct rxrpc_call *call;
-       struct kvec piov[1];
-       unsigned tmp;
-       size_t sent;
-       int ret, loop;
-       __be32 *bp, param[3];
-
-       _enter(",%x,%d,", volid, voltype);
-
-       memset(entry, 0, sizeof(*entry));
-
-       /* get hold of the vlserver connection */
-       ret = afs_server_get_vlconn(server, &conn);
-       if (ret < 0)
-               goto out;
-
-       /* create a call through that connection */
-       ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call);
-       if (ret < 0) {
-               printk("kAFS: Unable to create call: %d\n", ret);
-               goto out_put_conn;
-       }
-       call->app_opcode = VLGETENTRYBYID;
-
-       /* we want to get event notifications from the call */
-       add_wait_queue(&call->waitq, &myself);
-
-       /* marshall the parameters */
-       param[0] = htonl(VLGETENTRYBYID);
-       param[1] = htonl(volid);
-       param[2] = htonl(voltype);
-
-       piov[0].iov_len = sizeof(param);
-       piov[0].iov_base = param;
-
-       /* send the parameters to the server */
-       ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
-                                   0, &sent);
-       if (ret < 0)
-               goto abort;
-
-       /* wait for the reply to completely arrive */
-       bp = rxrpc_call_alloc_scratch(call, 384);
-
-       ret = rxrpc_call_read_data(call, bp, 384,
-                                  RXRPC_CALL_READ_BLOCK |
-                                  RXRPC_CALL_READ_ALL);
-       if (ret < 0) {
-               if (ret == -ECONNABORTED) {
-                       ret = call->app_errno;
-                       goto out_unwait;
-               }
-               goto abort;
-       }
-
-       /* unmarshall the reply */
-       for (loop = 0; loop < 64; loop++)
-               entry->name[loop] = ntohl(*bp++);
-       bp++; /* final NUL */
+               return -EBADMSG;
 
-       bp++; /* type */
-       entry->nservers = ntohl(*bp++);
-
-       for (loop = 0; loop < 8; loop++)
-               entry->servers[loop].s_addr = *bp++;
-
-       bp += 8; /* partition IDs */
+       _leave(" = 0 [done]");
+       return 0;
+}
 
-       for (loop = 0; loop < 8; loop++) {
-               tmp = ntohl(*bp++);
-               if (tmp & AFS_VLSF_RWVOL)
-                       entry->srvtmask[loop] |= AFS_VOL_VTM_RW;
-               if (tmp & AFS_VLSF_ROVOL)
-                       entry->srvtmask[loop] |= AFS_VOL_VTM_RO;
-               if (tmp & AFS_VLSF_BACKVOL)
-                       entry->srvtmask[loop] |= AFS_VOL_VTM_BAK;
-       }
-
-       entry->vid[0] = ntohl(*bp++);
-       entry->vid[1] = ntohl(*bp++);
-       entry->vid[2] = ntohl(*bp++);
-
-       bp++; /* clone ID */
-
-       tmp = ntohl(*bp++); /* flags */
-       if (tmp & AFS_VLF_RWEXISTS)
-               entry->vidmask |= AFS_VOL_VTM_RW;
-       if (tmp & AFS_VLF_ROEXISTS)
-               entry->vidmask |= AFS_VOL_VTM_RO;
-       if (tmp & AFS_VLF_BACKEXISTS)
-               entry->vidmask |= AFS_VOL_VTM_BAK;
-
-       ret = -ENOMEDIUM;
-       if (!entry->vidmask)
-               goto abort;
-
-#if 0 /* TODO: remove */
-       entry->nservers = 3;
-       entry->servers[0].s_addr = htonl(0xac101249);
-       entry->servers[1].s_addr = htonl(0xac101243);
-       entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
-
-       entry->srvtmask[0] = AFS_VOL_VTM_RO;
-       entry->srvtmask[1] = AFS_VOL_VTM_RO;
-       entry->srvtmask[2] = AFS_VOL_VTM_RO | AFS_VOL_VTM_RW;
-#endif
-
-       /* success */
-       entry->rtime = get_seconds();
-       ret = 0;
-
- out_unwait:
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&call->waitq, &myself);
-       rxrpc_put_call(call);
- out_put_conn:
-       rxrpc_put_connection(conn);
- out:
-       _leave(" = %d", ret);
-       return ret;
-
- abort:
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       rxrpc_call_abort(call, ret);
-       schedule();
-       goto out_unwait;
-} /* end afs_rxvl_get_entry_by_id() */
-
-/*****************************************************************************/
 /*
- * look up a volume location database entry by ID asynchronously
+ * VL.GetEntryByName operation type
  */
-int afs_rxvl_get_entry_by_id_async(struct afs_async_op *op,
-                                  afs_volid_t volid,
-                                  afs_voltype_t voltype)
-{
-       struct rxrpc_connection *conn;
-       struct rxrpc_call *call;
-       struct kvec piov[1];
-       size_t sent;
-       int ret;
-       __be32 param[3];
-
-       _enter(",%x,%d,", volid, voltype);
-
-       /* get hold of the vlserver connection */
-       ret = afs_server_get_vlconn(op->server, &conn);
-       if (ret < 0) {
-               _leave(" = %d", ret);
-               return ret;
-       }
-
-       /* create a call through that connection */
-       ret = rxrpc_create_call(conn,
-                               afs_rxvl_get_entry_by_id_attn,
-                               afs_rxvl_get_entry_by_id_error,
-                               afs_rxvl_aemap,
-                               &op->call);
-       rxrpc_put_connection(conn);
-
-       if (ret < 0) {
-               printk("kAFS: Unable to create call: %d\n", ret);
-               _leave(" = %d", ret);
-               return ret;
-       }
+static const struct afs_call_type afs_RXVLGetEntryByName = {
+       .name           = "VL.GetEntryByName",
+       .deliver        = afs_deliver_vl_get_entry_by_xxx,
+       .abort_to_error = afs_vl_abort_to_error,
+       .destructor     = afs_flat_call_destructor,
+};
 
-       op->call->app_opcode = VLGETENTRYBYID;
-       op->call->app_user = op;
-
-       call = op->call;
-       rxrpc_get_call(call);
-
-       /* send event notifications from the call to kafsasyncd */
-       afs_kafsasyncd_begin_op(op);
-
-       /* marshall the parameters */
-       param[0] = htonl(VLGETENTRYBYID);
-       param[1] = htonl(volid);
-       param[2] = htonl(voltype);
-
-       piov[0].iov_len = sizeof(param);
-       piov[0].iov_base = param;
-
-       /* allocate result read buffer in scratch space */
-       call->app_scr_ptr = rxrpc_call_alloc_scratch(op->call, 384);
-
-       /* send the parameters to the server */
-       ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
-                                   0, &sent);
-       if (ret < 0) {
-               rxrpc_call_abort(call, ret); /* handle from kafsasyncd */
-               ret = 0;
-               goto out;
-       }
-
-       /* wait for the reply to completely arrive */
-       ret = rxrpc_call_read_data(call, call->app_scr_ptr, 384, 0);
-       switch (ret) {
-       case 0:
-       case -EAGAIN:
-       case -ECONNABORTED:
-               ret = 0;
-               break;  /* all handled by kafsasyncd */
-
-       default:
-               rxrpc_call_abort(call, ret); /* make kafsasyncd handle it */
-               ret = 0;
-               break;
-       }
-
- out:
-       rxrpc_put_call(call);
-       _leave(" = %d", ret);
-       return ret;
-
-} /* end afs_rxvl_get_entry_by_id_async() */
+/*
+ * VL.GetEntryById operation type
+ */
+static const struct afs_call_type afs_RXVLGetEntryById = {
+       .name           = "VL.GetEntryById",
+       .deliver        = afs_deliver_vl_get_entry_by_xxx,
+       .abort_to_error = afs_vl_abort_to_error,
+       .destructor     = afs_flat_call_destructor,
+};
 
-/*****************************************************************************/
 /*
- * attend to the asynchronous get VLDB entry by ID
+ * dispatch a get volume entry by name operation
  */
-int afs_rxvl_get_entry_by_id_async2(struct afs_async_op *op,
-                                   struct afs_cache_vlocation *entry)
+int afs_vl_get_entry_by_name(struct in_addr *addr,
+                            struct key *key,
+                            const char *volname,
+                            struct afs_cache_vlocation *entry,
+                            const struct afs_wait_mode *wait_mode)
 {
+       struct afs_call *call;
+       size_t volnamesz, reqsz, padsz;
        __be32 *bp;
-       __u32 tmp;
-       int loop, ret;
-
-       _enter("{op=%p cst=%u}", op, op->call->app_call_state);
-
-       memset(entry, 0, sizeof(*entry));
-
-       if (op->call->app_call_state == RXRPC_CSTATE_COMPLETE) {
-               /* operation finished */
-               afs_kafsasyncd_terminate_op(op);
-
-               bp = op->call->app_scr_ptr;
-
-               /* unmarshall the reply */
-               for (loop = 0; loop < 64; loop++)
-                       entry->name[loop] = ntohl(*bp++);
-               bp++; /* final NUL */
-
-               bp++; /* type */
-               entry->nservers = ntohl(*bp++);
-
-               for (loop = 0; loop < 8; loop++)
-                       entry->servers[loop].s_addr = *bp++;
-
-               bp += 8; /* partition IDs */
-
-               for (loop = 0; loop < 8; loop++) {
-                       tmp = ntohl(*bp++);
-                       if (tmp & AFS_VLSF_RWVOL)
-                               entry->srvtmask[loop] |= AFS_VOL_VTM_RW;
-                       if (tmp & AFS_VLSF_ROVOL)
-                               entry->srvtmask[loop] |= AFS_VOL_VTM_RO;
-                       if (tmp & AFS_VLSF_BACKVOL)
-                               entry->srvtmask[loop] |= AFS_VOL_VTM_BAK;
-               }
-
-               entry->vid[0] = ntohl(*bp++);
-               entry->vid[1] = ntohl(*bp++);
-               entry->vid[2] = ntohl(*bp++);
-
-               bp++; /* clone ID */
-
-               tmp = ntohl(*bp++); /* flags */
-               if (tmp & AFS_VLF_RWEXISTS)
-                       entry->vidmask |= AFS_VOL_VTM_RW;
-               if (tmp & AFS_VLF_ROEXISTS)
-                       entry->vidmask |= AFS_VOL_VTM_RO;
-               if (tmp & AFS_VLF_BACKEXISTS)
-                       entry->vidmask |= AFS_VOL_VTM_BAK;
-
-               ret = -ENOMEDIUM;
-               if (!entry->vidmask) {
-                       rxrpc_call_abort(op->call, ret);
-                       goto done;
-               }
-
-#if 0 /* TODO: remove */
-               entry->nservers = 3;
-               entry->servers[0].s_addr = htonl(0xac101249);
-               entry->servers[1].s_addr = htonl(0xac101243);
-               entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
-
-               entry->srvtmask[0] = AFS_VOL_VTM_RO;
-               entry->srvtmask[1] = AFS_VOL_VTM_RO;
-               entry->srvtmask[2] = AFS_VOL_VTM_RO | AFS_VOL_VTM_RW;
-#endif
-
-               /* success */
-               entry->rtime = get_seconds();
-               ret = 0;
-               goto done;
-       }
 
-       if (op->call->app_call_state == RXRPC_CSTATE_ERROR) {
-               /* operation error */
-               ret = op->call->app_errno;
-               goto done;
-       }
+       _enter("");
 
-       _leave(" = -EAGAIN");
-       return -EAGAIN;
+       volnamesz = strlen(volname);
+       padsz = (4 - (volnamesz & 3)) & 3;
+       reqsz = 8 + volnamesz + padsz;
 
- done:
-       rxrpc_put_call(op->call);
-       op->call = NULL;
-       _leave(" = %d", ret);
-       return ret;
-} /* end afs_rxvl_get_entry_by_id_async2() */
+       call = afs_alloc_flat_call(&afs_RXVLGetEntryByName, reqsz, 384);
+       if (!call)
+               return -ENOMEM;
 
-/*****************************************************************************/
-/*
- * handle attention events on an async get-entry-by-ID op
- * - called from krxiod
- */
-static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call)
-{
-       struct afs_async_op *op = call->app_user;
-
-       _enter("{op=%p cst=%u}", op, call->app_call_state);
-
-       switch (call->app_call_state) {
-       case RXRPC_CSTATE_COMPLETE:
-               afs_kafsasyncd_attend_op(op);
-               break;
-       case RXRPC_CSTATE_CLNT_RCV_REPLY:
-               if (call->app_async_read)
-                       break;
-       case RXRPC_CSTATE_CLNT_GOT_REPLY:
-               if (call->app_read_count == 0)
-                       break;
-               printk("kAFS: Reply bigger than expected"
-                      " {cst=%u asyn=%d mark=%Zu rdy=%Zu pr=%u%s}",
-                      call->app_call_state,
-                      call->app_async_read,
-                      call->app_mark,
-                      call->app_ready_qty,
-                      call->pkt_rcv_count,
-                      call->app_last_rcv ? " last" : "");
-
-               rxrpc_call_abort(call, -EBADMSG);
-               break;
-       default:
-               BUG();
-       }
+       call->key = key;
+       call->reply = entry;
+       call->service_id = VL_SERVICE;
+       call->port = htons(AFS_VL_PORT);
 
-       _leave("");
+       /* marshall the parameters */
+       bp = call->request;
+       *bp++ = htonl(VLGETENTRYBYNAME);
+       *bp++ = htonl(volnamesz);
+       memcpy(bp, volname, volnamesz);
+       if (padsz > 0)
+               memset((void *) bp + volnamesz, 0, padsz);
 
-} /* end afs_rxvl_get_entry_by_id_attn() */
+       /* initiate the call */
+       return afs_make_call(addr, call, GFP_KERNEL, wait_mode);
+}
 
-/*****************************************************************************/
 /*
- * handle error events on an async get-entry-by-ID op
- * - called from krxiod
+ * dispatch a get volume entry by ID operation
  */
-static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call)
+int afs_vl_get_entry_by_id(struct in_addr *addr,
+                          struct key *key,
+                          afs_volid_t volid,
+                          afs_voltype_t voltype,
+                          struct afs_cache_vlocation *entry,
+                          const struct afs_wait_mode *wait_mode)
 {
-       struct afs_async_op *op = call->app_user;
+       struct afs_call *call;
+       __be32 *bp;
 
-       _enter("{op=%p cst=%u}", op, call->app_call_state);
+       _enter("");
 
-       afs_kafsasyncd_attend_op(op);
+       call = afs_alloc_flat_call(&afs_RXVLGetEntryById, 12, 384);
+       if (!call)
+               return -ENOMEM;
 
-       _leave("");
+       call->key = key;
+       call->reply = entry;
+       call->service_id = VL_SERVICE;
+       call->port = htons(AFS_VL_PORT);
 
-} /* end afs_rxvl_get_entry_by_id_error() */
+       /* marshall the parameters */
+       bp = call->request;
+       *bp++ = htonl(VLGETENTRYBYID);
+       *bp++ = htonl(volid);
+       *bp   = htonl(voltype);
+
+       /* initiate the call */
+       return afs_make_call(addr, call, GFP_KERNEL, wait_mode);
+}
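The new VL client calls above marshal their arguments into the flat request buffer obtained from afs_alloc_flat_call(). Below is a minimal sketch of the wire layout that afs_vl_get_entry_by_name() builds, restated outside the afs_call machinery; the helper name is made up, and the VLGETENTRYBYNAME value (504) is the operation ID defined in the code removed above.

/* VL.GetEntryByName operation ID, per the removed vlclient.c definitions */
#define VLGETENTRYBYNAME 504

/*
 * Sketch only -- lay out a VL.GetEntryByName request the same way
 * afs_vl_get_entry_by_name() does: 4-byte opcode, 4-byte name length,
 * the name itself, then zero padding up to the next 4-byte boundary.
 * The caller must supply a buffer of at least 8 + strlen(volname) + 3 bytes.
 */
static size_t vl_sketch_getentrybyname_request(__be32 *buf, const char *volname)
{
	size_t volnamesz = strlen(volname);
	size_t padsz = (4 - (volnamesz & 3)) & 3;
	__be32 *bp = buf;

	*bp++ = htonl(VLGETENTRYBYNAME);	/* opcode */
	*bp++ = htonl(volnamesz);		/* name length */
	memcpy(bp, volname, volnamesz);		/* unterminated name */
	if (padsz > 0)
		memset((void *) bp + volnamesz, 0, padsz); /* zero padding */

	return 8 + volnamesz + padsz;		/* == reqsz in the real code */
}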
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 782ee7c..74cce17 100644 (file)
@@ -1,6 +1,6 @@
-/* vlocation.c: volume location management
+/* AFS volume location management
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-#include "volume.h"
-#include "cell.h"
-#include "cmservice.h"
-#include "fsclient.h"
-#include "vlclient.h"
-#include "kafstimod.h"
-#include <rxrpc/connection.h>
 #include "internal.h"
 
-#define AFS_VLDB_TIMEOUT HZ*1000
+unsigned afs_vlocation_timeout = 10;   /* volume location timeout in seconds */
+unsigned afs_vlocation_update_timeout = 10 * 60;
 
-static void afs_vlocation_update_timer(struct afs_timer *timer);
-static void afs_vlocation_update_attend(struct afs_async_op *op);
-static void afs_vlocation_update_discard(struct afs_async_op *op);
-static void __afs_put_vlocation(struct afs_vlocation *vlocation);
+static void afs_vlocation_reaper(struct work_struct *);
+static void afs_vlocation_updater(struct work_struct *);
 
-static void __afs_vlocation_timeout(struct afs_timer *timer)
-{
-       struct afs_vlocation *vlocation =
-               list_entry(timer, struct afs_vlocation, timeout);
-
-       _debug("VL TIMEOUT [%s{u=%d}]",
-              vlocation->vldb.name, atomic_read(&vlocation->usage));
-
-       afs_vlocation_do_timeout(vlocation);
-}
-
-static const struct afs_timer_ops afs_vlocation_timer_ops = {
-       .timed_out      = __afs_vlocation_timeout,
-};
+static LIST_HEAD(afs_vlocation_updates);
+static LIST_HEAD(afs_vlocation_graveyard);
+static DEFINE_SPINLOCK(afs_vlocation_updates_lock);
+static DEFINE_SPINLOCK(afs_vlocation_graveyard_lock);
+static DECLARE_DELAYED_WORK(afs_vlocation_reap, afs_vlocation_reaper);
+static DECLARE_DELAYED_WORK(afs_vlocation_update, afs_vlocation_updater);
+static struct workqueue_struct *afs_vlocation_update_worker;
 
-static const struct afs_timer_ops afs_vlocation_update_timer_ops = {
-       .timed_out      = afs_vlocation_update_timer,
-};
-
-static const struct afs_async_op_ops afs_vlocation_update_op_ops = {
-       .attend         = afs_vlocation_update_attend,
-       .discard        = afs_vlocation_update_discard,
-};
-
-static LIST_HEAD(afs_vlocation_update_pendq);  /* queue of VLs awaiting update */
-static struct afs_vlocation *afs_vlocation_update;     /* VL currently being updated */
-static DEFINE_SPINLOCK(afs_vlocation_update_lock); /* lock guarding update queue */
-
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vlocation_cache_match(void *target,
-                                                    const void *entry);
-static void afs_vlocation_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_vlocation_cache_index_def = {
-       .name           = "vldb",
-       .data_size      = sizeof(struct afs_cache_vlocation),
-       .keys[0]        = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
-       .match          = afs_vlocation_cache_match,
-       .update         = afs_vlocation_cache_update,
-};
-#endif
-
-/*****************************************************************************/
 /*
  * iterate through the VL servers in a cell until one of them admits knowing
  * about the volume in question
- * - caller must have cell->vl_sem write-locked
  */
-static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vlocation,
-                                          const char *name,
-                                          unsigned namesz,
+static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl,
+                                          struct key *key,
                                           struct afs_cache_vlocation *vldb)
 {
-       struct afs_server *server = NULL;
-       struct afs_cell *cell = vlocation->cell;
+       struct afs_cell *cell = vl->cell;
+       struct in_addr addr;
        int count, ret;
 
-       _enter("%s,%*.*s,%u", cell->name, namesz, namesz, name, namesz);
+       _enter("%s,%s", cell->name, vl->vldb.name);
 
+       down_write(&vl->cell->vl_sem);
        ret = -ENOMEDIUM;
        for (count = cell->vl_naddrs; count > 0; count--) {
-               _debug("CellServ[%hu]: %08x",
-                      cell->vl_curr_svix,
-                      cell->vl_addrs[cell->vl_curr_svix].s_addr);
-
-               /* try and create a server */
-               ret = afs_server_lookup(cell,
-                                       &cell->vl_addrs[cell->vl_curr_svix],
-                                       &server);
-               switch (ret) {
-               case 0:
-                       break;
-               case -ENOMEM:
-               case -ENONET:
-                       goto out;
-               default:
-                       goto rotate;
-               }
+               addr = cell->vl_addrs[cell->vl_curr_svix];
+
+               _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);
 
                /* attempt to access the VL server */
-               ret = afs_rxvl_get_entry_by_name(server, name, namesz, vldb);
+               ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb,
+                                              &afs_sync_call);
                switch (ret) {
                case 0:
-                       afs_put_server(server);
                        goto out;
                case -ENOMEM:
                case -ENONET:
                case -ENETUNREACH:
                case -EHOSTUNREACH:
                case -ECONNREFUSED:
-                       down_write(&server->sem);
-                       if (server->vlserver) {
-                               rxrpc_put_connection(server->vlserver);
-                               server->vlserver = NULL;
-                       }
-                       up_write(&server->sem);
-                       afs_put_server(server);
                        if (ret == -ENOMEM || ret == -ENONET)
                                goto out;
                        goto rotate;
                case -ENOMEDIUM:
-                       afs_put_server(server);
                        goto out;
                default:
-                       afs_put_server(server);
-                       ret = -ENOMEDIUM;
+                       ret = -EIO;
                        goto rotate;
                }
 
@@ -146,76 +76,66 @@ static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vlocation,
                cell->vl_curr_svix %= cell->vl_naddrs;
        }
 
- out:
+out:
+       up_write(&vl->cell->vl_sem);
        _leave(" = %d", ret);
        return ret;
+}
 
-} /* end afs_vlocation_access_vl_by_name() */
-
-/*****************************************************************************/
 /*
  * iterate through the VL servers in a cell until one of them admits knowing
  * about the volume in question
- * - caller must have cell->vl_sem write-locked
  */
-static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vlocation,
+static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
+                                        struct key *key,
                                         afs_volid_t volid,
                                         afs_voltype_t voltype,
                                         struct afs_cache_vlocation *vldb)
 {
-       struct afs_server *server = NULL;
-       struct afs_cell *cell = vlocation->cell;
+       struct afs_cell *cell = vl->cell;
+       struct in_addr addr;
        int count, ret;
 
        _enter("%s,%x,%d,", cell->name, volid, voltype);
 
+       down_write(&vl->cell->vl_sem);
        ret = -ENOMEDIUM;
        for (count = cell->vl_naddrs; count > 0; count--) {
-               _debug("CellServ[%hu]: %08x",
-                      cell->vl_curr_svix,
-                      cell->vl_addrs[cell->vl_curr_svix].s_addr);
-
-               /* try and create a server */
-               ret = afs_server_lookup(cell,
-                                       &cell->vl_addrs[cell->vl_curr_svix],
-                                       &server);
-               switch (ret) {
-               case 0:
-                       break;
-               case -ENOMEM:
-               case -ENONET:
-                       goto out;
-               default:
-                       goto rotate;
-               }
+               addr = cell->vl_addrs[cell->vl_curr_svix];
+
+               _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);
 
                /* attempt to access the VL server */
-               ret = afs_rxvl_get_entry_by_id(server, volid, voltype, vldb);
+               ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb,
+                                            &afs_sync_call);
                switch (ret) {
                case 0:
-                       afs_put_server(server);
                        goto out;
                case -ENOMEM:
                case -ENONET:
                case -ENETUNREACH:
                case -EHOSTUNREACH:
                case -ECONNREFUSED:
-                       down_write(&server->sem);
-                       if (server->vlserver) {
-                               rxrpc_put_connection(server->vlserver);
-                               server->vlserver = NULL;
-                       }
-                       up_write(&server->sem);
-                       afs_put_server(server);
                        if (ret == -ENOMEM || ret == -ENONET)
                                goto out;
                        goto rotate;
+               case -EBUSY:
+                       vl->upd_busy_cnt++;
+                       if (vl->upd_busy_cnt <= 3) {
+                               if (vl->upd_busy_cnt > 1) {
+                                       /* second+ BUSY - sleep a little bit */
+                                       set_current_state(TASK_UNINTERRUPTIBLE);
+                                       schedule_timeout(1);
+                                       __set_current_state(TASK_RUNNING);
+                               }
+                               continue;
+                       }
+                       break;
                case -ENOMEDIUM:
-                       afs_put_server(server);
-                       goto out;
+                       vl->upd_rej_cnt++;
+                       goto rotate;
                default:
-                       afs_put_server(server);
-                       ret = -ENOMEDIUM;
+                       ret = -EIO;
                        goto rotate;
                }
 
@@ -223,729 +143,580 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vlocation,
        rotate:
                cell->vl_curr_svix++;
                cell->vl_curr_svix %= cell->vl_naddrs;
+               vl->upd_busy_cnt = 0;
        }
 
- out:
+out:
+       if (ret < 0 && vl->upd_rej_cnt > 0) {
+               printk(KERN_NOTICE "kAFS:"
+                      " Active volume no longer valid '%s'\n",
+                      vl->vldb.name);
+               vl->valid = 0;
+               ret = -ENOMEDIUM;
+       }
+
+       up_write(&vl->cell->vl_sem);
        _leave(" = %d", ret);
        return ret;
+}
 
-} /* end afs_vlocation_access_vl_by_id() */
-
-/*****************************************************************************/
 /*
- * lookup volume location
- * - caller must have cell->vol_sem write-locked
- * - iterate through the VL servers in a cell until one of them admits knowing
- *   about the volume in question
- * - lookup in the local cache if not able to find on the VL server
- * - insert/update in the local cache if did get a VL response
+ * allocate a volume location record
  */
-int afs_vlocation_lookup(struct afs_cell *cell,
-                        const char *name,
-                        unsigned namesz,
-                        struct afs_vlocation **_vlocation)
+static struct afs_vlocation *afs_vlocation_alloc(struct afs_cell *cell,
+                                                const char *name,
+                                                size_t namesz)
 {
-       struct afs_cache_vlocation vldb;
-       struct afs_vlocation *vlocation;
-       afs_voltype_t voltype;
-       afs_volid_t vid;
-       int active = 0, ret;
-
-       _enter("{%s},%*.*s,%u,", cell->name, namesz, namesz, name, namesz);
-
-       if (namesz > sizeof(vlocation->vldb.name)) {
-               _leave(" = -ENAMETOOLONG");
-               return -ENAMETOOLONG;
-       }
-
-       /* search the cell's active list first */
-       list_for_each_entry(vlocation, &cell->vl_list, link) {
-               if (namesz < sizeof(vlocation->vldb.name) &&
-                   vlocation->vldb.name[namesz] != '\0')
-                       continue;
-
-               if (memcmp(vlocation->vldb.name, name, namesz) == 0)
-                       goto found_in_memory;
-       }
-
-       /* search the cell's graveyard list second */
-       spin_lock(&cell->vl_gylock);
-       list_for_each_entry(vlocation, &cell->vl_graveyard, link) {
-               if (namesz < sizeof(vlocation->vldb.name) &&
-                   vlocation->vldb.name[namesz] != '\0')
-                       continue;
-
-               if (memcmp(vlocation->vldb.name, name, namesz) == 0)
-                       goto found_in_graveyard;
-       }
-       spin_unlock(&cell->vl_gylock);
-
-       /* not in the cell's in-memory lists - create a new record */
-       vlocation = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
-       if (!vlocation)
-               return -ENOMEM;
-
-       atomic_set(&vlocation->usage, 1);
-       INIT_LIST_HEAD(&vlocation->link);
-       rwlock_init(&vlocation->lock);
-       memcpy(vlocation->vldb.name, name, namesz);
-
-       afs_timer_init(&vlocation->timeout, &afs_vlocation_timer_ops);
-       afs_timer_init(&vlocation->upd_timer, &afs_vlocation_update_timer_ops);
-       afs_async_op_init(&vlocation->upd_op, &afs_vlocation_update_op_ops);
-
-       afs_get_cell(cell);
-       vlocation->cell = cell;
-
-       list_add_tail(&vlocation->link, &cell->vl_list);
-
-#ifdef AFS_CACHING_SUPPORT
-       /* we want to store it in the cache, plus it might already be
-        * encached */
-       cachefs_acquire_cookie(cell->cache,
-                              &afs_volume_cache_index_def,
-                              vlocation,
-                              &vlocation->cache);
-
-       if (vlocation->valid)
-               goto found_in_cache;
-#endif
-
-       /* try to look up an unknown volume in the cell VL databases by name */
-       ret = afs_vlocation_access_vl_by_name(vlocation, name, namesz, &vldb);
-       if (ret < 0) {
-               printk("kAFS: failed to locate '%*.*s' in cell '%s'\n",
-                      namesz, namesz, name, cell->name);
-               goto error;
+       struct afs_vlocation *vl;
+
+       vl = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
+       if (vl) {
+               vl->cell = cell;
+               vl->state = AFS_VL_NEW;
+               atomic_set(&vl->usage, 1);
+               INIT_LIST_HEAD(&vl->link);
+               INIT_LIST_HEAD(&vl->grave);
+               INIT_LIST_HEAD(&vl->update);
+               init_waitqueue_head(&vl->waitq);
+               spin_lock_init(&vl->lock);
+               memcpy(vl->vldb.name, name, namesz);
        }
 
-       goto found_on_vlserver;
-
- found_in_graveyard:
-       /* found in the graveyard - resurrect */
-       _debug("found in graveyard");
-       atomic_inc(&vlocation->usage);
-       list_move_tail(&vlocation->link, &cell->vl_list);
-       spin_unlock(&cell->vl_gylock);
-
-       afs_kafstimod_del_timer(&vlocation->timeout);
-       goto active;
-
- found_in_memory:
-       /* found in memory - check to see if it's active */
-       _debug("found in memory");
-       atomic_inc(&vlocation->usage);
+       _leave(" = %p", vl);
+       return vl;
+}
 
- active:
-       active = 1;
+/*
+ * update record if we found it in the cache
+ */
+static int afs_vlocation_update_record(struct afs_vlocation *vl,
+                                      struct key *key,
+                                      struct afs_cache_vlocation *vldb)
+{
+       afs_voltype_t voltype;
+       afs_volid_t vid;
+       int ret;
 
-#ifdef AFS_CACHING_SUPPORT
- found_in_cache:
-#endif
        /* try to look up a cached volume in the cell VL databases by ID */
-       _debug("found in cache");
-
        _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
-              vlocation->vldb.name,
-              vlocation->vldb.vidmask,
-              ntohl(vlocation->vldb.servers[0].s_addr),
-              vlocation->vldb.srvtmask[0],
-              ntohl(vlocation->vldb.servers[1].s_addr),
-              vlocation->vldb.srvtmask[1],
-              ntohl(vlocation->vldb.servers[2].s_addr),
-              vlocation->vldb.srvtmask[2]
-              );
+              vl->vldb.name,
+              vl->vldb.vidmask,
+              ntohl(vl->vldb.servers[0].s_addr),
+              vl->vldb.srvtmask[0],
+              ntohl(vl->vldb.servers[1].s_addr),
+              vl->vldb.srvtmask[1],
+              ntohl(vl->vldb.servers[2].s_addr),
+              vl->vldb.srvtmask[2]);
 
        _debug("Vids: %08x %08x %08x",
-              vlocation->vldb.vid[0],
-              vlocation->vldb.vid[1],
-              vlocation->vldb.vid[2]);
+              vl->vldb.vid[0],
+              vl->vldb.vid[1],
+              vl->vldb.vid[2]);
 
-       if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) {
-               vid = vlocation->vldb.vid[0];
+       if (vl->vldb.vidmask & AFS_VOL_VTM_RW) {
+               vid = vl->vldb.vid[0];
                voltype = AFSVL_RWVOL;
-       }
-       else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) {
-               vid = vlocation->vldb.vid[1];
+       } else if (vl->vldb.vidmask & AFS_VOL_VTM_RO) {
+               vid = vl->vldb.vid[1];
                voltype = AFSVL_ROVOL;
-       }
-       else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) {
-               vid = vlocation->vldb.vid[2];
+       } else if (vl->vldb.vidmask & AFS_VOL_VTM_BAK) {
+               vid = vl->vldb.vid[2];
                voltype = AFSVL_BACKVOL;
-       }
-       else {
+       } else {
                BUG();
                vid = 0;
                voltype = 0;
        }
 
-       ret = afs_vlocation_access_vl_by_id(vlocation, vid, voltype, &vldb);
+       /* contact the server to make sure the volume is still available
+        * - TODO: need to handle disconnected operation here
+        */
+       ret = afs_vlocation_access_vl_by_id(vl, key, vid, voltype, vldb);
        switch (ret) {
                /* net error */
        default:
-               printk("kAFS: failed to volume '%*.*s' (%x) up in '%s': %d\n",
-                      namesz, namesz, name, vid, cell->name, ret);
-               goto error;
+               printk(KERN_WARNING "kAFS:"
+                      " failed to update volume '%s' (%x) in '%s': %d\n",
+                      vl->vldb.name, vid, vl->cell->name, ret);
+               _leave(" = %d", ret);
+               return ret;
 
                /* pulled from local cache into memory */
        case 0:
-               goto found_on_vlserver;
+               _leave(" = 0");
+               return 0;
 
                /* uh oh... looks like the volume got deleted */
        case -ENOMEDIUM:
-               printk("kAFS: volume '%*.*s' (%x) does not exist '%s'\n",
-                      namesz, namesz, name, vid, cell->name);
+               printk(KERN_ERR "kAFS:"
+                      " volume '%s' (%x) does not exist in '%s'\n",
+                      vl->vldb.name, vid, vl->cell->name);
 
                /* TODO: make existing record unavailable */
-               goto error;
+               _leave(" = %d", ret);
+               return ret;
        }
+}
 
- found_on_vlserver:
-       _debug("Done VL Lookup: %*.*s %02x { %08x(%x) %08x(%x) %08x(%x) }",
-              namesz, namesz, name,
-              vldb.vidmask,
-              ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0],
-              ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1],
-              ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2]
-              );
-
-       _debug("Vids: %08x %08x %08x", vldb.vid[0], vldb.vid[1], vldb.vid[2]);
+/*
+ * apply the update to a VL record
+ */
+static void afs_vlocation_apply_update(struct afs_vlocation *vl,
+                                      struct afs_cache_vlocation *vldb)
+{
+       _debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
+              vldb->name, vldb->vidmask,
+              ntohl(vldb->servers[0].s_addr), vldb->srvtmask[0],
+              ntohl(vldb->servers[1].s_addr), vldb->srvtmask[1],
+              ntohl(vldb->servers[2].s_addr), vldb->srvtmask[2]);
 
-       if ((namesz < sizeof(vlocation->vldb.name) &&
-            vlocation->vldb.name[namesz] != '\0') ||
-           memcmp(vldb.name, name, namesz) != 0)
-               printk("kAFS: name of volume '%*.*s' changed to '%s' on server\n",
-                      namesz, namesz, name, vldb.name);
+       _debug("Vids: %08x %08x %08x",
+              vldb->vid[0], vldb->vid[1], vldb->vid[2]);
 
-       memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb));
+       if (strcmp(vldb->name, vl->vldb.name) != 0)
+               printk(KERN_NOTICE "kAFS:"
+                      " name of volume '%s' changed to '%s' on server\n",
+                      vl->vldb.name, vldb->name);
 
-       afs_kafstimod_add_timer(&vlocation->upd_timer, 10 * HZ);
+       vl->vldb = *vldb;
 
 #ifdef AFS_CACHING_SUPPORT
        /* update volume entry in local cache */
-       cachefs_update_cookie(vlocation->cache);
-#endif
-
-       *_vlocation = vlocation;
-       _leave(" = 0 (%p)",vlocation);
-       return 0;
-
- error:
-       if (vlocation) {
-               if (active) {
-                       __afs_put_vlocation(vlocation);
-               }
-               else {
-                       list_del(&vlocation->link);
-#ifdef AFS_CACHING_SUPPORT
-                       cachefs_relinquish_cookie(vlocation->cache, 0);
+       cachefs_update_cookie(vl->cache);
 #endif
-                       afs_put_cell(vlocation->cell);
-                       kfree(vlocation);
-               }
-       }
-
-       _leave(" = %d", ret);
-       return ret;
-} /* end afs_vlocation_lookup() */
+}
 
-/*****************************************************************************/
 /*
- * finish using a volume location record
- * - caller must have cell->vol_sem write-locked
+ * fill in a volume location record, consulting both the cache and the VL
+ * server
  */
-static void __afs_put_vlocation(struct afs_vlocation *vlocation)
+static int afs_vlocation_fill_in_record(struct afs_vlocation *vl,
+                                       struct key *key)
 {
-       struct afs_cell *cell;
+       struct afs_cache_vlocation vldb;
+       int ret;
 
-       if (!vlocation)
-               return;
+       _enter("");
 
-       _enter("%s", vlocation->vldb.name);
+       ASSERTCMP(vl->valid, ==, 0);
 
-       cell = vlocation->cell;
+       memset(&vldb, 0, sizeof(vldb));
 
-       /* sanity check */
-       BUG_ON(atomic_read(&vlocation->usage) <= 0);
+       /* see if we have an in-cache copy (will set vl->valid if there is) */
+#ifdef AFS_CACHING_SUPPORT
+       cachefs_acquire_cookie(vl->cell->cache,
+                              &afs_volume_cache_index_def,
+                              vl,
+                              &vl->cache);
+#endif
 
-       spin_lock(&cell->vl_gylock);
-       if (likely(!atomic_dec_and_test(&vlocation->usage))) {
-               spin_unlock(&cell->vl_gylock);
-               _leave("");
-               return;
+       if (vl->valid) {
+               /* try to update a known volume in the cell VL databases by
+                * ID as the name may have changed */
+               _debug("found in cache");
+               ret = afs_vlocation_update_record(vl, key, &vldb);
+       } else {
+               /* try to look up an unknown volume in the cell VL databases by
+                * name */
+               ret = afs_vlocation_access_vl_by_name(vl, key, &vldb);
+               if (ret < 0) {
+                       printk(KERN_WARNING
+                              "kAFS: failed to locate '%s' in cell '%s'\n",
+                              vl->vldb.name, vl->cell->name);
+                       return ret;
+               }
        }
 
-       /* move to graveyard queue */
-       list_move_tail(&vlocation->link,&cell->vl_graveyard);
-
-       /* remove from pending timeout queue (refcounted if actually being
-        * updated) */
-       list_del_init(&vlocation->upd_op.link);
-
-       /* time out in 10 secs */
-       afs_kafstimod_del_timer(&vlocation->upd_timer);
-       afs_kafstimod_add_timer(&vlocation->timeout, 10 * HZ);
-
-       spin_unlock(&cell->vl_gylock);
-
-       _leave(" [killed]");
-} /* end __afs_put_vlocation() */
-
-/*****************************************************************************/
-/*
- * finish using a volume location record
- */
-void afs_put_vlocation(struct afs_vlocation *vlocation)
-{
-       if (vlocation) {
-               struct afs_cell *cell = vlocation->cell;
-
-               down_write(&cell->vl_sem);
-               __afs_put_vlocation(vlocation);
-               up_write(&cell->vl_sem);
-       }
-} /* end afs_put_vlocation() */
+       afs_vlocation_apply_update(vl, &vldb);
+       _leave(" = 0");
+       return 0;
+}
 
-/*****************************************************************************/
 /*
- * timeout vlocation record
- * - removes from the cell's graveyard if the usage count is zero
+ * queue a vlocation record for updates
  */
-void afs_vlocation_do_timeout(struct afs_vlocation *vlocation)
+void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
 {
-       struct afs_cell *cell;
+       struct afs_vlocation *xvl;
 
-       _enter("%s", vlocation->vldb.name);
+       /* wait at least 10 minutes before updating... */
+       vl->update_at = get_seconds() + afs_vlocation_update_timeout;
 
-       cell = vlocation->cell;
+       spin_lock(&afs_vlocation_updates_lock);
 
-       BUG_ON(atomic_read(&vlocation->usage) < 0);
-
-       /* remove from graveyard if still dead */
-       spin_lock(&cell->vl_gylock);
-       if (atomic_read(&vlocation->usage) == 0)
-               list_del_init(&vlocation->link);
-       else
-               vlocation = NULL;
-       spin_unlock(&cell->vl_gylock);
-
-       if (!vlocation) {
-               _leave("");
-               return; /* resurrected */
+       if (!list_empty(&afs_vlocation_updates)) {
+               /* ... but wait at least 1 second more than the newest record
+                * already queued so that we don't spam the VL server suddenly
+                * with lots of requests
+                */
+               xvl = list_entry(afs_vlocation_updates.prev,
+                                struct afs_vlocation, update);
+               if (vl->update_at <= xvl->update_at)
+                       vl->update_at = xvl->update_at + 1;
+       } else {
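+               /* the queue was empty, so kick the update worker to run when
+                * this record falls due */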
+               queue_delayed_work(afs_vlocation_update_worker,
+                                  &afs_vlocation_update,
+                                  afs_vlocation_update_timeout * HZ);
        }
 
-       /* we can now destroy it properly */
-#ifdef AFS_CACHING_SUPPORT
-       cachefs_relinquish_cookie(vlocation->cache, 0);
-#endif
-       afs_put_cell(cell);
-
-       kfree(vlocation);
-
-       _leave(" [destroyed]");
-} /* end afs_vlocation_do_timeout() */
+       list_add_tail(&vl->update, &afs_vlocation_updates);
+       spin_unlock(&afs_vlocation_updates_lock);
+}
 
-/*****************************************************************************/
 /*
- * send an update operation to the currently selected server
+ * lookup volume location
+ * - iterate through the VL servers in a cell until one of them admits knowing
+ *   about the volume in question
+ * - lookup in the local cache if not able to find on the VL server
+ * - insert/update in the local cache if did get a VL response
  */
-static int afs_vlocation_update_begin(struct afs_vlocation *vlocation)
+struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
+                                          struct key *key,
+                                          const char *name,
+                                          size_t namesz)
 {
-       afs_voltype_t voltype;
-       afs_volid_t vid;
+       struct afs_vlocation *vl;
        int ret;
 
-       _enter("%s{ufs=%u ucs=%u}",
-              vlocation->vldb.name,
-              vlocation->upd_first_svix,
-              vlocation->upd_curr_svix);
+       _enter("{%s},{%x},%*.*s,%zu",
+              cell->name, key_serial(key),
+              (int) namesz, (int) namesz, name, namesz);
 
-       /* try to look up a cached volume in the cell VL databases by ID */
-       if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) {
-               vid = vlocation->vldb.vid[0];
-               voltype = AFSVL_RWVOL;
-       }
-       else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) {
-               vid = vlocation->vldb.vid[1];
-               voltype = AFSVL_ROVOL;
+       if (namesz > sizeof(vl->vldb.name)) {
+               _leave(" = -ENAMETOOLONG");
+               return ERR_PTR(-ENAMETOOLONG);
        }
-       else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) {
-               vid = vlocation->vldb.vid[2];
-               voltype = AFSVL_BACKVOL;
+
+       /* see if we have an in-memory copy first */
+       down_write(&cell->vl_sem);
+       spin_lock(&cell->vl_lock);
+       list_for_each_entry(vl, &cell->vl_list, link) {
+               if (vl->vldb.name[namesz] != '\0')
+                       continue;
+               if (memcmp(vl->vldb.name, name, namesz) == 0)
+                       goto found_in_memory;
        }
-       else {
-               BUG();
-               vid = 0;
-               voltype = 0;
+       spin_unlock(&cell->vl_lock);
+
+       /* not in the cell's in-memory lists - create a new record */
+       vl = afs_vlocation_alloc(cell, name, namesz);
+       if (!vl) {
+               up_write(&cell->vl_sem);
+               return ERR_PTR(-ENOMEM);
        }
 
-       /* contact the chosen server */
-       ret = afs_server_lookup(
-               vlocation->cell,
-               &vlocation->cell->vl_addrs[vlocation->upd_curr_svix],
-               &vlocation->upd_op.server);
+       afs_get_cell(cell);
 
-       switch (ret) {
-       case 0:
-               break;
-       case -ENOMEM:
-       case -ENONET:
-       default:
-               _leave(" = %d", ret);
-               return ret;
-       }
+       list_add_tail(&vl->link, &cell->vl_list);
+       vl->state = AFS_VL_CREATING;
+       up_write(&cell->vl_sem);
 
-       /* initiate the update operation */
-       ret = afs_rxvl_get_entry_by_id_async(&vlocation->upd_op, vid, voltype);
-       if (ret < 0) {
-               _leave(" = %d", ret);
-               return ret;
+fill_in_record:
+       ret = afs_vlocation_fill_in_record(vl, key);
+       if (ret < 0)
+               goto error_abandon;
+       spin_lock(&vl->lock);
+       vl->state = AFS_VL_VALID;
+       wake_up(&vl->waitq);
+       spin_unlock(&vl->lock);
+
+       /* schedule for regular updates */
+       afs_vlocation_queue_for_updates(vl);
+       goto success;
+
+found_in_memory:
+       /* found in memory */
+       _debug("found in memory");
+       atomic_inc(&vl->usage);
+       spin_unlock(&cell->vl_lock);
+       if (!list_empty(&vl->grave)) {
+               spin_lock(&afs_vlocation_graveyard_lock);
+               list_del_init(&vl->grave);
+               spin_unlock(&afs_vlocation_graveyard_lock);
        }
+       up_write(&cell->vl_sem);
+
+       /* see if it was an abandoned record that we might try filling in */
+       spin_lock(&vl->lock);
+       while (vl->state != AFS_VL_VALID) {
+               afs_vlocation_state_t state = vl->state;
+
+               _debug("invalid [state %d]", state);
+
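+               /* a record in the NEW or NO_VOLUME state is not being filled
+                * in by anyone else, so take over and attempt the lookup
+                * ourselves */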
+               if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {
+                       vl->state = AFS_VL_CREATING;
+                       spin_unlock(&vl->lock);
+                       goto fill_in_record;
+               }
+
+               /* must now wait for creation or update by someone else to
+                * complete */
+               _debug("wait");
 
+               spin_unlock(&vl->lock);
+               ret = wait_event_interruptible(
+                       vl->waitq,
+                       vl->state == AFS_VL_NEW ||
+                       vl->state == AFS_VL_VALID ||
+                       vl->state == AFS_VL_NO_VOLUME);
+               if (ret < 0)
+                       goto error;
+               spin_lock(&vl->lock);
+       }
+       spin_unlock(&vl->lock);
+
+success:
+       _leave(" = %p", vl);
+       return vl;
+
+error_abandon:
+       spin_lock(&vl->lock);
+       vl->state = AFS_VL_NEW;
+       wake_up(&vl->waitq);
+       spin_unlock(&vl->lock);
+error:
+       ASSERT(vl != NULL);
+       afs_put_vlocation(vl);
        _leave(" = %d", ret);
-       return ret;
-} /* end afs_vlocation_update_begin() */
+       return ERR_PTR(ret);
+}
 
-/*****************************************************************************/
 /*
- * abandon updating a VL record
- * - does not restart the update timer
+ * finish using a volume location record
  */
-static void afs_vlocation_update_abandon(struct afs_vlocation *vlocation,
-                                        afs_vlocation_upd_t state,
-                                        int ret)
+void afs_put_vlocation(struct afs_vlocation *vl)
 {
-       _enter("%s,%u", vlocation->vldb.name, state);
-
-       if (ret < 0)
-               printk("kAFS: Abandoning VL update '%s': %d\n",
-                      vlocation->vldb.name, ret);
-
-       /* discard the server record */
-       afs_put_server(vlocation->upd_op.server);
-       vlocation->upd_op.server = NULL;
+       if (!vl)
+               return;
 
-       spin_lock(&afs_vlocation_update_lock);
-       afs_vlocation_update = NULL;
-       vlocation->upd_state = state;
+       _enter("%s", vl->vldb.name);
 
-       /* TODO: start updating next VL record on pending list */
+       ASSERTCMP(atomic_read(&vl->usage), >, 0);
 
-       spin_unlock(&afs_vlocation_update_lock);
+       if (likely(!atomic_dec_and_test(&vl->usage))) {
+               _leave("");
+               return;
+       }
 
-       _leave("");
-} /* end afs_vlocation_update_abandon() */
+       spin_lock(&afs_vlocation_graveyard_lock);
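+       /* recheck the count under the graveyard lock in case a concurrent
+        * lookup resurrected the record after our decrement */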
+       if (atomic_read(&vl->usage) == 0) {
+               _debug("buried");
+               list_move_tail(&vl->grave, &afs_vlocation_graveyard);
+               vl->time_of_death = get_seconds();
+               schedule_delayed_work(&afs_vlocation_reap,
+                                     afs_vlocation_timeout * HZ);
+
+               /* suspend updates on this record */
+               if (!list_empty(&vl->update)) {
+                       spin_lock(&afs_vlocation_updates_lock);
+                       list_del_init(&vl->update);
+                       spin_unlock(&afs_vlocation_updates_lock);
+               }
+       }
+       spin_unlock(&afs_vlocation_graveyard_lock);
+       _leave(" [killed?]");
+}
 
-/*****************************************************************************/
 /*
- * handle periodic update timeouts and busy retry timeouts
- * - called from kafstimod
+ * destroy a dead volume location record
  */
-static void afs_vlocation_update_timer(struct afs_timer *timer)
+static void afs_vlocation_destroy(struct afs_vlocation *vl)
 {
-       struct afs_vlocation *vlocation =
-               list_entry(timer, struct afs_vlocation, upd_timer);
-       int ret;
+       _enter("%p", vl);
 
-       _enter("%s", vlocation->vldb.name);
+#ifdef AFS_CACHING_SUPPORT
+       cachefs_relinquish_cookie(vl->cache, 0);
+#endif
 
-       /* only update if not in the graveyard (defend against putting too) */
-       spin_lock(&vlocation->cell->vl_gylock);
+       afs_put_cell(vl->cell);
+       kfree(vl);
+}
 
-       if (!atomic_read(&vlocation->usage))
-               goto out_unlock1;
+/*
+ * reap dead volume location records
+ */
+static void afs_vlocation_reaper(struct work_struct *work)
+{
+       LIST_HEAD(corpses);
+       struct afs_vlocation *vl;
+       unsigned long delay, expiry;
+       time_t now;
 
-       spin_lock(&afs_vlocation_update_lock);
+       _enter("");
 
-       /* if we were woken up due to EBUSY sleep then restart immediately if
-        * possible or else jump to front of pending queue */
-       if (vlocation->upd_state == AFS_VLUPD_BUSYSLEEP) {
-               if (afs_vlocation_update) {
-                       list_add(&vlocation->upd_op.link,
-                                &afs_vlocation_update_pendq);
+       now = get_seconds();
+       spin_lock(&afs_vlocation_graveyard_lock);
+
+       while (!list_empty(&afs_vlocation_graveyard)) {
+               vl = list_entry(afs_vlocation_graveyard.next,
+                               struct afs_vlocation, grave);
+
+               _debug("check %p", vl);
+
+               /* the queue is ordered most dead first */
+               expiry = vl->time_of_death + afs_vlocation_timeout;
+               if (expiry > now) {
+                       delay = (expiry - now) * HZ;
+                       _debug("delay %lu", delay);
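+                       /* if the reap work is already queued, cancel it and
+                        * requeue so that the delay matches the new head of
+                        * the graveyard */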
+                       if (!schedule_delayed_work(&afs_vlocation_reap,
+                                                  delay)) {
+                               cancel_delayed_work(&afs_vlocation_reap);
+                               schedule_delayed_work(&afs_vlocation_reap,
+                                                     delay);
+                       }
+                       break;
                }
-               else {
-                       afs_get_vlocation(vlocation);
-                       afs_vlocation_update = vlocation;
-                       vlocation->upd_state = AFS_VLUPD_INPROGRESS;
+
+               spin_lock(&vl->cell->vl_lock);
+               if (atomic_read(&vl->usage) > 0) {
+                       _debug("no reap");
+                       list_del_init(&vl->grave);
+               } else {
+                       _debug("reap");
+                       list_move_tail(&vl->grave, &corpses);
+                       list_del_init(&vl->link);
                }
-               goto out_unlock2;
+               spin_unlock(&vl->cell->vl_lock);
        }
 
-       /* put on pending queue if there's already another update in progress */
-       if (afs_vlocation_update) {
-               vlocation->upd_state = AFS_VLUPD_PENDING;
-               list_add_tail(&vlocation->upd_op.link,
-                             &afs_vlocation_update_pendq);
-               goto out_unlock2;
-       }
+       spin_unlock(&afs_vlocation_graveyard_lock);
 
-       /* hold a ref on it while actually updating */
-       afs_get_vlocation(vlocation);
-       afs_vlocation_update = vlocation;
-       vlocation->upd_state = AFS_VLUPD_INPROGRESS;
-
-       spin_unlock(&afs_vlocation_update_lock);
-       spin_unlock(&vlocation->cell->vl_gylock);
-
-       /* okay... we can start the update */
-       _debug("BEGIN VL UPDATE [%s]", vlocation->vldb.name);
-       vlocation->upd_first_svix = vlocation->cell->vl_curr_svix;
-       vlocation->upd_curr_svix = vlocation->upd_first_svix;
-       vlocation->upd_rej_cnt = 0;
-       vlocation->upd_busy_cnt = 0;
-
-       ret = afs_vlocation_update_begin(vlocation);
-       if (ret < 0) {
-               afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret);
-               afs_kafstimod_add_timer(&vlocation->upd_timer,
-                                       AFS_VLDB_TIMEOUT);
-               afs_put_vlocation(vlocation);
+       /* now reap the corpses we've extracted */
+       while (!list_empty(&corpses)) {
+               vl = list_entry(corpses.next, struct afs_vlocation, grave);
+               list_del(&vl->grave);
+               afs_vlocation_destroy(vl);
        }
 
        _leave("");
-       return;
+}
 
- out_unlock2:
-       spin_unlock(&afs_vlocation_update_lock);
- out_unlock1:
-       spin_unlock(&vlocation->cell->vl_gylock);
-       _leave("");
-       return;
+/*
+ * initialise the VL update process
+ */
+int __init afs_vlocation_update_init(void)
+{
+       afs_vlocation_update_worker =
+               create_singlethread_workqueue("kafs_vlupdated");
+       return afs_vlocation_update_worker ? 0 : -ENOMEM;
+}
 
-} /* end afs_vlocation_update_timer() */
+/*
+ * discard all the volume location records for rmmod
+ */
+void __exit afs_vlocation_purge(void)
+{
+       afs_vlocation_timeout = 0;
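+       /* a zero timeout makes the final reaper pass destroy every record in
+        * the graveyard immediately */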
+
+       spin_lock(&afs_vlocation_updates_lock);
+       list_del_init(&afs_vlocation_updates);
+       spin_unlock(&afs_vlocation_updates_lock);
+       cancel_delayed_work(&afs_vlocation_update);
+       queue_delayed_work(afs_vlocation_update_worker,
+                          &afs_vlocation_update, 0);
+       destroy_workqueue(afs_vlocation_update_worker);
+
+       cancel_delayed_work(&afs_vlocation_reap);
+       schedule_delayed_work(&afs_vlocation_reap, 0);
+}
 
-/*****************************************************************************/
 /*
- * attend to an update operation upon which an event happened
- * - called in kafsasyncd context
+ * update a volume location
  */
-static void afs_vlocation_update_attend(struct afs_async_op *op)
+static void afs_vlocation_updater(struct work_struct *work)
 {
        struct afs_cache_vlocation vldb;
-       struct afs_vlocation *vlocation =
-               list_entry(op, struct afs_vlocation, upd_op);
-       unsigned tmp;
+       struct afs_vlocation *vl, *xvl;
+       time_t now;
+       long timeout;
        int ret;
 
-       _enter("%s", vlocation->vldb.name);
-
-       ret = afs_rxvl_get_entry_by_id_async2(op, &vldb);
-       switch (ret) {
-       case -EAGAIN:
-               _leave(" [unfinished]");
-               return;
-
-       case 0:
-               _debug("END VL UPDATE: %d\n", ret);
-               vlocation->valid = 1;
-
-               _debug("Done VL Lookup: %02x { %08x(%x) %08x(%x) %08x(%x) }",
-                      vldb.vidmask,
-                      ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0],
-                      ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1],
-                      ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2]
-                      );
-
-               _debug("Vids: %08x %08x %08x",
-                      vldb.vid[0], vldb.vid[1], vldb.vid[2]);
-
-               afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0);
-
-               down_write(&vlocation->cell->vl_sem);
-
-               /* actually update the cache */
-               if (strncmp(vldb.name, vlocation->vldb.name,
-                           sizeof(vlocation->vldb.name)) != 0)
-                       printk("kAFS: name of volume '%s'"
-                              " changed to '%s' on server\n",
-                              vlocation->vldb.name, vldb.name);
-
-               memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb));
-
-#if 0
-               /* TODO update volume entry in local cache */
-#endif
-
-               up_write(&vlocation->cell->vl_sem);
-
-               if (ret < 0)
-                       printk("kAFS: failed to update local cache: %d\n", ret);
-
-               afs_kafstimod_add_timer(&vlocation->upd_timer,
-                                       AFS_VLDB_TIMEOUT);
-               afs_put_vlocation(vlocation);
-               _leave(" [found]");
-               return;
-
-       case -ENOMEDIUM:
-               vlocation->upd_rej_cnt++;
-               goto try_next;
-
-               /* the server is locked - retry in a very short while */
-       case -EBUSY:
-               vlocation->upd_busy_cnt++;
-               if (vlocation->upd_busy_cnt > 3)
-                       goto try_next; /* too many retries */
-
-               afs_vlocation_update_abandon(vlocation,
-                                            AFS_VLUPD_BUSYSLEEP, 0);
-               afs_kafstimod_add_timer(&vlocation->upd_timer, HZ / 2);
-               afs_put_vlocation(vlocation);
-               _leave(" [busy]");
-               return;
-
-       case -ENETUNREACH:
-       case -EHOSTUNREACH:
-       case -ECONNREFUSED:
-       case -EREMOTEIO:
-               /* record bad vlserver info in the cell too
-                * - TODO: use down_write_trylock() if available
-                */
-               if (vlocation->upd_curr_svix == vlocation->cell->vl_curr_svix)
-                       vlocation->cell->vl_curr_svix =
-                               vlocation->cell->vl_curr_svix %
-                               vlocation->cell->vl_naddrs;
-
-       case -EBADRQC:
-       case -EINVAL:
-       case -EACCES:
-       case -EBADMSG:
-               goto try_next;
-
-       default:
-               goto abandon;
-       }
-
-       /* try contacting the next server */
- try_next:
-       vlocation->upd_busy_cnt = 0;
-
-       /* discard the server record */
-       afs_put_server(vlocation->upd_op.server);
-       vlocation->upd_op.server = NULL;
+       _enter("");
 
-       tmp = vlocation->cell->vl_naddrs;
-       if (tmp == 0)
-               goto abandon;
+       now = get_seconds();
 
-       vlocation->upd_curr_svix++;
-       if (vlocation->upd_curr_svix >= tmp)
-               vlocation->upd_curr_svix = 0;
-       if (vlocation->upd_first_svix >= tmp)
-               vlocation->upd_first_svix = tmp - 1;
+       /* find a record to update */
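+       /* records at the head whose refcount has fallen to zero are simply
+        * dropped from the queue */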
+       spin_lock(&afs_vlocation_updates_lock);
+       for (;;) {
+               if (list_empty(&afs_vlocation_updates)) {
+                       spin_unlock(&afs_vlocation_updates_lock);
+                       _leave(" [nothing]");
+                       return;
+               }
 
-       /* move to the next server */
-       if (vlocation->upd_curr_svix != vlocation->upd_first_svix) {
-               afs_vlocation_update_begin(vlocation);
-               _leave(" [next]");
-               return;
+               vl = list_entry(afs_vlocation_updates.next,
+                               struct afs_vlocation, update);
+               if (atomic_read(&vl->usage) > 0)
+                       break;
+               list_del_init(&vl->update);
        }
 
-       /* run out of servers to try - was the volume rejected? */
-       if (vlocation->upd_rej_cnt > 0) {
-               printk("kAFS: Active volume no longer valid '%s'\n",
-                      vlocation->vldb.name);
-               vlocation->valid = 0;
-               afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0);
-               afs_kafstimod_add_timer(&vlocation->upd_timer,
-                                       AFS_VLDB_TIMEOUT);
-               afs_put_vlocation(vlocation);
-               _leave(" [invalidated]");
+       timeout = vl->update_at - now;
+       if (timeout > 0) {
+               queue_delayed_work(afs_vlocation_update_worker,
+                                  &afs_vlocation_update, timeout * HZ);
+               spin_unlock(&afs_vlocation_updates_lock);
+               _leave(" [nothing]");
                return;
        }
 
-       /* abandon the update */
- abandon:
-       afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret);
-       afs_kafstimod_add_timer(&vlocation->upd_timer, HZ * 10);
-       afs_put_vlocation(vlocation);
-       _leave(" [abandoned]");
-
-} /* end afs_vlocation_update_attend() */
-
-/*****************************************************************************/
-/*
- * deal with an update operation being discarded
- * - called in kafsasyncd context when it's dying due to rmmod
- * - the call has already been aborted and put()'d
- */
-static void afs_vlocation_update_discard(struct afs_async_op *op)
-{
-       struct afs_vlocation *vlocation =
-               list_entry(op, struct afs_vlocation, upd_op);
+       list_del_init(&vl->update);
+       atomic_inc(&vl->usage);
+       spin_unlock(&afs_vlocation_updates_lock);
 
-       _enter("%s", vlocation->vldb.name);
+       /* we can now perform the update */
+       _debug("update %s", vl->vldb.name);
+       vl->state = AFS_VL_UPDATING;
+       vl->upd_rej_cnt = 0;
+       vl->upd_busy_cnt = 0;
 
-       afs_put_server(op->server);
-       op->server = NULL;
+       ret = afs_vlocation_update_record(vl, NULL, &vldb);
+       spin_lock(&vl->lock);
+       switch (ret) {
+       case 0:
+               afs_vlocation_apply_update(vl, &vldb);
+               vl->state = AFS_VL_VALID;
+               wake_up(&vl->waitq);
+               break;
+       case -ENOMEDIUM:
+               vl->state = AFS_VL_VOLUME_DELETED;
+               break;
+       default:
+               vl->state = AFS_VL_UNCERTAIN;
+               break;
+       }
+       spin_unlock(&vl->lock);
 
-       afs_put_vlocation(vlocation);
+       /* and then reschedule */
+       _debug("reschedule");
+       vl->update_at = get_seconds() + afs_vlocation_update_timeout;
 
-       _leave("");
-} /* end afs_vlocation_update_discard() */
+       spin_lock(&afs_vlocation_updates_lock);
 
-/*****************************************************************************/
-/*
- * match a VLDB record stored in the cache
- * - may also load target from entry
- */
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vlocation_cache_match(void *target,
-                                                    const void *entry)
-{
-       const struct afs_cache_vlocation *vldb = entry;
-       struct afs_vlocation *vlocation = target;
-
-       _enter("{%s},{%s}", vlocation->vldb.name, vldb->name);
-
-       if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0
-           ) {
-               if (!vlocation->valid ||
-                   vlocation->vldb.rtime == vldb->rtime
-                   ) {
-                       vlocation->vldb = *vldb;
-                       vlocation->valid = 1;
-                       _leave(" = SUCCESS [c->m]");
-                       return CACHEFS_MATCH_SUCCESS;
-               }
-               /* need to update cache if cached info differs */
-               else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) {
-                       /* delete if VIDs for this name differ */
-                       if (memcmp(&vlocation->vldb.vid,
-                                  &vldb->vid,
-                                  sizeof(vldb->vid)) != 0) {
-                               _leave(" = DELETE");
-                               return CACHEFS_MATCH_SUCCESS_DELETE;
-                       }
-
-                       _leave(" = UPDATE");
-                       return CACHEFS_MATCH_SUCCESS_UPDATE;
-               }
-               else {
-                       _leave(" = SUCCESS");
-                       return CACHEFS_MATCH_SUCCESS;
-               }
+       if (!list_empty(&afs_vlocation_updates)) {
+               /* next update in 10 minutes, but wait at least 1 second more
+                * than the newest record already queued so that we don't spam
+                * the VL server suddenly with lots of requests
+                */
+               xvl = list_entry(afs_vlocation_updates.prev,
+                                struct afs_vlocation, update);
+               if (vl->update_at <= xvl->update_at)
+                       vl->update_at = xvl->update_at + 1;
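+               /* work out when the current head of the queue next falls due
+                * so the worker can be rescheduled for then */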
+               xvl = list_entry(afs_vlocation_updates.next,
+                                struct afs_vlocation, update);
+               timeout = xvl->update_at - now;
+               if (timeout < 0)
+                       timeout = 0;
+       } else {
+               timeout = afs_vlocation_update_timeout;
        }
 
-       _leave(" = FAILED");
-       return CACHEFS_MATCH_FAILED;
-} /* end afs_vlocation_cache_match() */
-#endif
-
-/*****************************************************************************/
-/*
- * update a VLDB record stored in the cache
- */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_vlocation_cache_update(void *source, void *entry)
-{
-       struct afs_cache_vlocation *vldb = entry;
-       struct afs_vlocation *vlocation = source;
+       ASSERT(list_empty(&vl->update));
 
-       _enter("");
-
-       *vldb = vlocation->vldb;
+       list_add_tail(&vl->update, &afs_vlocation_updates);
 
-} /* end afs_vlocation_cache_update() */
-#endif
+       _debug("timeout %ld", timeout);
+       queue_delayed_work(afs_vlocation_update_worker,
+                          &afs_vlocation_update, timeout * HZ);
+       spin_unlock(&afs_vlocation_updates_lock);
+       afs_put_vlocation(vl);
+}
index cf62da5..a1904ab 100644 (file)
@@ -1,6 +1,6 @@
-/* vnode.c: AFS vnode management
+/* AFS vnode management
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
-#include <linux/pagemap.h>
-#include "volume.h"
-#include "cell.h"
-#include "cmservice.h"
-#include "fsclient.h"
-#include "vlclient.h"
-#include "vnode.h"
 #include "internal.h"
 
-static void afs_vnode_cb_timed_out(struct afs_timer *timer);
+#if 0
+static noinline bool dump_tree_aux(struct rb_node *node, struct rb_node *parent,
+                                  int depth, char lr)
+{
+       struct afs_vnode *vnode;
+       bool bad = false;
+
+       if (!node)
+               return false;
+
+       if (node->rb_left)
+               bad = dump_tree_aux(node->rb_left, node, depth + 2, '/');
+
+       vnode = rb_entry(node, struct afs_vnode, cb_promise);
+       _debug("%c %*.*s%c%p {%d}",
+              rb_is_red(node) ? 'R' : 'B',
+              depth, depth, "", lr,
+              vnode, vnode->cb_expires_at);
+       if (rb_parent(node) != parent) {
+               printk("BAD: %p != %p\n", rb_parent(node), parent);
+               bad = true;
+       }
 
-struct afs_timer_ops afs_vnode_cb_timed_out_ops = {
-       .timed_out      = afs_vnode_cb_timed_out,
-};
+       if (node->rb_right)
+               bad |= dump_tree_aux(node->rb_right, node, depth + 2, '\\');
 
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vnode_cache_match(void *target,
-                                                const void *entry);
-static void afs_vnode_cache_update(void *source, void *entry);
+       return bad;
+}
 
-struct cachefs_index_def afs_vnode_cache_index_def = {
-       .name           = "vnode",
-       .data_size      = sizeof(struct afs_cache_vnode),
-       .keys[0]        = { CACHEFS_INDEX_KEYS_BIN, 4 },
-       .match          = afs_vnode_cache_match,
-       .update         = afs_vnode_cache_update,
-};
+static noinline void dump_tree(const char *name, struct afs_server *server)
+{
+       _enter("%s", name);
+       if (dump_tree_aux(server->cb_promises.rb_node, NULL, 0, '-'))
+               BUG();
+}
 #endif
 
-/*****************************************************************************/
 /*
- * handle a callback timing out
- * TODO: retain a ref to vnode struct for an outstanding callback timeout
+ * insert a vnode into the backing server's vnode tree
  */
-static void afs_vnode_cb_timed_out(struct afs_timer *timer)
+static void afs_install_vnode(struct afs_vnode *vnode,
+                             struct afs_server *server)
 {
-       struct afs_server *oldserver;
-       struct afs_vnode *vnode;
+       struct afs_server *old_server = vnode->server;
+       struct afs_vnode *xvnode;
+       struct rb_node *parent, **p;
 
-       vnode = list_entry(timer, struct afs_vnode, cb_timeout);
+       _enter("%p,%p", vnode, server);
 
-       _enter("%p", vnode);
+       if (old_server) {
+               spin_lock(&old_server->fs_lock);
+               rb_erase(&vnode->server_rb, &old_server->fs_vnodes);
+               spin_unlock(&old_server->fs_lock);
+       }
 
-       /* set the changed flag in the vnode and release the server */
-       spin_lock(&vnode->lock);
+       afs_get_server(server);
+       vnode->server = server;
+       afs_put_server(old_server);
+
+       /* insert into the server's vnode tree in FID order */
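+       /* (FIDs are compared lexicographically: vid, then vnode, then
+        * unique) */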
+       spin_lock(&server->fs_lock);
+
+       parent = NULL;
+       p = &server->fs_vnodes.rb_node;
+       while (*p) {
+               parent = *p;
+               xvnode = rb_entry(parent, struct afs_vnode, server_rb);
+               if (vnode->fid.vid < xvnode->fid.vid)
+                       p = &(*p)->rb_left;
+               else if (vnode->fid.vid > xvnode->fid.vid)
+                       p = &(*p)->rb_right;
+               else if (vnode->fid.vnode < xvnode->fid.vnode)
+                       p = &(*p)->rb_left;
+               else if (vnode->fid.vnode > xvnode->fid.vnode)
+                       p = &(*p)->rb_right;
+               else if (vnode->fid.unique < xvnode->fid.unique)
+                       p = &(*p)->rb_left;
+               else if (vnode->fid.unique > xvnode->fid.unique)
+                       p = &(*p)->rb_right;
+               else
+                       BUG(); /* can't happen unless afs_iget() malfunctions */
+       }
+
+       rb_link_node(&vnode->server_rb, parent, p);
+       rb_insert_color(&vnode->server_rb, &server->fs_vnodes);
 
-       oldserver = xchg(&vnode->cb_server, NULL);
-       if (oldserver) {
-               vnode->flags |= AFS_VNODE_CHANGED;
+       spin_unlock(&server->fs_lock);
+       _leave("");
+}
 
-               spin_lock(&afs_cb_hash_lock);
-               list_del_init(&vnode->cb_hash_link);
-               spin_unlock(&afs_cb_hash_lock);
+/*
+ * insert a vnode into the promising server's update/expiration tree
+ * - caller must hold vnode->lock
+ */
+static void afs_vnode_note_promise(struct afs_vnode *vnode,
+                                  struct afs_server *server)
+{
+       struct afs_server *old_server;
+       struct afs_vnode *xvnode;
+       struct rb_node *parent, **p;
 
-               spin_lock(&oldserver->cb_lock);
-               list_del_init(&vnode->cb_link);
-               spin_unlock(&oldserver->cb_lock);
+       _enter("%p,%p", vnode, server);
+
+       ASSERT(server != NULL);
+
+       old_server = vnode->server;
+       if (vnode->cb_promised) {
+               if (server == old_server &&
+                   vnode->cb_expires == vnode->cb_expires_at) {
+                       _leave(" [no change]");
+                       return;
+               }
+
+               spin_lock(&old_server->cb_lock);
+               if (vnode->cb_promised) {
+                       _debug("delete");
+                       rb_erase(&vnode->cb_promise, &old_server->cb_promises);
+                       vnode->cb_promised = false;
+               }
+               spin_unlock(&old_server->cb_lock);
        }
 
-       spin_unlock(&vnode->lock);
+       if (vnode->server != server)
+               afs_install_vnode(vnode, server);
+
+       vnode->cb_expires_at = vnode->cb_expires;
+       _debug("PROMISE on %p {%lu}",
+              vnode, (unsigned long) vnode->cb_expires_at);
+
+       /* abuse an RB-tree to hold the expiration order (we may have multiple
+        * items with the same expiration time) */
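+       /* (equal expiry times go to the right, so the leftmost node is always
+        * the earliest promise to expire) */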
+       spin_lock(&server->cb_lock);
+
+       parent = NULL;
+       p = &server->cb_promises.rb_node;
+       while (*p) {
+               parent = *p;
+               xvnode = rb_entry(parent, struct afs_vnode, cb_promise);
+               if (vnode->cb_expires_at < xvnode->cb_expires_at)
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
+       }
 
-       afs_put_server(oldserver);
+       rb_link_node(&vnode->cb_promise, parent, p);
+       rb_insert_color(&vnode->cb_promise, &server->cb_promises);
+       vnode->cb_promised = true;
 
+       spin_unlock(&server->cb_lock);
        _leave("");
-} /* end afs_vnode_cb_timed_out() */
+}
 
-/*****************************************************************************/
 /*
- * finish off updating the recorded status of a file
+ * handle remote file deletion by discarding the callback promise
+ */
+static void afs_vnode_deleted_remotely(struct afs_vnode *vnode)
+{
+       struct afs_server *server;
+
+       set_bit(AFS_VNODE_DELETED, &vnode->flags);
+
+       server = vnode->server;
+       if (vnode->cb_promised) {
+               spin_lock(&server->cb_lock);
+               if (vnode->cb_promised) {
+                       rb_erase(&vnode->cb_promise, &server->cb_promises);
+                       vnode->cb_promised = false;
+               }
+               spin_unlock(&server->cb_lock);
+       }
+
+       spin_lock(&vnode->server->fs_lock);
+       rb_erase(&vnode->server_rb, &vnode->server->fs_vnodes);
+       spin_unlock(&vnode->server->fs_lock);
+
+       vnode->server = NULL;
+       afs_put_server(server);
+}
+
+/*
+ * finish off updating the recorded status of a file after an operation
+ * completes successfully
  * - starts callback expiry timer
  * - adds to server's callback list
  */
-static void afs_vnode_finalise_status_update(struct afs_vnode *vnode,
-                                            struct afs_server *server,
-                                            int ret)
+void afs_vnode_finalise_status_update(struct afs_vnode *vnode,
+                                     struct afs_server *server)
 {
        struct afs_server *oldserver = NULL;
 
-       _enter("%p,%p,%d", vnode, server, ret);
+       _enter("%p,%p", vnode, server);
 
        spin_lock(&vnode->lock);
+       clear_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
+       afs_vnode_note_promise(vnode, server);
+       vnode->update_cnt--;
+       ASSERTCMP(vnode->update_cnt, >=, 0);
+       spin_unlock(&vnode->lock);
+
+       wake_up_all(&vnode->update_waitq);
+       afs_put_server(oldserver);
+       _leave("");
+}
 
-       vnode->flags &= ~AFS_VNODE_CHANGED;
+/*
+ * finish off updating the recorded status of a file after an operation failed
+ */
+static void afs_vnode_status_update_failed(struct afs_vnode *vnode, int ret)
+{
+       _enter("%p,%d", vnode, ret);
 
-       if (ret == 0) {
-               /* adjust the callback timeout appropriately */
-               afs_kafstimod_add_timer(&vnode->cb_timeout,
-                                       vnode->cb_expiry * HZ);
-
-               spin_lock(&afs_cb_hash_lock);
-               list_move_tail(&vnode->cb_hash_link,
-                             &afs_cb_hash(server, &vnode->fid));
-               spin_unlock(&afs_cb_hash_lock);
-
-               /* swap ref to old callback server with that for new callback
-                * server */
-               oldserver = xchg(&vnode->cb_server, server);
-               if (oldserver != server) {
-                       if (oldserver) {
-                               spin_lock(&oldserver->cb_lock);
-                               list_del_init(&vnode->cb_link);
-                               spin_unlock(&oldserver->cb_lock);
-                       }
+       spin_lock(&vnode->lock);
 
-                       afs_get_server(server);
-                       spin_lock(&server->cb_lock);
-                       list_add_tail(&vnode->cb_link, &server->cb_promises);
-                       spin_unlock(&server->cb_lock);
-               }
-               else {
-                       /* same server */
-                       oldserver = NULL;
-               }
-       }
-       else if (ret == -ENOENT) {
-               /* the file was deleted - clear the callback timeout */
-               oldserver = xchg(&vnode->cb_server, NULL);
-               afs_kafstimod_del_timer(&vnode->cb_timeout);
+       clear_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
 
+       if (ret == -ENOENT) {
+               /* the file was deleted on the server */
                _debug("got NOENT from server - marking file deleted");
-               vnode->flags |= AFS_VNODE_DELETED;
+               afs_vnode_deleted_remotely(vnode);
        }
 
        vnode->update_cnt--;
-
+       ASSERTCMP(vnode->update_cnt, >=, 0);
        spin_unlock(&vnode->lock);
 
        wake_up_all(&vnode->update_waitq);
-
-       afs_put_server(oldserver);
-
        _leave("");
+}
 
-} /* end afs_vnode_finalise_status_update() */
-
-/*****************************************************************************/
 /*
  * fetch file status from the volume
  * - don't issue a fetch if:
@@ -157,9 +252,11 @@ static void afs_vnode_finalise_status_update(struct afs_vnode *vnode,
  *   - there are any outstanding ops that will fetch the status
  * - TODO implement local caching
  */
-int afs_vnode_fetch_status(struct afs_vnode *vnode)
+int afs_vnode_fetch_status(struct afs_vnode *vnode,
+                          struct afs_vnode *auth_vnode, struct key *key)
 {
        struct afs_server *server;
+       unsigned long acl_order;
        int ret;
 
        DECLARE_WAITQUEUE(myself, current);
@@ -168,38 +265,49 @@ int afs_vnode_fetch_status(struct afs_vnode *vnode)
               vnode->volume->vlocation->vldb.name,
               vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
 
-       if (!(vnode->flags & AFS_VNODE_CHANGED) && vnode->cb_server) {
+       if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
+           vnode->cb_promised) {
                _leave(" [unchanged]");
                return 0;
        }
 
-       if (vnode->flags & AFS_VNODE_DELETED) {
+       if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
                _leave(" [deleted]");
                return -ENOENT;
        }
 
+       acl_order = 0;
+       if (auth_vnode)
+               acl_order = auth_vnode->acl_order;
+
        spin_lock(&vnode->lock);
 
-       if (!(vnode->flags & AFS_VNODE_CHANGED)) {
+       if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
+           vnode->cb_promised) {
                spin_unlock(&vnode->lock);
                _leave(" [unchanged]");
                return 0;
        }
 
+       ASSERTCMP(vnode->update_cnt, >=, 0);
+
        if (vnode->update_cnt > 0) {
                /* someone else started a fetch */
+               _debug("wait on fetch %d", vnode->update_cnt);
+
                set_current_state(TASK_UNINTERRUPTIBLE);
+               ASSERT(myself.func != NULL);
                add_wait_queue(&vnode->update_waitq, &myself);
 
                /* wait for the status to be updated */
                for (;;) {
-                       if (!(vnode->flags & AFS_VNODE_CHANGED))
+                       if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags))
                                break;
-                       if (vnode->flags & AFS_VNODE_DELETED)
+                       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
                                break;
 
-                       /* it got updated and invalidated all before we saw
-                        * it */
+                       /* check to see if it got updated and invalidated all
+                        * before we saw it */
                        if (vnode->update_cnt == 0) {
                                remove_wait_queue(&vnode->update_waitq,
                                                  &myself);
@@ -219,10 +327,11 @@ int afs_vnode_fetch_status(struct afs_vnode *vnode)
                spin_unlock(&vnode->lock);
                set_current_state(TASK_RUNNING);
 
-               return vnode->flags & AFS_VNODE_DELETED ? -ENOENT : 0;
+               return test_bit(AFS_VNODE_DELETED, &vnode->flags) ?
+                       -ENOENT : 0;
        }
 
- get_anyway:
+get_anyway:
        /* okay... we're going to have to initiate the op */
        vnode->update_cnt++;
 
@@ -232,39 +341,60 @@ int afs_vnode_fetch_status(struct afs_vnode *vnode)
         * vnode */
        do {
                /* pick a server to query */
-               ret = afs_volume_pick_fileserver(vnode->volume, &server);
-               if (ret<0)
-                       return ret;
+               server = afs_volume_pick_fileserver(vnode);
+               if (IS_ERR(server))
+                       goto no_server;
 
-               _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
+               _debug("USING SERVER: %p{%08x}",
+                      server, ntohl(server->addr.s_addr));
 
-               ret = afs_rxfs_fetch_file_status(server, vnode, NULL);
+               ret = afs_fs_fetch_file_status(server, key, vnode, NULL,
+                                              &afs_sync_call);
 
-       } while (!afs_volume_release_fileserver(vnode->volume, server, ret));
+       } while (!afs_volume_release_fileserver(vnode, server, ret));
 
        /* adjust the flags */
-       afs_vnode_finalise_status_update(vnode, server, ret);
+       if (ret == 0) {
+               _debug("adjust");
+               if (auth_vnode)
+                       afs_cache_permit(vnode, key, acl_order);
+               afs_vnode_finalise_status_update(vnode, server);
+               afs_put_server(server);
+       } else {
+               _debug("failed [%d]", ret);
+               afs_vnode_status_update_failed(vnode, ret);
+       }
 
-       _leave(" = %d", ret);
+       ASSERTCMP(vnode->update_cnt, >=, 0);
+
+       _leave(" = %d [cnt %d]", ret, vnode->update_cnt);
        return ret;
-} /* end afs_vnode_fetch_status() */
 
-/*****************************************************************************/
+no_server:
+       spin_lock(&vnode->lock);
+       vnode->update_cnt--;
+       ASSERTCMP(vnode->update_cnt, >=, 0);
+       spin_unlock(&vnode->lock);
+       _leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
+       return PTR_ERR(server);
+}
+
 /*
  * fetch file data from the volume
- * - TODO implement caching and server failover
+ * - TODO implement caching
  */
-int afs_vnode_fetch_data(struct afs_vnode *vnode,
-                        struct afs_rxfs_fetch_descriptor *desc)
+int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key,
+                        off_t offset, size_t length, struct page *page)
 {
        struct afs_server *server;
        int ret;
 
-       _enter("%s,{%u,%u,%u}",
+       _enter("%s{%u,%u,%u},%x,,,",
               vnode->volume->vlocation->vldb.name,
               vnode->fid.vid,
               vnode->fid.vnode,
-              vnode->fid.unique);
+              vnode->fid.unique,
+              key_serial(key));
 
        /* this op will fetch the status */
        spin_lock(&vnode->lock);
@@ -275,120 +405,351 @@ int afs_vnode_fetch_data(struct afs_vnode *vnode,
         * vnode */
        do {
                /* pick a server to query */
-               ret = afs_volume_pick_fileserver(vnode->volume, &server);
-               if (ret < 0)
-                       return ret;
+               server = afs_volume_pick_fileserver(vnode);
+               if (IS_ERR(server))
+                       goto no_server;
 
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
-               ret = afs_rxfs_fetch_file_data(server, vnode, desc, NULL);
+               ret = afs_fs_fetch_data(server, key, vnode, offset, length,
+                                       page, &afs_sync_call);
 
-       } while (!afs_volume_release_fileserver(vnode->volume, server, ret));
+       } while (!afs_volume_release_fileserver(vnode, server, ret));
 
        /* adjust the flags */
-       afs_vnode_finalise_status_update(vnode, server, ret);
+       if (ret == 0) {
+               afs_vnode_finalise_status_update(vnode, server);
+               afs_put_server(server);
+       } else {
+               afs_vnode_status_update_failed(vnode, ret);
+       }
 
        _leave(" = %d", ret);
        return ret;
 
-} /* end afs_vnode_fetch_data() */
+no_server:
+       spin_lock(&vnode->lock);
+       vnode->update_cnt--;
+       ASSERTCMP(vnode->update_cnt, >=, 0);
+       spin_unlock(&vnode->lock);
+       return PTR_ERR(server);
+}
 
-/*****************************************************************************/
 /*
- * break any outstanding callback on a vnode
- * - only relevent to server that issued it
+ * make a file or a directory
  */
-int afs_vnode_give_up_callback(struct afs_vnode *vnode)
+int afs_vnode_create(struct afs_vnode *vnode, struct key *key,
+                    const char *name, umode_t mode, struct afs_fid *newfid,
+                    struct afs_file_status *newstatus,
+                    struct afs_callback *newcb, struct afs_server **_server)
 {
        struct afs_server *server;
        int ret;
 
-       _enter("%s,{%u,%u,%u}",
+       _enter("%s{%u,%u,%u},%x,%s,,",
               vnode->volume->vlocation->vldb.name,
               vnode->fid.vid,
               vnode->fid.vnode,
-              vnode->fid.unique);
-
-       spin_lock(&afs_cb_hash_lock);
-       list_del_init(&vnode->cb_hash_link);
-       spin_unlock(&afs_cb_hash_lock);
+              vnode->fid.unique,
+              key_serial(key),
+              name);
 
-       /* set the changed flag in the vnode and release the server */
+       /* this op will fetch the status on the directory we're creating in */
        spin_lock(&vnode->lock);
+       vnode->update_cnt++;
+       spin_unlock(&vnode->lock);
 
-       afs_kafstimod_del_timer(&vnode->cb_timeout);
+       do {
+               /* pick a server to query */
+               server = afs_volume_pick_fileserver(vnode);
+               if (IS_ERR(server))
+                       goto no_server;
+
+               _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
-       server = xchg(&vnode->cb_server, NULL);
-       if (server) {
-               vnode->flags |= AFS_VNODE_CHANGED;
+               ret = afs_fs_create(server, key, vnode, name, mode, newfid,
+                                   newstatus, newcb, &afs_sync_call);
 
-               spin_lock(&server->cb_lock);
-               list_del_init(&vnode->cb_link);
-               spin_unlock(&server->cb_lock);
+       } while (!afs_volume_release_fileserver(vnode, server, ret));
+
+       /* adjust the flags */
+       if (ret == 0) {
+               afs_vnode_finalise_status_update(vnode, server);
+               *_server = server;
+       } else {
+               afs_vnode_status_update_failed(vnode, ret);
+               *_server = NULL;
        }
 
+       _leave(" = %d [cnt %d]", ret, vnode->update_cnt);
+       return ret;
+
+no_server:
+       spin_lock(&vnode->lock);
+       vnode->update_cnt--;
+       ASSERTCMP(vnode->update_cnt, >=, 0);
        spin_unlock(&vnode->lock);
+       _leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
+       return PTR_ERR(server);
+}
 
-       ret = 0;
-       if (server) {
-               ret = afs_rxfs_give_up_callback(server, vnode);
+/*
+ * remove a file or directory
+ */
+int afs_vnode_remove(struct afs_vnode *vnode, struct key *key, const char *name,
+                    bool isdir)
+{
+       struct afs_server *server;
+       int ret;
+
+       _enter("%s{%u,%u,%u},%x,%s",
+              vnode->volume->vlocation->vldb.name,
+              vnode->fid.vid,
+              vnode->fid.vnode,
+              vnode->fid.unique,
+              key_serial(key),
+              name);
+
+       /* this op will fetch the status on the directory we're removing from */
+       spin_lock(&vnode->lock);
+       vnode->update_cnt++;
+       spin_unlock(&vnode->lock);
+
+       do {
+               /* pick a server to query */
+               server = afs_volume_pick_fileserver(vnode);
+               if (IS_ERR(server))
+                       goto no_server;
+
+               _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
+
+               ret = afs_fs_remove(server, key, vnode, name, isdir,
+                                   &afs_sync_call);
+
+       } while (!afs_volume_release_fileserver(vnode, server, ret));
+
+       /* adjust the flags */
+       if (ret == 0) {
+               afs_vnode_finalise_status_update(vnode, server);
                afs_put_server(server);
+       } else {
+               afs_vnode_status_update_failed(vnode, ret);
        }
 
-       _leave(" = %d", ret);
+       _leave(" = %d [cnt %d]", ret, vnode->update_cnt);
        return ret;
-} /* end afs_vnode_give_up_callback() */
 
-/*****************************************************************************/
+no_server:
+       spin_lock(&vnode->lock);
+       vnode->update_cnt--;
+       ASSERTCMP(vnode->update_cnt, >=, 0);
+       spin_unlock(&vnode->lock);
+       _leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
+       return PTR_ERR(server);
+}
+
 /*
- * match a vnode record stored in the cache
+ * create a hard link
  */
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vnode_cache_match(void *target,
-                                                const void *entry)
+int afs_vnode_link(struct afs_vnode *dvnode, struct afs_vnode *vnode,
+                  struct key *key, const char *name)
 {
-       const struct afs_cache_vnode *cvnode = entry;
-       struct afs_vnode *vnode = target;
+       struct afs_server *server;
+       int ret;
 
-       _enter("{%x,%x,%Lx},{%x,%x,%Lx}",
+       _enter("%s{%u,%u,%u},%s{%u,%u,%u},%x,%s",
+              dvnode->volume->vlocation->vldb.name,
+              dvnode->fid.vid,
+              dvnode->fid.vnode,
+              dvnode->fid.unique,
+              vnode->volume->vlocation->vldb.name,
+              vnode->fid.vid,
               vnode->fid.vnode,
               vnode->fid.unique,
-              vnode->status.version,
-              cvnode->vnode_id,
-              cvnode->vnode_unique,
-              cvnode->data_version);
-
-       if (vnode->fid.vnode != cvnode->vnode_id) {
-               _leave(" = FAILED");
-               return CACHEFS_MATCH_FAILED;
+              key_serial(key),
+              name);
+
+       /* this op will fetch the status on the directory we're adding the link to */
+       spin_lock(&vnode->lock);
+       vnode->update_cnt++;
+       spin_unlock(&vnode->lock);
+       spin_lock(&dvnode->lock);
+       dvnode->update_cnt++;
+       spin_unlock(&dvnode->lock);
+
+       do {
+               /* pick a server to query */
+               server = afs_volume_pick_fileserver(dvnode);
+               if (IS_ERR(server))
+                       goto no_server;
+
+               _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
+
+               ret = afs_fs_link(server, key, dvnode, vnode, name,
+                                 &afs_sync_call);
+
+       } while (!afs_volume_release_fileserver(dvnode, server, ret));
+
+       /* adjust the flags */
+       if (ret == 0) {
+               afs_vnode_finalise_status_update(vnode, server);
+               afs_vnode_finalise_status_update(dvnode, server);
+               afs_put_server(server);
+       } else {
+               afs_vnode_status_update_failed(vnode, ret);
+               afs_vnode_status_update_failed(dvnode, ret);
        }
 
-       if (vnode->fid.unique != cvnode->vnode_unique ||
-           vnode->status.version != cvnode->data_version) {
-               _leave(" = DELETE");
-               return CACHEFS_MATCH_SUCCESS_DELETE;
+       _leave(" = %d [cnt %d]", ret, vnode->update_cnt);
+       return ret;
+
+no_server:
+       spin_lock(&vnode->lock);
+       vnode->update_cnt--;
+       ASSERTCMP(vnode->update_cnt, >=, 0);
+       spin_unlock(&vnode->lock);
+       spin_lock(&dvnode->lock);
+       dvnode->update_cnt--;
+       ASSERTCMP(dvnode->update_cnt, >=, 0);
+       spin_unlock(&dvnode->lock);
+       _leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
+       return PTR_ERR(server);
+}
+
+/*
+ * create a symbolic link
+ */
+int afs_vnode_symlink(struct afs_vnode *vnode, struct key *key,
+                     const char *name, const char *content,
+                     struct afs_fid *newfid,
+                     struct afs_file_status *newstatus,
+                     struct afs_server **_server)
+{
+       struct afs_server *server;
+       int ret;
+
+       _enter("%s{%u,%u,%u},%x,%s,%s,,,",
+              vnode->volume->vlocation->vldb.name,
+              vnode->fid.vid,
+              vnode->fid.vnode,
+              vnode->fid.unique,
+              key_serial(key),
+              name, content);
+
+       /* this op will fetch the status on the directory we're creating in */
+       spin_lock(&vnode->lock);
+       vnode->update_cnt++;
+       spin_unlock(&vnode->lock);
+
+       do {
+               /* pick a server to query */
+               server = afs_volume_pick_fileserver(vnode);
+               if (IS_ERR(server))
+                       goto no_server;
+
+               _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
+
+               ret = afs_fs_symlink(server, key, vnode, name, content,
+                                    newfid, newstatus, &afs_sync_call);
+
+       } while (!afs_volume_release_fileserver(vnode, server, ret));
+
+       /* adjust the flags */
+       if (ret == 0) {
+               afs_vnode_finalise_status_update(vnode, server);
+               *_server = server;
+       } else {
+               afs_vnode_status_update_failed(vnode, ret);
+               *_server = NULL;
        }
 
-       _leave(" = SUCCESS");
-       return CACHEFS_MATCH_SUCCESS;
-} /* end afs_vnode_cache_match() */
-#endif
+       _leave(" = %d [cnt %d]", ret, vnode->update_cnt);
+       return ret;
+
+no_server:
+       spin_lock(&vnode->lock);
+       vnode->update_cnt--;
+       ASSERTCMP(vnode->update_cnt, >=, 0);
+       spin_unlock(&vnode->lock);
+       _leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
+       return PTR_ERR(server);
+}
 
-/*****************************************************************************/
 /*
- * update a vnode record stored in the cache
+ * rename a file
  */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_vnode_cache_update(void *source, void *entry)
+int afs_vnode_rename(struct afs_vnode *orig_dvnode,
+                    struct afs_vnode *new_dvnode,
+                    struct key *key,
+                    const char *orig_name,
+                    const char *new_name)
 {
-       struct afs_cache_vnode *cvnode = entry;
-       struct afs_vnode *vnode = source;
+       struct afs_server *server;
+       int ret;
 
-       _enter("");
+       _enter("%s{%u,%u,%u},%s{%u,%u,%u},%x,%s,%s",
+              orig_dvnode->volume->vlocation->vldb.name,
+              orig_dvnode->fid.vid,
+              orig_dvnode->fid.vnode,
+              orig_dvnode->fid.unique,
+              new_dvnode->volume->vlocation->vldb.name,
+              new_dvnode->fid.vid,
+              new_dvnode->fid.vnode,
+              new_dvnode->fid.unique,
+              key_serial(key),
+              orig_name,
+              new_name);
+
+       /* this op will fetch the status on both the directories we're dealing
+        * with */
+       spin_lock(&orig_dvnode->lock);
+       orig_dvnode->update_cnt++;
+       spin_unlock(&orig_dvnode->lock);
+       if (new_dvnode != orig_dvnode) {
+               spin_lock(&new_dvnode->lock);
+               new_dvnode->update_cnt++;
+               spin_unlock(&new_dvnode->lock);
+       }
 
-       cvnode->vnode_id        = vnode->fid.vnode;
-       cvnode->vnode_unique    = vnode->fid.unique;
-       cvnode->data_version    = vnode->status.version;
+       do {
+               /* pick a server to query */
+               server = afs_volume_pick_fileserver(orig_dvnode);
+               if (IS_ERR(server))
+                       goto no_server;
 
-} /* end afs_vnode_cache_update() */
-#endif
+               _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
+
+               ret = afs_fs_rename(server, key, orig_dvnode, orig_name,
+                                   new_dvnode, new_name, &afs_sync_call);
+
+       } while (!afs_volume_release_fileserver(orig_dvnode, server, ret));
+
+       /* adjust the flags */
+       if (ret == 0) {
+               afs_vnode_finalise_status_update(orig_dvnode, server);
+               if (new_dvnode != orig_dvnode)
+                       afs_vnode_finalise_status_update(new_dvnode, server);
+               afs_put_server(server);
+       } else {
+               afs_vnode_status_update_failed(orig_dvnode, ret);
+               if (new_dvnode != orig_dvnode)
+                       afs_vnode_status_update_failed(new_dvnode, ret);
+       }
+
+       _leave(" = %d [cnt %d]", ret, orig_dvnode->update_cnt);
+       return ret;
+
+no_server:
+       spin_lock(&orig_dvnode->lock);
+       orig_dvnode->update_cnt--;
+       ASSERTCMP(orig_dvnode->update_cnt, >=, 0);
+       spin_unlock(&orig_dvnode->lock);
+       if (new_dvnode != orig_dvnode) {
+               spin_lock(&new_dvnode->lock);
+               new_dvnode->update_cnt--;
+               ASSERTCMP(new_dvnode->update_cnt, >=, 0);
+               spin_unlock(&new_dvnode->lock);
+       }
+       _leave(" = %ld [cnt %d]", PTR_ERR(server), orig_dvnode->update_cnt);
+       return PTR_ERR(server);
+}
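
The rewritten afs_vnode_* operations above all share one control flow: bump update_cnt, loop picking a fileserver until afs_volume_release_fileserver() says to stop, then finalise the status update on success or record the failure. Below is a minimal, illustrative C sketch of that retry loop; the types and helper names (struct server, pick_server, release_server, do_operation) are stand-ins invented for this example rather than the kernel's real afs_* interfaces, and the errno values assume a Linux <errno.h>.

#include <errno.h>
#include <stdio.h>

struct server {
        int fs_state;           /* last failure recorded against this server */
};

/* stand-in for afs_volume_pick_fileserver(): NULL means "no usable server" */
static struct server *pick_server(void)
{
        static struct server srv;

        return srv.fs_state ? NULL : &srv;
}

/* stand-in for afs_volume_release_fileserver(): nonzero means "stop looping" */
static int release_server(struct server *srv, int result)
{
        switch (result) {
        case 0:                 /* success: caller keeps using the server */
                return 1;
        case -ETIMEDOUT:        /* transient network trouble: note it, try next */
        case -ECONNREFUSED:
                srv->fs_state = result;
                return 0;
        default:                /* local or permanent error: accept the result */
                return 1;
        }
}

/* stand-in for the actual RPC (afs_fs_fetch_file_status() and friends) */
static int do_operation(struct server *srv)
{
        (void)srv;
        return 0;
}

static int fetch_with_retry(void)
{
        struct server *srv;
        int ret;

        do {
                srv = pick_server();
                if (!srv)
                        return -ENOMEDIUM;      /* analogous to the no_server: path */
                ret = do_operation(srv);
        } while (!release_server(srv, ret));

        return ret;
}

int main(void)
{
        printf("fetch_with_retry() = %d\n", fetch_with_retry());
        return 0;
}
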
diff --git a/fs/afs/vnode.h b/fs/afs/vnode.h
deleted file mode 100644
index b86a971..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/* vnode.h: AFS vnode record
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_VNODE_H
-#define _LINUX_AFS_VNODE_H
-
-#include <linux/fs.h>
-#include "server.h"
-#include "kafstimod.h"
-#include "cache.h"
-
-#ifdef __KERNEL__
-
-struct afs_rxfs_fetch_descriptor;
-
-/*****************************************************************************/
-/*
- * vnode catalogue entry
- */
-struct afs_cache_vnode
-{
-       afs_vnodeid_t           vnode_id;       /* vnode ID */
-       unsigned                vnode_unique;   /* vnode ID uniquifier */
-       afs_dataversion_t       data_version;   /* data version */
-};
-
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_vnode_cache_index_def;
-#endif
-
-/*****************************************************************************/
-/*
- * AFS inode private data
- */
-struct afs_vnode
-{
-       struct inode            vfs_inode;      /* the VFS's inode record */
-
-       struct afs_volume       *volume;        /* volume on which vnode resides */
-       struct afs_fid          fid;            /* the file identifier for this inode */
-       struct afs_file_status  status;         /* AFS status info for this file */
-#ifdef AFS_CACHING_SUPPORT
-       struct cachefs_cookie   *cache;         /* caching cookie */
-#endif
-
-       wait_queue_head_t       update_waitq;   /* status fetch waitqueue */
-       unsigned                update_cnt;     /* number of outstanding ops that will update the
-                                                * status */
-       spinlock_t              lock;           /* waitqueue/flags lock */
-       unsigned                flags;
-#define AFS_VNODE_CHANGED      0x00000001      /* set if vnode reported changed by callback */
-#define AFS_VNODE_DELETED      0x00000002      /* set if vnode deleted on server */
-#define AFS_VNODE_MOUNTPOINT   0x00000004      /* set if vnode is a mountpoint symlink */
-
-       /* outstanding callback notification on this file */
-       struct afs_server       *cb_server;     /* server that made the current promise */
-       struct list_head        cb_link;        /* link in server's promises list */
-       struct list_head        cb_hash_link;   /* link in master callback hash */
-       struct afs_timer        cb_timeout;     /* timeout on promise */
-       unsigned                cb_version;     /* callback version */
-       unsigned                cb_expiry;      /* callback expiry time */
-       afs_callback_type_t     cb_type;        /* type of callback */
-};
-
-static inline struct afs_vnode *AFS_FS_I(struct inode *inode)
-{
-       return container_of(inode,struct afs_vnode,vfs_inode);
-}
-
-static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
-{
-       return &vnode->vfs_inode;
-}
-
-extern int afs_vnode_fetch_status(struct afs_vnode *vnode);
-
-extern int afs_vnode_fetch_data(struct afs_vnode *vnode,
-                               struct afs_rxfs_fetch_descriptor *desc);
-
-extern int afs_vnode_give_up_callback(struct afs_vnode *vnode);
-
-extern struct afs_timer_ops afs_vnode_cb_timed_out_ops;
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_AFS_VNODE_H */
index 768c6db..dd160ca 100644
@@ -1,6 +1,6 @@
-/* volume.c: AFS volume management
+/* AFS volume management
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
-#include "volume.h"
-#include "vnode.h"
-#include "cell.h"
-#include "cache.h"
-#include "cmservice.h"
-#include "fsclient.h"
-#include "vlclient.h"
 #include "internal.h"
 
-#ifdef __KDEBUG
 static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" };
-#endif
-
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_volume_cache_match(void *target,
-                                                 const void *entry);
-static void afs_volume_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_volume_cache_index_def = {
-       .name           = "volume",
-       .data_size      = sizeof(struct afs_cache_vhash),
-       .keys[0]        = { CACHEFS_INDEX_KEYS_BIN, 1 },
-       .keys[1]        = { CACHEFS_INDEX_KEYS_BIN, 1 },
-       .match          = afs_volume_cache_match,
-       .update         = afs_volume_cache_update,
-};
-#endif
 
-/*****************************************************************************/
 /*
  * lookup a volume by name
  * - this can be one of the following:
@@ -66,118 +41,52 @@ struct cachefs_index_def afs_volume_cache_index_def = {
  * - Rule 3: If parent volume is R/W, then only mount R/W volume unless
  *           explicitly told otherwise
  */
-int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath,
-                     struct afs_volume **_volume)
+struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
 {
        struct afs_vlocation *vlocation = NULL;
        struct afs_volume *volume = NULL;
-       afs_voltype_t type;
-       const char *cellname, *volname, *suffix;
+       struct afs_server *server = NULL;
        char srvtmask;
-       int force, ret, loop, cellnamesz, volnamesz;
-
-       _enter("%s,,%d,", name, rwpath);
-
-       if (!name || (name[0] != '%' && name[0] != '#') || !name[1]) {
-               printk("kAFS: unparsable volume name\n");
-               return -EINVAL;
-       }
-
-       /* determine the type of volume we're looking for */
-       force = 0;
-       type = AFSVL_ROVOL;
-
-       if (rwpath || name[0] == '%') {
-               type = AFSVL_RWVOL;
-               force = 1;
-       }
-
-       suffix = strrchr(name, '.');
-       if (suffix) {
-               if (strcmp(suffix, ".readonly") == 0) {
-                       type = AFSVL_ROVOL;
-                       force = 1;
-               }
-               else if (strcmp(suffix, ".backup") == 0) {
-                       type = AFSVL_BACKVOL;
-                       force = 1;
-               }
-               else if (suffix[1] == 0) {
-               }
-               else {
-                       suffix = NULL;
-               }
-       }
+       int ret, loop;
 
-       /* split the cell and volume names */
-       name++;
-       volname = strchr(name, ':');
-       if (volname) {
-               cellname = name;
-               cellnamesz = volname - name;
-               volname++;
-       }
-       else {
-               volname = name;
-               cellname = NULL;
-               cellnamesz = 0;
-       }
-
-       volnamesz = suffix ? suffix - volname : strlen(volname);
-
-       _debug("CELL:%*.*s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s",
-              cellnamesz, cellnamesz, cellname ?: "", cell,
-              volnamesz, volnamesz, volname, suffix ?: "-",
-              type,
-              force ? " FORCE" : "");
-
-       /* lookup the cell record */
-       if (cellname || !cell) {
-               ret = afs_cell_lookup(cellname, cellnamesz, &cell);
-               if (ret<0) {
-                       printk("kAFS: unable to lookup cell '%s'\n",
-                              cellname ?: "");
-                       goto error;
-               }
-       }
-       else {
-               afs_get_cell(cell);
-       }
+       _enter("{%*.*s,%d}",
+              params->volnamesz, params->volnamesz, params->volname, params->rwpath);
 
        /* lookup the volume location record */
-       ret = afs_vlocation_lookup(cell, volname, volnamesz, &vlocation);
-       if (ret < 0)
+       vlocation = afs_vlocation_lookup(params->cell, params->key,
+                                        params->volname, params->volnamesz);
+       if (IS_ERR(vlocation)) {
+               ret = PTR_ERR(vlocation);
+               vlocation = NULL;
                goto error;
+       }
 
        /* make the final decision on the type we want */
        ret = -ENOMEDIUM;
-       if (force && !(vlocation->vldb.vidmask & (1 << type)))
+       if (params->force && !(vlocation->vldb.vidmask & (1 << params->type)))
                goto error;
 
        srvtmask = 0;
        for (loop = 0; loop < vlocation->vldb.nservers; loop++)
                srvtmask |= vlocation->vldb.srvtmask[loop];
 
-       if (force) {
-               if (!(srvtmask & (1 << type)))
+       if (params->force) {
+               if (!(srvtmask & (1 << params->type)))
                        goto error;
-       }
-       else if (srvtmask & AFS_VOL_VTM_RO) {
-               type = AFSVL_ROVOL;
-       }
-       else if (srvtmask & AFS_VOL_VTM_RW) {
-               type = AFSVL_RWVOL;
-       }
-       else {
+       } else if (srvtmask & AFS_VOL_VTM_RO) {
+               params->type = AFSVL_ROVOL;
+       } else if (srvtmask & AFS_VOL_VTM_RW) {
+               params->type = AFSVL_RWVOL;
+       } else {
                goto error;
        }
 
-       down_write(&cell->vl_sem);
+       down_write(&params->cell->vl_sem);
 
        /* is the volume already active? */
-       if (vlocation->vols[type]) {
+       if (vlocation->vols[params->type]) {
                /* yes - re-use it */
-               volume = vlocation->vols[type];
+               volume = vlocation->vols[params->type];
                afs_get_volume(volume);
                goto success;
        }
@@ -191,23 +100,24 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath,
                goto error_up;
 
        atomic_set(&volume->usage, 1);
-       volume->type            = type;
-       volume->type_force      = force;
-       volume->cell            = cell;
-       volume->vid             = vlocation->vldb.vid[type];
+       volume->type            = params->type;
+       volume->type_force      = params->force;
+       volume->cell            = params->cell;
+       volume->vid             = vlocation->vldb.vid[params->type];
 
        init_rwsem(&volume->server_sem);
 
        /* look up all the applicable server records */
        for (loop = 0; loop < 8; loop++) {
                if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) {
-                       ret = afs_server_lookup(
-                               volume->cell,
-                               &vlocation->vldb.servers[loop],
-                               &volume->servers[volume->nservers]);
-                       if (ret < 0)
+                       server = afs_lookup_server(
+                              volume->cell, &vlocation->vldb.servers[loop]);
+                       if (IS_ERR(server)) {
+                               ret = PTR_ERR(server);
                                goto error_discard;
+                       }
 
+                       volume->servers[volume->nservers] = server;
                        volume->nservers++;
                }
        }
@@ -223,35 +133,34 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath,
        afs_get_vlocation(vlocation);
        volume->vlocation = vlocation;
 
-       vlocation->vols[type] = volume;
+       vlocation->vols[volume->type] = volume;
 
- success:
+success:
        _debug("kAFS selected %s volume %08x",
               afs_voltypes[volume->type], volume->vid);
-       *_volume = volume;
-       ret = 0;
+       up_write(&params->cell->vl_sem);
+       afs_put_vlocation(vlocation);
+       _leave(" = %p", volume);
+       return volume;
 
        /* clean up */
- error_up:
-       up_write(&cell->vl_sem);
- error:
+error_up:
+       up_write(&params->cell->vl_sem);
+error:
        afs_put_vlocation(vlocation);
-       afs_put_cell(cell);
-
-       _leave(" = %d (%p)", ret, volume);
-       return ret;
+       _leave(" = %d", ret);
+       return ERR_PTR(ret);
 
- error_discard:
-       up_write(&cell->vl_sem);
+error_discard:
+       up_write(&params->cell->vl_sem);
 
        for (loop = volume->nservers - 1; loop >= 0; loop--)
                afs_put_server(volume->servers[loop]);
 
        kfree(volume);
        goto error;
-} /* end afs_volume_lookup() */
+}
 
-/*****************************************************************************/
 /*
  * destroy a volume record
  */
@@ -265,10 +174,9 @@ void afs_put_volume(struct afs_volume *volume)
 
        _enter("%p", volume);
 
-       vlocation = volume->vlocation;
+       ASSERTCMP(atomic_read(&volume->usage), >, 0);
 
-       /* sanity check */
-       BUG_ON(atomic_read(&volume->usage) <= 0);
+       vlocation = volume->vlocation;
 
        /* to prevent a race, the decrement and the dequeue must be effectively
         * atomic */
@@ -296,21 +204,27 @@ void afs_put_volume(struct afs_volume *volume)
        kfree(volume);
 
        _leave(" [destroyed]");
-} /* end afs_put_volume() */
+}
 
-/*****************************************************************************/
 /*
  * pick a server to use to try accessing this volume
  * - returns with an elevated usage count on the server chosen
  */
-int afs_volume_pick_fileserver(struct afs_volume *volume,
-                              struct afs_server **_server)
+struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *vnode)
 {
+       struct afs_volume *volume = vnode->volume;
        struct afs_server *server;
        int ret, state, loop;
 
        _enter("%s", volume->vlocation->vldb.name);
 
+       /* stick with the server we're already using if we can */
+       if (vnode->server && vnode->server->fs_state == 0) {
+               afs_get_server(vnode->server);
+               _leave(" = %p [current]", vnode->server);
+               return vnode->server;
+       }
+
        down_read(&volume->server_sem);
 
        /* handle the no-server case */
@@ -318,7 +232,7 @@ int afs_volume_pick_fileserver(struct afs_volume *volume,
                ret = volume->rjservers ? -ENOMEDIUM : -ESTALE;
                up_read(&volume->server_sem);
                _leave(" = %d [no servers]", ret);
-               return ret;
+               return ERR_PTR(ret);
        }
 
        /* basically, just search the list for the first live server and use
@@ -328,15 +242,16 @@ int afs_volume_pick_fileserver(struct afs_volume *volume,
                server = volume->servers[loop];
                state = server->fs_state;
 
+               _debug("consider %d [%d]", loop, state);
+
                switch (state) {
                        /* found an apparently healthy server */
                case 0:
                        afs_get_server(server);
                        up_read(&volume->server_sem);
-                       *_server = server;
-                       _leave(" = 0 (picked %08x)",
-                              ntohl(server->addr.s_addr));
-                       return 0;
+                       _leave(" = %p (picked %08x)",
+                              server, ntohl(server->addr.s_addr));
+                       return server;
 
                case -ENETUNREACH:
                        if (ret == 0)
@@ -372,20 +287,21 @@ int afs_volume_pick_fileserver(struct afs_volume *volume,
         */
        up_read(&volume->server_sem);
        _leave(" = %d", ret);
-       return ret;
-} /* end afs_volume_pick_fileserver() */
+       return ERR_PTR(ret);
+}
 
-/*****************************************************************************/
 /*
  * release a server after use
  * - releases the ref on the server struct that was acquired by picking
  * - records result of using a particular server to access a volume
  * - return 0 to try again, 1 if okay or to issue error
+ * - the caller must release the server struct if result was 0
  */
-int afs_volume_release_fileserver(struct afs_volume *volume,
+int afs_volume_release_fileserver(struct afs_vnode *vnode,
                                  struct afs_server *server,
                                  int result)
 {
+       struct afs_volume *volume = vnode->volume;
        unsigned loop;
 
        _enter("%s,%08x,%d",
@@ -396,14 +312,16 @@ int afs_volume_release_fileserver(struct afs_volume *volume,
                /* success */
        case 0:
                server->fs_act_jif = jiffies;
-               break;
+               server->fs_state = 0;
+               _leave("");
+               return 1;
 
                /* the fileserver denied all knowledge of the volume */
        case -ENOMEDIUM:
                server->fs_act_jif = jiffies;
                down_write(&volume->server_sem);
 
-               /* first, find where the server is in the active list (if it
+               /* firstly, find where the server is in the active list (if it
                 * is) */
                for (loop = 0; loop < volume->nservers; loop++)
                        if (volume->servers[loop] == server)
@@ -441,6 +359,7 @@ int afs_volume_release_fileserver(struct afs_volume *volume,
        case -ENETUNREACH:
        case -EHOSTUNREACH:
        case -ECONNREFUSED:
+       case -ETIME:
        case -ETIMEDOUT:
        case -EREMOTEIO:
                /* mark the server as dead
@@ -460,60 +379,17 @@ int afs_volume_release_fileserver(struct afs_volume *volume,
                server->fs_act_jif = jiffies;
        case -ENOMEM:
        case -ENONET:
-               break;
+               /* tell the caller to accept the result */
+               afs_put_server(server);
+               _leave(" [local failure]");
+               return 1;
        }
 
-       /* tell the caller to accept the result */
-       afs_put_server(server);
-       _leave("");
-       return 1;
-
        /* tell the caller to loop around and try the next server */
- try_next_server_upw:
+try_next_server_upw:
        up_write(&volume->server_sem);
- try_next_server:
+try_next_server:
        afs_put_server(server);
        _leave(" [try next server]");
        return 0;
-
-} /* end afs_volume_release_fileserver() */
-
-/*****************************************************************************/
-/*
- * match a volume hash record stored in the cache
- */
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_volume_cache_match(void *target,
-                                                 const void *entry)
-{
-       const struct afs_cache_vhash *vhash = entry;
-       struct afs_volume *volume = target;
-
-       _enter("{%u},{%u}", volume->type, vhash->vtype);
-
-       if (volume->type == vhash->vtype) {
-               _leave(" = SUCCESS");
-               return CACHEFS_MATCH_SUCCESS;
-       }
-
-       _leave(" = FAILED");
-       return CACHEFS_MATCH_FAILED;
-} /* end afs_volume_cache_match() */
-#endif
-
-/*****************************************************************************/
-/*
- * update a volume hash record stored in the cache
- */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_volume_cache_update(void *source, void *entry)
-{
-       struct afs_cache_vhash *vhash = entry;
-       struct afs_volume *volume = source;
-
-       _enter("");
-
-       vhash->vtype = volume->type;
-
-} /* end afs_volume_cache_update() */
-#endif
+}
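
One point worth noting in the volume.c changes above: afs_volume_lookup() and afs_volume_pick_fileserver() no longer return an int plus an output parameter; they return the object pointer directly and encode failures in it with ERR_PTR(), which callers unpack with IS_ERR()/PTR_ERR(). The userspace sketch below re-creates simplified versions of those three helpers purely to demonstrate the convention; the real definitions live in the kernel's <linux/err.h> and differ in detail, and struct volume/lookup_volume here are invented for the example.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* simplified re-definitions of the <linux/err.h> helpers, for illustration */
static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct volume {
        unsigned int vid;
};

/* toy lookup: failure is encoded in the returned pointer, not an out-param */
static struct volume *lookup_volume(int exists)
{
        static struct volume vol = { .vid = 42 };

        if (!exists)
                return ERR_PTR(-ENOMEDIUM);
        return &vol;
}

int main(void)
{
        struct volume *v = lookup_volume(0);

        if (IS_ERR(v))
                printf("lookup failed: %ld\n", PTR_ERR(v));
        else
                printf("found volume %u\n", v->vid);
        return 0;
}
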
diff --git a/fs/afs/volume.h b/fs/afs/volume.h
deleted file mode 100644
index bfdcf19..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-/* volume.h: AFS volume management
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_AFS_VOLUME_H
-#define _LINUX_AFS_VOLUME_H
-
-#include "types.h"
-#include "fsclient.h"
-#include "kafstimod.h"
-#include "kafsasyncd.h"
-#include "cache.h"
-
-typedef enum {
-       AFS_VLUPD_SLEEP,                /* sleeping waiting for update timer to fire */
-       AFS_VLUPD_PENDING,              /* on pending queue */
-       AFS_VLUPD_INPROGRESS,           /* op in progress */
-       AFS_VLUPD_BUSYSLEEP,            /* sleeping because server returned EBUSY */
-       
-} __attribute__((packed)) afs_vlocation_upd_t;
-
-/*****************************************************************************/
-/*
- * entry in the cached volume location catalogue
- */
-struct afs_cache_vlocation
-{
-       uint8_t                 name[64];       /* volume name (lowercase, padded with NULs) */
-       uint8_t                 nservers;       /* number of entries used in servers[] */
-       uint8_t                 vidmask;        /* voltype mask for vid[] */
-       uint8_t                 srvtmask[8];    /* voltype masks for servers[] */
-#define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */
-#define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */
-#define AFS_VOL_VTM_BAK        0x04 /* backup version of the volume is available (on this server) */
-
-       afs_volid_t             vid[3];         /* volume IDs for R/W, R/O and Bak volumes */
-       struct in_addr          servers[8];     /* fileserver addresses */
-       time_t                  rtime;          /* last retrieval time */
-};
-
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_vlocation_cache_index_def;
-#endif
-
-/*****************************************************************************/
-/*
- * volume -> vnode hash table entry
- */
-struct afs_cache_vhash
-{
-       afs_voltype_t           vtype;          /* which volume variation */
-       uint8_t                 hash_bucket;    /* which hash bucket this represents */
-} __attribute__((packed));
-
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_volume_cache_index_def;
-#endif
-
-/*****************************************************************************/
-/*
- * AFS volume location record
- */
-struct afs_vlocation
-{
-       atomic_t                usage;
-       struct list_head        link;           /* link in cell volume location list */
-       struct afs_timer        timeout;        /* decaching timer */
-       struct afs_cell         *cell;          /* cell to which volume belongs */
-#ifdef AFS_CACHING_SUPPORT
-       struct cachefs_cookie   *cache;         /* caching cookie */
-#endif
-       struct afs_cache_vlocation vldb;        /* volume information DB record */
-       struct afs_volume       *vols[3];       /* volume access record pointer (index by type) */
-       rwlock_t                lock;           /* access lock */
-       unsigned long           read_jif;       /* time at which last read from vlserver */
-       struct afs_timer        upd_timer;      /* update timer */
-       struct afs_async_op     upd_op;         /* update operation */
-       afs_vlocation_upd_t     upd_state;      /* update state */
-       unsigned short          upd_first_svix; /* first server index during update */
-       unsigned short          upd_curr_svix;  /* current server index during update */
-       unsigned short          upd_rej_cnt;    /* ENOMEDIUM count during update */
-       unsigned short          upd_busy_cnt;   /* EBUSY count during update */
-       unsigned short          valid;          /* T if valid */
-};
-
-extern int afs_vlocation_lookup(struct afs_cell *cell,
-                               const char *name,
-                               unsigned namesz,
-                               struct afs_vlocation **_vlocation);
-
-#define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0)
-
-extern void afs_put_vlocation(struct afs_vlocation *vlocation);
-extern void afs_vlocation_do_timeout(struct afs_vlocation *vlocation);
-
-/*****************************************************************************/
-/*
- * AFS volume access record
- */
-struct afs_volume
-{
-       atomic_t                usage;
-       struct afs_cell         *cell;          /* cell to which belongs (unrefd ptr) */
-       struct afs_vlocation    *vlocation;     /* volume location */
-#ifdef AFS_CACHING_SUPPORT
-       struct cachefs_cookie   *cache;         /* caching cookie */
-#endif
-       afs_volid_t             vid;            /* volume ID */
-       afs_voltype_t           type;           /* type of volume */
-       char                    type_force;     /* force volume type (suppress R/O -> R/W) */
-       unsigned short          nservers;       /* number of server slots filled */
-       unsigned short          rjservers;      /* number of servers discarded due to -ENOMEDIUM */
-       struct afs_server       *servers[8];    /* servers on which volume resides (ordered) */
-       struct rw_semaphore     server_sem;     /* lock for accessing current server */
-};
-
-extern int afs_volume_lookup(const char *name,
-                            struct afs_cell *cell,
-                            int rwpath,
-                            struct afs_volume **_volume);
-
-#define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
-
-extern void afs_put_volume(struct afs_volume *volume);
-
-extern int afs_volume_pick_fileserver(struct afs_volume *volume,
-                                     struct afs_server **_server);
-
-extern int afs_volume_release_fileserver(struct afs_volume *volume,
-                                        struct afs_server *server,
-                                        int result);
-
-#endif /* _LINUX_AFS_VOLUME_H */
index 8b1c5d8..c68b055 100644
@@ -266,6 +266,23 @@ static int do_siocgstamp(unsigned int fd, unsigned int cmd, unsigned long arg)
        return err;
 }
 
+static int do_siocgstampns(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+       struct compat_timespec __user *up = compat_ptr(arg);
+       struct timespec kts;
+       mm_segment_t old_fs = get_fs();
+       int err;
+
+       set_fs(KERNEL_DS);
+       err = sys_ioctl(fd, cmd, (unsigned long)&kts);
+       set_fs(old_fs);
+       if (!err) {
+               err = put_user(kts.tv_sec, &up->tv_sec);
+               err |= __put_user(kts.tv_nsec, &up->tv_nsec);
+       }
+       return err;
+}
+
 struct ifmap32 {
        compat_ulong_t mem_start;
        compat_ulong_t mem_end;
@@ -2437,6 +2454,7 @@ HANDLE_IOCTL(SIOCBRDELIF, dev_ifsioc)
 /* Note SIOCRTMSG is no longer, so this is safe and
  * the user would have seen just an -EINVAL anyways. */
 HANDLE_IOCTL(SIOCRTMSG, ret_einval)
 HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp)
+HANDLE_IOCTL(SIOCGSTAMPNS, do_siocgstampns)
 #endif
 #ifdef CONFIG_BLOCK
 HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo)
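
For reference, the new do_siocgstampns() handler above follows the usual compat-ioctl shape: call the native ioctl into a kernel struct timespec, then copy the result field by field into the 32-bit layout the compat caller expects. The sketch below mimics only that narrowing copy in plain userspace C; struct compat32_timespec and copy_to_compat() are invented names for this illustration, and the kernel of course uses struct compat_timespec with put_user()/__put_user() rather than direct assignment.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* made-up 32-bit layout standing in for the kernel's struct compat_timespec */
struct compat32_timespec {
        int32_t tv_sec;
        int32_t tv_nsec;
};

static int copy_to_compat(struct compat32_timespec *up, const struct timespec *kts)
{
        /* the kernel does this with put_user()/__put_user() into user memory
         * and ORs the error codes together; plain assignment is enough to
         * show the field-by-field narrowing */
        up->tv_sec  = (int32_t)kts->tv_sec;
        up->tv_nsec = (int32_t)kts->tv_nsec;
        return 0;
}

int main(void)
{
        struct timespec kts;
        struct compat32_timespec cts;

        if (clock_gettime(CLOCK_REALTIME, &kts) != 0)
                return 1;
        copy_to_compat(&cts, &kts);
        printf("%ld.%09ld\n", (long)cts.tv_sec, (long)cts.tv_nsec);
        return 0;
}
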
index e3aa225..fe91863 100644
@@ -97,7 +97,7 @@ out:
  */
 static int ecryptfs_process_nl_response(struct sk_buff *skb)
 {
-       struct nlmsghdr *nlh = (struct nlmsghdr*)skb->data;
+       struct nlmsghdr *nlh = nlmsg_hdr(skb);
        struct ecryptfs_message *msg = NLMSG_DATA(nlh);
        int rc;
 
@@ -181,7 +181,7 @@ receive:
                                "rc = [%d]\n", rc);
                return;
        }
-       nlh = (struct nlmsghdr *)skb->data;
+       nlh = nlmsg_hdr(skb);
        if (!NLMSG_OK(nlh, skb->len)) {
                ecryptfs_printk(KERN_ERR, "Received corrupt netlink "
                                "message\n");
@@ -229,7 +229,7 @@ int ecryptfs_init_netlink(void)
 
        ecryptfs_nl_sock = netlink_kernel_create(NETLINK_ECRYPTFS, 0,
                                                 ecryptfs_receive_nl_message,
-                                                THIS_MODULE);
+                                                NULL, THIS_MODULE);
        if (!ecryptfs_nl_sock) {
                rc = -EIO;
                ecryptfs_printk(KERN_ERR, "Failed to create netlink socket\n");
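
The eCryptfs hunks above simply switch an open-coded cast of skb->data to the nlmsg_hdr() accessor. The stand-alone sketch below mimics that accessor with heavily stubbed-out structures so the idea is visible outside the kernel; the real struct sk_buff, struct nlmsghdr and nlmsg_hdr() come from <linux/skbuff.h> and <linux/netlink.h> and carry many more fields.

#include <stdio.h>

/* trimmed stand-ins for the kernel structures, illustration only */
struct nlmsghdr {
        unsigned int    nlmsg_len;
        unsigned short  nlmsg_type;
};

struct sk_buff {
        unsigned char   *data;
        unsigned int    len;
};

/* same shape as the kernel's accessor: the message header sits at skb->data */
static struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
        return (struct nlmsghdr *)skb->data;
}

int main(void)
{
        struct nlmsghdr hdr = { .nlmsg_len = 16, .nlmsg_type = 2 };
        struct sk_buff skb = { .data = (unsigned char *)&hdr, .len = 16 };

        printf("type=%u len=%u\n", nlmsg_hdr(&skb)->nlmsg_type,
               nlmsg_hdr(&skb)->nlmsg_len);
        return 0;
}
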
index ad2e91b..7975589 100644
@@ -38,7 +38,6 @@
 static struct nfs_page * nfs_update_request(struct nfs_open_context*,
                                            struct page *,
                                            unsigned int, unsigned int);
-static void nfs_mark_request_dirty(struct nfs_page *req);
 static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
 static const struct rpc_call_ops nfs_write_partial_ops;
 static const struct rpc_call_ops nfs_write_full_ops;
@@ -255,7 +254,8 @@ static void nfs_end_page_writeback(struct page *page)
 static int nfs_page_mark_flush(struct page *page)
 {
        struct nfs_page *req;
-       spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+       struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+       spinlock_t *req_lock = &nfsi->req_lock;
        int ret;
 
        spin_lock(req_lock);
@@ -279,11 +279,23 @@ static int nfs_page_mark_flush(struct page *page)
                        return ret;
                spin_lock(req_lock);
        }
-       spin_unlock(req_lock);
+       if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+               /* This request is marked for commit */
+               spin_unlock(req_lock);
+               nfs_unlock_request(req);
+               return 1;
+       }
        if (nfs_set_page_writeback(page) == 0) {
                nfs_list_remove_request(req);
-               nfs_mark_request_dirty(req);
-       }
+               /* add the request to the inode's dirty list. */
+               radix_tree_tag_set(&nfsi->nfs_page_tree,
+                               req->wb_index, NFS_PAGE_TAG_DIRTY);
+               nfs_list_add_request(req, &nfsi->dirty);
+               nfsi->ndirty++;
+               spin_unlock(req_lock);
+               __mark_inode_dirty(page->mapping->host, I_DIRTY_PAGES);
+       } else
+               spin_unlock(req_lock);
        ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
        nfs_unlock_request(req);
        return ret;
@@ -376,6 +388,8 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
        }
        SetPagePrivate(req->wb_page);
        set_page_private(req->wb_page, (unsigned long)req);
+       if (PageDirty(req->wb_page))
+               set_bit(PG_NEED_FLUSH, &req->wb_flags);
        nfsi->npages++;
        atomic_inc(&req->wb_count);
        return 0;
@@ -395,6 +409,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
        set_page_private(req->wb_page, 0);
        ClearPagePrivate(req->wb_page);
        radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
+       if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
+               __set_page_dirty_nobuffers(req->wb_page);
        nfsi->npages--;
        if (!nfsi->npages) {
                spin_unlock(&nfsi->req_lock);
@@ -406,24 +422,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
        nfs_release_request(req);
 }
 
-/*
- * Add a request to the inode's dirty list.
- */
-static void
-nfs_mark_request_dirty(struct nfs_page *req)
-{
-       struct inode *inode = req->wb_context->dentry->d_inode;
-       struct nfs_inode *nfsi = NFS_I(inode);
-
-       spin_lock(&nfsi->req_lock);
-       radix_tree_tag_set(&nfsi->nfs_page_tree,
-                       req->wb_index, NFS_PAGE_TAG_DIRTY);
-       nfs_list_add_request(req, &nfsi->dirty);
-       nfsi->ndirty++;
-       spin_unlock(&nfsi->req_lock);
-       __mark_inode_dirty(inode, I_DIRTY_PAGES);
-}
-
 static void
 nfs_redirty_request(struct nfs_page *req)
 {
@@ -438,7 +436,7 @@ nfs_dirty_request(struct nfs_page *req)
 {
        struct page *page = req->wb_page;
 
-       if (page == NULL)
+       if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
                return 0;
        return !PageWriteback(req->wb_page);
 }
@@ -456,10 +454,48 @@ nfs_mark_request_commit(struct nfs_page *req)
        spin_lock(&nfsi->req_lock);
        nfs_list_add_request(req, &nfsi->commit);
        nfsi->ncommit++;
+       set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
        spin_unlock(&nfsi->req_lock);
        inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+       return data->verf.committed != NFS_FILE_SYNC;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req)
+{
+       if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+               nfs_mark_request_commit(req);
+               return 1;
+       }
+       if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+               nfs_redirty_request(req);
+               return 1;
+       }
+       return 0;
+}
+#else
+static inline void
+nfs_mark_request_commit(struct nfs_page *req)
+{
+}
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+       return 0;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req)
+{
+       return 0;
+}
 #endif
 
 /*
@@ -520,6 +556,7 @@ static void nfs_cancel_commit_list(struct list_head *head)
                req = nfs_list_entry(head->next);
                dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
                nfs_list_remove_request(req);
+               clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
                nfs_inode_remove_request(req);
                nfs_unlock_request(req);
        }
@@ -746,26 +783,12 @@ int nfs_updatepage(struct file *file, struct page *page,
 
 static void nfs_writepage_release(struct nfs_page *req)
 {
-       nfs_end_page_writeback(req->wb_page);
 
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-       if (!PageError(req->wb_page)) {
-               if (NFS_NEED_RESCHED(req)) {
-                       nfs_redirty_request(req);
-                       goto out;
-               } else if (NFS_NEED_COMMIT(req)) {
-                       nfs_mark_request_commit(req);
-                       goto out;
-               }
-       }
-       nfs_inode_remove_request(req);
-
-out:
-       nfs_clear_commit(req);
-       nfs_clear_reschedule(req);
-#else
-       nfs_inode_remove_request(req);
-#endif
+       if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
+               nfs_end_page_writeback(req->wb_page);
+               nfs_inode_remove_request(req);
+       } else
+               nfs_end_page_writeback(req->wb_page);
        nfs_clear_page_writeback(req);
 }
 
@@ -897,8 +920,8 @@ out_bad:
                list_del(&data->pages);
                nfs_writedata_release(data);
        }
-       nfs_end_page_writeback(req->wb_page);
        nfs_redirty_request(req);
+       nfs_end_page_writeback(req->wb_page);
        nfs_clear_page_writeback(req);
        return -ENOMEM;
 }
@@ -943,8 +966,8 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
-               nfs_end_page_writeback(req->wb_page);
                nfs_redirty_request(req);
+               nfs_end_page_writeback(req->wb_page);
                nfs_clear_page_writeback(req);
        }
        return -ENOMEM;
@@ -979,8 +1002,8 @@ out_err:
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
-               nfs_end_page_writeback(req->wb_page);
                nfs_redirty_request(req);
+               nfs_end_page_writeback(req->wb_page);
                nfs_clear_page_writeback(req);
        }
        return error;
@@ -1008,22 +1031,28 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
                nfs_set_pageerror(page);
                req->wb_context->error = task->tk_status;
                dprintk(", error = %d\n", task->tk_status);
-       } else {
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-               if (data->verf.committed < NFS_FILE_SYNC) {
-                       if (!NFS_NEED_COMMIT(req)) {
-                               nfs_defer_commit(req);
-                               memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-                               dprintk(" defer commit\n");
-                       } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
-                               nfs_defer_reschedule(req);
-                               dprintk(" server reboot detected\n");
-                       }
-               } else
-#endif
-                       dprintk(" OK\n");
+               goto out;
        }
 
+       if (nfs_write_need_commit(data)) {
+               spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+
+               spin_lock(req_lock);
+               if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+                       /* Do nothing; we need to resend the writes */
+               } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+                       memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+                       dprintk(" defer commit\n");
+               } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
+                       set_bit(PG_NEED_RESCHED, &req->wb_flags);
+                       clear_bit(PG_NEED_COMMIT, &req->wb_flags);
+                       dprintk(" server reboot detected\n");
+               }
+               spin_unlock(req_lock);
+       } else
+               dprintk(" OK\n");
+
+out:
        if (atomic_dec_and_test(&req->wb_complete))
                nfs_writepage_release(req);
 }
@@ -1064,25 +1093,21 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
                if (task->tk_status < 0) {
                        nfs_set_pageerror(page);
                        req->wb_context->error = task->tk_status;
-                       nfs_end_page_writeback(page);
-                       nfs_inode_remove_request(req);
                        dprintk(", error = %d\n", task->tk_status);
-                       goto next;
+                       goto remove_request;
                }
-               nfs_end_page_writeback(page);
 
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-               if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
-                       nfs_inode_remove_request(req);
-                       dprintk(" OK\n");
+               if (nfs_write_need_commit(data)) {
+                       memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+                       nfs_mark_request_commit(req);
+                       nfs_end_page_writeback(page);
+                       dprintk(" marked for commit\n");
                        goto next;
                }
-               memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-               nfs_mark_request_commit(req);
-               dprintk(" marked for commit\n");
-#else
+               dprintk(" OK\n");
+remove_request:
+               nfs_end_page_writeback(page);
                nfs_inode_remove_request(req);
-#endif
        next:
                nfs_clear_page_writeback(req);
        }
@@ -1270,6 +1295,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
+               clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
                dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 
                dprintk("NFS: commit (%s/%Ld %d@%Ld)",
@@ -1505,15 +1531,22 @@ int nfs_wb_page(struct inode *inode, struct page* page)
 
 int nfs_set_page_dirty(struct page *page)
 {
+       spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
        struct nfs_page *req;
+       int ret;
 
-       req = nfs_page_find_request(page);
+       spin_lock(req_lock);
+       req = nfs_page_find_request_locked(page);
        if (req != NULL) {
                /* Mark any existing write requests for flushing */
-               set_bit(PG_NEED_FLUSH, &req->wb_flags);
+               ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
+               spin_unlock(req_lock);
                nfs_release_request(req);
+               return ret;
        }
-       return __set_page_dirty_nobuffers(page);
+       ret = __set_page_dirty_nobuffers(page);
+       spin_unlock(req_lock);
+       return ret;
 }
 
 
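The two hunks above implement the deferred-commit bookkeeping: an UNSTABLE write reply carries a verifier, and a different verifier in a later reply means the server restarted, so the unstable data must be rewritten rather than committed. A minimal, self-contained sketch of that rule, using hypothetical stand-ins (write_verifier, pending_write) rather than the kernel's nfs_page/nfs_write_data structures:

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-ins for the kernel's verifier handling. */
struct write_verifier { unsigned char data[8]; };

struct pending_write {
	struct write_verifier verf;   /* verifier from the first UNSTABLE reply */
	bool needs_commit;            /* waiting for a COMMIT to stabilize it   */
	bool needs_resend;            /* server reboot detected, resend data    */
};

static void note_unstable_reply(struct pending_write *req,
				const struct write_verifier *reply_verf)
{
	if (!req->needs_commit) {
		/* First unstable reply: remember the verifier, defer commit. */
		req->verf = *reply_verf;
		req->needs_commit = true;
	} else if (memcmp(&req->verf, reply_verf, sizeof(req->verf)) != 0) {
		/* Verifier changed: the server restarted, data must be resent. */
		req->needs_commit = false;
		req->needs_resend = true;
	}
}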
index f01389f..c8178b7 100644 (file)
 static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char
                                                                *prefix);
 
-static struct dentry *create_xa_root(struct super_block *sb)
+/* Returns the dentry referring to the root of the extended attribute
+ * directory tree. If it has already been retrieved, it is used. If it
+ * hasn't been created and the flags indicate creation is allowed, we
+ * attempt to create it. On error, we return a pointer-encoded error.
+ */
+static struct dentry *get_xa_root(struct super_block *sb, int flags)
 {
        struct dentry *privroot = dget(REISERFS_SB(sb)->priv_root);
        struct dentry *xaroot;
 
        /* This needs to be created at mount-time */
        if (!privroot)
-               return ERR_PTR(-EOPNOTSUPP);
+               return ERR_PTR(-ENODATA);
 
-       xaroot = lookup_one_len(XAROOT_NAME, privroot, strlen(XAROOT_NAME));
-       if (IS_ERR(xaroot)) {
+       mutex_lock(&privroot->d_inode->i_mutex);
+       if (REISERFS_SB(sb)->xattr_root) {
+               xaroot = dget(REISERFS_SB(sb)->xattr_root);
                goto out;
-       } else if (!xaroot->d_inode) {
-               int err;
-               mutex_lock(&privroot->d_inode->i_mutex);
-               err =
-                   privroot->d_inode->i_op->mkdir(privroot->d_inode, xaroot,
-                                                  0700);
-               mutex_unlock(&privroot->d_inode->i_mutex);
-
-               if (err) {
-                       dput(xaroot);
-                       dput(privroot);
-                       return ERR_PTR(err);
-               }
-               REISERFS_SB(sb)->xattr_root = dget(xaroot);
        }
 
-      out:
-       dput(privroot);
-       return xaroot;
-}
-
-/* This will return a dentry, or error, refering to the xa root directory.
- * If the xa root doesn't exist yet, the dentry will be returned without
- * an associated inode. This dentry can be used with ->mkdir to create
- * the xa directory. */
-static struct dentry *__get_xa_root(struct super_block *s)
-{
-       struct dentry *privroot = dget(REISERFS_SB(s)->priv_root);
-       struct dentry *xaroot = NULL;
-
-       if (IS_ERR(privroot) || !privroot)
-               return privroot;
-
        xaroot = lookup_one_len(XAROOT_NAME, privroot, strlen(XAROOT_NAME));
        if (IS_ERR(xaroot)) {
                goto out;
        } else if (!xaroot->d_inode) {
-               dput(xaroot);
-               xaroot = NULL;
-               goto out;
+               int err = -ENODATA;
+               if (flags == 0 || flags & XATTR_CREATE)
+                       err = privroot->d_inode->i_op->mkdir(privroot->d_inode,
+                                                            xaroot, 0700);
+               if (err) {
+                       dput(xaroot);
+                       xaroot = ERR_PTR(err);
+                       goto out;
+               }
        }
-
-       REISERFS_SB(s)->xattr_root = dget(xaroot);
+       REISERFS_SB(sb)->xattr_root = dget(xaroot);
 
       out:
+       mutex_unlock(&privroot->d_inode->i_mutex);
        dput(privroot);
        return xaroot;
 }
 
-/* Returns the dentry (or NULL) referring to the root of the extended
- * attribute directory tree. If it has already been retrieved, it is used.
- * Otherwise, we attempt to retrieve it from disk. It may also return
- * a pointer-encoded error.
- */
-static inline struct dentry *get_xa_root(struct super_block *s)
-{
-       struct dentry *dentry = dget(REISERFS_SB(s)->xattr_root);
-
-       if (!dentry)
-               dentry = __get_xa_root(s);
-
-       return dentry;
-}
-
 /* Opens the directory corresponding to the inode's extended attribute store.
  * If flags allow, the tree to the directory may be created. If creation is
  * prohibited, -ENODATA is returned. */
@@ -138,21 +104,11 @@ static struct dentry *open_xa_dir(const struct inode *inode, int flags)
        struct dentry *xaroot, *xadir;
        char namebuf[17];
 
-       xaroot = get_xa_root(inode->i_sb);
-       if (IS_ERR(xaroot)) {
+       xaroot = get_xa_root(inode->i_sb, flags);
+       if (IS_ERR(xaroot))
                return xaroot;
-       } else if (!xaroot) {
-               if (flags == 0 || flags & XATTR_CREATE) {
-                       xaroot = create_xa_root(inode->i_sb);
-                       if (IS_ERR(xaroot))
-                               return xaroot;
-               }
-               if (!xaroot)
-                       return ERR_PTR(-ENODATA);
-       }
 
        /* ok, we have xaroot open */
-
        snprintf(namebuf, sizeof(namebuf), "%X.%X",
                 le32_to_cpu(INODE_PKEY(inode)->k_objectid),
                 inode->i_generation);
@@ -821,7 +777,7 @@ int reiserfs_delete_xattrs(struct inode *inode)
 
        /* Leftovers besides . and .. -- that's not good. */
        if (dir->d_inode->i_nlink <= 2) {
-               root = get_xa_root(inode->i_sb);
+               root = get_xa_root(inode->i_sb, XATTR_REPLACE);
                reiserfs_write_lock_xattrs(inode->i_sb);
                err = vfs_rmdir(root->d_inode, dir);
                reiserfs_write_unlock_xattrs(inode->i_sb);
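With create_xa_root() folded into get_xa_root(), callers only deal with the error-pointer convention shown above. A hedged sketch of the expected calling pattern (the example function name is made up; get_xa_root(), IS_ERR(), PTR_ERR() and dput() are as in the patch and the kernel's err/dcache helpers):

#include <linux/err.h>
#include <linux/dcache.h>
#include <linux/fs.h>

/* Sketch only: mirrors how open_xa_dir() and reiserfs_delete_xattrs()
 * consume the new get_xa_root() return value. */
static int example_with_xa_root(struct super_block *sb, int flags)
{
	struct dentry *xaroot = get_xa_root(sb, flags);

	if (IS_ERR(xaroot))
		return PTR_ERR(xaroot);	/* -ENODATA when creation was not allowed */

	/* ... look up or create per-inode xattr directories under xaroot ... */

	dput(xaroot);
	return 0;
}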
index d22ab97..1fede7f 100644 (file)
@@ -52,6 +52,8 @@
 
 #define SO_PEERSEC             30
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             19
index e4961a7..7932c7a 100644 (file)
@@ -10,6 +10,7 @@
 #define SIOCSPGRP      _IOW('s', 8, pid_t)
 #define SIOCGPGRP      _IOR('s', 9, pid_t)
 
-#define SIOCGSTAMP     0x8906          /* Get stamp - linux-specific */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* _ASM_ALPHA_SOCKIOS_H */
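The SO_TIMESTAMPNS/SCM_TIMESTAMPNS and SIOCGSTAMPNS additions repeated across the per-arch headers in this diff give userspace nanosecond-resolution receive timestamps as a struct timespec, alongside the existing microsecond SO_TIMESTAMP/SIOCGSTAMP pair. A userspace sketch of the cmsg-based variant (assuming the libc headers expose the new constants; otherwise they can be defined by hand):

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void print_rx_timestamp_ns(int fd)
{
	int on = 1;
	char data[2048];
	char ctrl[CMSG_SPACE(sizeof(struct timespec))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	/* Ask the kernel to attach a struct timespec to received packets. */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &on, sizeof(on));

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPNS) {
			struct timespec ts;

			memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
			printf("received at %ld.%09ld\n",
			       (long)ts.tv_sec, ts.tv_nsec);
		}
	}
}

The ioctl variant is the one-shot equivalent: ioctl(fd, SIOCGSTAMPNS, &ts) fills a struct timespec with the timestamp of the last packet delivered to the socket, mirroring SIOCGSTAMP and struct timeval.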
index 9e44fea..b02b8a2 100644 (file)
@@ -61,8 +61,6 @@ extern void * __memsetw(void *dest, unsigned short, size_t count);
  ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \
  : __memsetw((s),(c),(n)))
 
-extern int strcasecmp(const char *, const char *);
-
 #endif /* __KERNEL__ */
 
 #endif /* __ALPHA_STRING_H__ */
index 37e0a96..0b5f881 100644 (file)
@@ -2,6 +2,7 @@
 #define __ASM_ARM_DIV64
 
 #include <asm/system.h>
+#include <linux/types.h>
 
 /*
  * The semantics of do_div() are:
 
 #endif
 
+extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
+
 #endif
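The new div64_64() declaration above provides full 64-by-64 unsigned division on 32-bit targets, where a plain '/' on two u64 values would require libgcc helpers the kernel does not link against. A hedged usage sketch (the wrapper function is illustrative; div64_64() itself is declared in the per-arch <asm/div64.h> as in this hunk):

#include <linux/types.h>
#include <asm/div64.h>

/* Sketch: average bytes per interval without truncating either operand. */
static u64 bytes_per_interval(u64 total_bytes, u64 intervals)
{
	if (!intervals)
		return 0;
	return div64_64(total_bytes, intervals);
}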
index 19f7df7..65a1a64 100644 (file)
@@ -49,5 +49,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_SOCKET_H */
index 77c3408..a2588a2 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif
index 19f7df7..65a1a64 100644 (file)
@@ -49,5 +49,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_SOCKET_H */
index 77c3408..a2588a2 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif
diff --git a/include/asm-avr32/arch-at32ap/io.h b/include/asm-avr32/arch-at32ap/io.h
new file mode 100644 (file)
index 0000000..ee59e40
--- /dev/null
@@ -0,0 +1,39 @@
+#ifndef __ASM_AVR32_ARCH_AT32AP_IO_H
+#define __ASM_AVR32_ARCH_AT32AP_IO_H
+
+/* For "bizarre" halfword swapping */
+#include <linux/byteorder/swabb.h>
+
+#if defined(CONFIG_AP7000_32_BIT_SMC)
+# define __swizzle_addr_b(addr)        (addr ^ 3UL)
+# define __swizzle_addr_w(addr)        (addr ^ 2UL)
+# define __swizzle_addr_l(addr)        (addr)
+# define ioswabb(a, x)         (x)
+# define ioswabw(a, x)         (x)
+# define ioswabl(a, x)         (x)
+# define __mem_ioswabb(a, x)   (x)
+# define __mem_ioswabw(a, x)   swab16(x)
+# define __mem_ioswabl(a, x)   swab32(x)
+#elif defined(CONFIG_AP7000_16_BIT_SMC)
+# define __swizzle_addr_b(addr)        (addr ^ 1UL)
+# define __swizzle_addr_w(addr)        (addr)
+# define __swizzle_addr_l(addr)        (addr)
+# define ioswabb(a, x)         (x)
+# define ioswabw(a, x)         (x)
+# define ioswabl(a, x)         swahw32(x)
+# define __mem_ioswabb(a, x)   (x)
+# define __mem_ioswabw(a, x)   swab16(x)
+# define __mem_ioswabl(a, x)   swahb32(x)
+#else
+# define __swizzle_addr_b(addr)        (addr)
+# define __swizzle_addr_w(addr)        (addr)
+# define __swizzle_addr_l(addr)        (addr)
+# define ioswabb(a, x)         (x)
+# define ioswabw(a, x)         swab16(x)
+# define ioswabl(a, x)         swab32(x)
+# define __mem_ioswabb(a, x)   (x)
+# define __mem_ioswabw(a, x)   (x)
+# define __mem_ioswabl(a, x)   (x)
+#endif
+
+#endif /* __ASM_AVR32_ARCH_AT32AP_IO_H */
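The swizzle macros in the new header above compensate for how sub-word accesses land on the external bus when the static memory controller is wired 32 or 16 bits wide: the low address bits are XORed so the byte and halfword lanes line up again. A standalone arithmetic check of the 32-bit SMC case (the function names here are illustrative copies of the macros, not kernel code):

#include <assert.h>

static unsigned long swizzle_addr_b(unsigned long addr) { return addr ^ 3UL; }
static unsigned long swizzle_addr_w(unsigned long addr) { return addr ^ 2UL; }

int main(void)
{
	/* A byte at offset 5 is actually fetched from offset 6 ... */
	assert(swizzle_addr_b(0x05) == 0x06);
	/* ... and the halfword at offset 6 comes from offset 4. */
	assert(swizzle_addr_w(0x06) == 0x04);
	return 0;
}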
index 3732b32..07152b7 100644 (file)
@@ -47,11 +47,33 @@ struct smc_config {
         */
        unsigned int    nwe_controlled:1;
 
+       /*
+        * 0: NWAIT is disabled
+        * 1: Reserved
+        * 2: NWAIT in frozen mode
+        * 3: NWAIT in ready mode
+        */
+       unsigned int    nwait_mode:2;
+
        /*
         * 0: Byte select access type
         * 1: Byte write access type
         */
        unsigned int    byte_write:1;
+
+       /*
+        * Number of clock cycles before data is released after
+        * the rising edge of the read controlling signal
+        *
+        * Total cycles from SMC is tdf_cycles + 1
+        */
+       unsigned int    tdf_cycles:4;
+
+       /*
+        * 0: TDF optimization disabled
+        * 1: TDF optimization enabled
+        */
+       unsigned int    tdf_mode:1;
 };
 
 extern int smc_set_configuration(int cs, const struct smc_config *config);
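A hedged sketch of how a board setup file might use the new nwait_mode/tdf_cycles/tdf_mode fields; only fields visible in this hunk are initialized, the timing values are arbitrary, and the header path is assumed to be <asm/arch/smc.h>:

#include <asm/arch/smc.h>	/* assumed location of struct smc_config */

static int example_setup_flash_cs(int cs)
{
	/* Values are illustrative, not real flash timings. */
	static struct smc_config cfg = {
		.nwe_controlled	= 1,
		.nwait_mode	= 0,	/* NWAIT disabled */
		.byte_write	= 1,
		.tdf_cycles	= 2,	/* data released after tdf_cycles + 1 clocks */
		.tdf_mode	= 0,	/* TDF optimization disabled */
	};

	return smc_set_configuration(cs, &cfg);
}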
diff --git a/include/asm-avr32/arch-at32ap/time.h b/include/asm-avr32/arch-at32ap/time.h
new file mode 100644 (file)
index 0000000..cc8a434
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_AVR32_ARCH_AT32AP_TIME_H
+#define _ASM_AVR32_ARCH_AT32AP_TIME_H
+
+#include <linux/platform_device.h>
+
+extern struct irqaction timer_irqaction;
+extern struct platform_device at32_systc0_device;
+extern void local_timer_interrupt(int irq, void *dev_id);
+
+#define TIMER_BCR                                      0x000000c0
+#define TIMER_BCR_SYNC                                          0
+#define TIMER_BMR                                      0x000000c4
+#define TIMER_BMR_TC0XC0S                                       0
+#define TIMER_BMR_TC1XC1S                                       2
+#define TIMER_BMR_TC2XC2S                                       4
+#define TIMER_CCR                                      0x00000000
+#define TIMER_CCR_CLKDIS                                        1
+#define TIMER_CCR_CLKEN                                                 0
+#define TIMER_CCR_SWTRG                                                 2
+#define TIMER_CMR                                      0x00000004
+#define TIMER_CMR_ABETRG                                       10
+#define TIMER_CMR_ACPA                                         16
+#define TIMER_CMR_ACPC                                         18
+#define TIMER_CMR_AEEVT                                                20
+#define TIMER_CMR_ASWTRG                                       22
+#define TIMER_CMR_BCPB                                         24
+#define TIMER_CMR_BCPC                                         26
+#define TIMER_CMR_BEEVT                                                28
+#define TIMER_CMR_BSWTRG                                       30
+#define TIMER_CMR_BURST                                                 4
+#define TIMER_CMR_CLKI                                          3
+#define TIMER_CMR_CPCDIS                                        7
+#define TIMER_CMR_CPCSTOP                                       6
+#define TIMER_CMR_CPCTRG                                       14
+#define TIMER_CMR_EEVT                                         10
+#define TIMER_CMR_EEVTEDG                                       8
+#define TIMER_CMR_ENETRG                                       12
+#define TIMER_CMR_ETRGEDG                                       8
+#define TIMER_CMR_LDBDIS                                        7
+#define TIMER_CMR_LDBSTOP                                       6
+#define TIMER_CMR_LDRA                                         16
+#define TIMER_CMR_LDRB                                         18
+#define TIMER_CMR_TCCLKS                                        0
+#define TIMER_CMR_WAVE                                         15
+#define TIMER_CMR_WAVSEL                                       13
+#define TIMER_CV                                       0x00000010
+#define TIMER_CV_CV                                             0
+#define TIMER_IDR                                      0x00000028
+#define TIMER_IDR_COVFS                                                 0
+#define TIMER_IDR_CPAS                                          2
+#define TIMER_IDR_CPBS                                          3
+#define TIMER_IDR_CPCS                                          4
+#define TIMER_IDR_ETRGS                                                 7
+#define TIMER_IDR_LDRAS                                                 5
+#define TIMER_IDR_LDRBS                                                 6
+#define TIMER_IDR_LOVRS                                                 1
+#define TIMER_IER                                      0x00000024
+#define TIMER_IER_COVFS                                                 0
+#define TIMER_IER_CPAS                                          2
+#define TIMER_IER_CPBS                                          3
+#define TIMER_IER_CPCS                                          4
+#define TIMER_IER_ETRGS                                                 7
+#define TIMER_IER_LDRAS                                                 5
+#define TIMER_IER_LDRBS                                                 6
+#define TIMER_IER_LOVRS                                                 1
+#define TIMER_IMR                                      0x0000002c
+#define TIMER_IMR_COVFS                                                 0
+#define TIMER_IMR_CPAS                                          2
+#define TIMER_IMR_CPBS                                          3
+#define TIMER_IMR_CPCS                                          4
+#define TIMER_IMR_ETRGS                                                 7
+#define TIMER_IMR_LDRAS                                                 5
+#define TIMER_IMR_LDRBS                                                 6
+#define TIMER_IMR_LOVRS                                                 1
+#define TIMER_RA                                       0x00000014
+#define TIMER_RA_RA                                             0
+#define TIMER_RB                                       0x00000018
+#define TIMER_RB_RB                                             0
+#define TIMER_RC                                       0x0000001c
+#define TIMER_RC_RC                                             0
+#define TIMER_SR                                       0x00000020
+#define TIMER_SR_CLKSTA                                                16
+#define TIMER_SR_COVFS                                          0
+#define TIMER_SR_CPAS                                           2
+#define TIMER_SR_CPBS                                           3
+#define TIMER_SR_CPCS                                           4
+#define TIMER_SR_ETRGS                                          7
+#define TIMER_SR_LDRAS                                          5
+#define TIMER_SR_LDRBS                                          6
+#define TIMER_SR_LOVRS                                          1
+#define TIMER_SR_MTIOA                                         17
+#define TIMER_SR_MTIOB                                         18
+
+/* Bit manipulation macros */
+#define TIMER_BIT(name)                (1 << TIMER_##name)
+#define TIMER_BF(name,value)   ((value) << TIMER_##name)
+
+/* Register access macros */
+#define timer_read(port,instance,reg) \
+       __raw_readl(port + (0x40 * instance) + TIMER_##reg)
+#define timer_write(port,instance,reg,value) \
+       __raw_writel((value), port + (0x40 * instance) + TIMER_##reg)
+
+#endif /* _ASM_AVR32_ARCH_AT32AP_TIME_H */
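A small usage sketch for the TIMER_BIT()/timer_read()/timer_write() helpers defined above; "regs" stands for an ioremapped base address of the timer/counter block, and the function itself is made up rather than part of the patch:

/* Assumes the header above has been included via <asm/arch/time.h>. */
static void example_start_tc_channel(void __iomem *regs)
{
	/* Enable the channel clock and software-trigger the counter. */
	timer_write(regs, 0, CCR, TIMER_BIT(CCR_CLKEN) | TIMER_BIT(CCR_SWTRG));

	/* Busy-wait until the RC compare status bit is raised. */
	while (!(timer_read(regs, 0, SR) & TIMER_BIT(SR_CPCS)))
		cpu_relax();
}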
index c40b603..b9c2548 100644 (file)
@@ -173,7 +173,7 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v)
 }
 
 #define atomic_xchg(v, new)    (xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, o, n)        ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg(v, o, n)        (cmpxchg(&((v)->counter), (o), (n)))
 
 #define atomic_sub(i, v)       (void)atomic_sub_return(i, v)
 #define atomic_add(i, v)       (void)atomic_add_return(i, v)
index 521766b..afdcd79 100644 (file)
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 
-#define BUG()                                                          \
-       do {                                                            \
-               asm volatile(".hword    %0\n\t"                         \
-                            ".hword    %1\n\t"                         \
-                            ".long     %2"                             \
-                            :                                          \
-                            : "n"(AVR32_BUG_OPCODE),                   \
-                              "i"(__LINE__), "X"(__FILE__));           \
-       } while (0)
+#define _BUG_OR_WARN(flags)                                            \
+       asm volatile(                                                   \
+               "1:     .hword  %0\n"                                   \
+               "       .section __bug_table,\"a\",@progbits\n"         \
+               "2:     .long   1b\n"                                   \
+               "       .long   %1\n"                                   \
+               "       .short  %2\n"                                   \
+               "       .short  %3\n"                                   \
+               "       .org    2b + %4\n"                              \
+               "       .previous"                                      \
+               :                                                       \
+               : "i"(AVR32_BUG_OPCODE), "i"(__FILE__),                 \
+                 "i"(__LINE__), "i"(flags),                            \
+                 "i"(sizeof(struct bug_entry)))
 
 #else
 
+#define _BUG_OR_WARN(flags)                                            \
+       asm volatile(                                                   \
+               "1:     .hword  %0\n"                                   \
+               "       .section __bug_table,\"a\",@progbits\n"         \
+               "2:     .long   1b\n"                                   \
+               "       .short  %1\n"                                   \
+               "       .org    2b + %2\n"                              \
+               "       .previous"                                      \
+               :                                                       \
+               : "i"(AVR32_BUG_OPCODE), "i"(flags),                    \
+                 "i"(sizeof(struct bug_entry)))
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
 #define BUG()                                                          \
        do {                                                            \
-               asm volatile(".hword    %0\n\t"                         \
-                            : : "n"(AVR32_BUG_OPCODE));                \
+               _BUG_OR_WARN(0);                                        \
+               for (;;);                                               \
        } while (0)
 
-#endif /* CONFIG_DEBUG_BUGVERBOSE */
+#define WARN_ON(condition)                                                     \
+       ({                                                              \
+               typeof(condition) __ret_warn_on = (condition);          \
+               if (unlikely(__ret_warn_on))                            \
+                       _BUG_OR_WARN(BUGFLAG_WARNING);                  \
+               unlikely(__ret_warn_on);                                \
+       })
 
 #define HAVE_ARCH_BUG
+#define HAVE_ARCH_WARN_ON
 
 #endif /* CONFIG_BUG */
 
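The .long/.short directives in _BUG_OR_WARN() above are laid out to match the generic bug_entry record consumed by the kernel's bug-trap handler. The approximate layout of that record (reconstructed from memory of the generic header of this era, not taken from this patch, so treat the field names as assumptions) is:

/* Approximate layout the directives above are matching (CONFIG_GENERIC_BUG);
 * the verbose variant additionally records file and line. */
struct bug_entry {
	unsigned long	bug_addr;	/* address of the trapping instruction (1b) */
#ifdef CONFIG_DEBUG_BUGVERBOSE
	const char	*file;		/* __FILE__ of the BUG()/WARN_ON() site */
	unsigned short	line;		/* __LINE__ of the site */
#endif
	unsigned short	flags;		/* 0 for BUG(), BUGFLAG_WARNING for WARN_ON() */
};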
index c08e810..e30d4b3 100644 (file)
@@ -1,13 +1,15 @@
 #ifndef __ASM_AVR32_IO_H
 #define __ASM_AVR32_IO_H
 
+#include <linux/kernel.h>
 #include <linux/string.h>
-
-#ifdef __KERNEL__
+#include <linux/types.h>
 
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
 
+#include <asm/arch/io.h>
+
 /* virt_to_phys will only work when address is in P1 or P2 */
 static __inline__ unsigned long virt_to_phys(volatile void *address)
 {
@@ -36,104 +38,215 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
 extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
 extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 
-static inline void writeb(unsigned char b, volatile void __iomem *addr)
+static inline void __raw_writeb(u8 v, volatile void __iomem *addr)
 {
-       *(volatile unsigned char __force *)addr = b;
+       *(volatile u8 __force *)addr = v;
 }
-static inline void writew(unsigned short b, volatile void __iomem *addr)
+static inline void __raw_writew(u16 v, volatile void __iomem *addr)
 {
-       *(volatile unsigned short __force *)addr = b;
+       *(volatile u16 __force *)addr = v;
 }
-static inline void writel(unsigned int b, volatile void __iomem *addr)
+static inline void __raw_writel(u32 v, volatile void __iomem *addr)
 {
-       *(volatile unsigned int __force *)addr = b;
+       *(volatile u32 __force *)addr = v;
 }
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
 
-static inline unsigned char readb(const volatile void __iomem *addr)
+static inline u8 __raw_readb(const volatile void __iomem *addr)
 {
-       return *(const volatile unsigned char __force *)addr;
+       return *(const volatile u8 __force *)addr;
 }
-static inline unsigned short readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw(const volatile void __iomem *addr)
 {
-       return *(const volatile unsigned short __force *)addr;
+       return *(const volatile u16 __force *)addr;
 }
-static inline unsigned int readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl(const volatile void __iomem *addr)
 {
-       return *(const volatile unsigned int __force *)addr;
+       return *(const volatile u32 __force *)addr;
+}
+
+/* Convert I/O port address to virtual address */
+#ifndef __io
+# define __io(p)       ((void *)phys_to_uncached(p))
+#endif
+
+/*
+ * Not really sure about the best way to slow down I/O on
+ * AVR32. Defining it as a no-op until we have an actual test case.
+ */
+#define SLOW_DOWN_IO   do { } while (0)
+
+#define __BUILD_MEMORY_SINGLE(pfx, bwl, type)                          \
+static inline void                                                     \
+pfx##write##bwl(type val, volatile void __iomem *addr)                 \
+{                                                                      \
+       volatile type *__addr;                                          \
+       type __val;                                                     \
+                                                                       \
+       __addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));   \
+       __val = pfx##ioswab##bwl(__addr, val);                          \
+                                                                       \
+       BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));             \
+                                                                       \
+       *__addr = __val;                                                \
+}                                                                      \
+                                                                       \
+static inline type pfx##read##bwl(const volatile void __iomem *addr)   \
+{                                                                      \
+       volatile type *__addr;                                          \
+       type __val;                                                     \
+                                                                       \
+       __addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));   \
+                                                                       \
+       BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));             \
+                                                                       \
+       __val = *__addr;                                                \
+       return pfx##ioswab##bwl(__addr, __val);                         \
+}
+
+#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow)                 \
+static inline void pfx##out##bwl##p(type val, unsigned long port)      \
+{                                                                      \
+       volatile type *__addr;                                          \
+       type __val;                                                     \
+                                                                       \
+       __addr = __io(__swizzle_addr_##bwl(port));                      \
+       __val = pfx##ioswab##bwl(__addr, val);                          \
+                                                                       \
+       BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));             \
+                                                                       \
+       *__addr = __val;                                                \
+       slow;                                                           \
+}                                                                      \
+                                                                       \
+static inline type pfx##in##bwl##p(unsigned long port)                 \
+{                                                                      \
+       volatile type *__addr;                                          \
+       type __val;                                                     \
+                                                                       \
+       __addr = __io(__swizzle_addr_##bwl(port));                      \
+                                                                       \
+       BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));             \
+                                                                       \
+       __val = *__addr;                                                \
+       slow;                                                           \
+                                                                       \
+       return pfx##ioswab##bwl(__addr, __val);                         \
+}
+
+#define __BUILD_MEMORY_PFX(bus, bwl, type)                             \
+       __BUILD_MEMORY_SINGLE(bus, bwl, type)
+
+#define BUILDIO_MEM(bwl, type)                                         \
+       __BUILD_MEMORY_PFX(, bwl, type)                                 \
+       __BUILD_MEMORY_PFX(__mem_, bwl, type)
+
+#define __BUILD_IOPORT_PFX(bus, bwl, type)                             \
+       __BUILD_IOPORT_SINGLE(bus, bwl, type, ,)                        \
+       __BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO)
+
+#define BUILDIO_IOPORT(bwl, type)                                      \
+       __BUILD_IOPORT_PFX(, bwl, type)                                 \
+       __BUILD_IOPORT_PFX(__mem_, bwl, type)
+
+BUILDIO_MEM(b, u8)
+BUILDIO_MEM(w, u16)
+BUILDIO_MEM(l, u32)
+
+BUILDIO_IOPORT(b, u8)
+BUILDIO_IOPORT(w, u16)
+BUILDIO_IOPORT(l, u32)
+
+#define readb_relaxed                  readb
+#define readw_relaxed                  readw
+#define readl_relaxed                  readl
+
+#define __BUILD_MEMORY_STRING(bwl, type)                               \
+static inline void writes##bwl(volatile void __iomem *addr,            \
+                              const void *data, unsigned int count)    \
+{                                                                      \
+       const type *__data = data;                                      \
+                                                                       \
+       while (count--)                                                 \
+               __mem_write##bwl(*__data++, addr);                      \
+}                                                                      \
+                                                                       \
+static inline void reads##bwl(const volatile void __iomem *addr,       \
+                             void *data, unsigned int count)           \
+{                                                                      \
+       type *__data = data;                                            \
+                                                                       \
+       while (count--)                                                 \
+               *__data++ = __mem_read##bwl(addr);                      \
 }
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
 
-#define writesb(p, d, l)       __raw_writesb((unsigned int)p, d, l)
-#define writesw(p, d, l)       __raw_writesw((unsigned int)p, d, l)
-#define writesl(p, d, l)       __raw_writesl((unsigned int)p, d, l)
+#define __BUILD_IOPORT_STRING(bwl, type)                               \
+static inline void outs##bwl(unsigned long port, const void *data,     \
+                            unsigned int count)                        \
+{                                                                      \
+       const type *__data = data;                                      \
+                                                                       \
+       while (count--)                                                 \
+               __mem_out##bwl(*__data++, port);                        \
+}                                                                      \
+                                                                       \
+static inline void ins##bwl(unsigned long port, void *data,            \
+                          unsigned int count)                          \
+{                                                                      \
+       type *__data = data;                                            \
+                                                                       \
+       while (count--)                                                 \
+               *__data++ = __mem_in##bwl(port);                        \
+}
 
-#define readsb(p, d, l)                __raw_readsb((unsigned int)p, d, l)
-#define readsw(p, d, l)                __raw_readsw((unsigned int)p, d, l)
-#define readsl(p, d, l)                __raw_readsl((unsigned int)p, d, l)
+#define BUILDSTRING(bwl, type)                                         \
+       __BUILD_MEMORY_STRING(bwl, type)                                \
+       __BUILD_IOPORT_STRING(bwl, type)
 
+BUILDSTRING(b, u8)
+BUILDSTRING(w, u16)
+BUILDSTRING(l, u32)
 
 /*
  * io{read,write}{8,16,32} macros in both le (for PCI style consumers) and native be
  */
 #ifndef ioread8
 
-#define ioread8(p)     ({ unsigned int __v = __raw_readb(p); __v; })
+#define ioread8(p)             ((unsigned int)readb(p))
 
-#define ioread16(p)    ({ unsigned int __v = le16_to_cpu(__raw_readw(p)); __v; })
-#define ioread16be(p)  ({ unsigned int __v = be16_to_cpu(__raw_readw(p)); __v; })
+#define ioread16(p)            ((unsigned int)readw(p))
+#define ioread16be(p)          ((unsigned int)__raw_readw(p))
 
-#define ioread32(p)    ({ unsigned int __v = le32_to_cpu(__raw_readl(p)); __v; })
-#define ioread32be(p)  ({ unsigned int __v = be32_to_cpu(__raw_readl(p)); __v; })
+#define ioread32(p)            ((unsigned int)readl(p))
+#define ioread32be(p)          ((unsigned int)__raw_readl(p))
 
-#define iowrite8(v,p)  __raw_writeb(v, p)
+#define iowrite8(v,p)          writeb(v, p)
 
-#define iowrite16(v,p) __raw_writew(cpu_to_le16(v), p)
-#define iowrite16be(v,p)       __raw_writew(cpu_to_be16(v), p)
+#define iowrite16(v,p)         writew(v, p)
+#define iowrite16be(v,p)       __raw_writew(v, p)
 
-#define iowrite32(v,p) __raw_writel(cpu_to_le32(v), p)
-#define iowrite32be(v,p)       __raw_writel(cpu_to_be32(v), p)
+#define iowrite32(v,p)         writel(v, p)
+#define iowrite32be(v,p)       __raw_writel(v, p)
 
-#define ioread8_rep(p,d,c)     __raw_readsb(p,d,c)
-#define ioread16_rep(p,d,c)    __raw_readsw(p,d,c)
-#define ioread32_rep(p,d,c)    __raw_readsl(p,d,c)
+#define ioread8_rep(p,d,c)     readsb(p,d,c)
+#define ioread16_rep(p,d,c)    readsw(p,d,c)
+#define ioread32_rep(p,d,c)    readsl(p,d,c)
 
-#define iowrite8_rep(p,s,c)    __raw_writesb(p,s,c)
-#define iowrite16_rep(p,s,c)   __raw_writesw(p,s,c)
-#define iowrite32_rep(p,s,c)   __raw_writesl(p,s,c)
+#define iowrite8_rep(p,s,c)    writesb(p,s,c)
+#define iowrite16_rep(p,s,c)   writesw(p,s,c)
+#define iowrite32_rep(p,s,c)   writesl(p,s,c)
 
 #endif
 
-
-/*
- * These two are only here because ALSA _thinks_ it needs them...
- */
 static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
                                 unsigned long count)
 {
-       char *p = to;
-       while (count) {
-               count--;
-               *p = readb(from);
-               p++;
-               from++;
-       }
+       memcpy(to, (const void __force *)from, count);
 }
 
 static inline void  memcpy_toio(volatile void __iomem *to, const void * from,
                                unsigned long count)
 {
-       const char *p = from;
-       while (count) {
-               count--;
-               writeb(*p, to);
-               p++;
-               to++;
-       }
+       memcpy((void __force *)to, from, count);
 }
 
 static inline void memset_io(volatile void __iomem *addr, unsigned char val,
@@ -142,99 +255,8 @@ static inline void memset_io(volatile void __iomem *addr, unsigned char val,
        memset((void __force *)addr, val, count);
 }
 
-/*
- * Bad read/write accesses...
- */
-extern void __readwrite_bug(const char *fn);
-
 #define IO_SPACE_LIMIT 0xffffffff
 
-/* Convert I/O port address to virtual address */
-#define __io(p)                ((void __iomem *)phys_to_uncached(p))
-
-/*
- *  IO port access primitives
- *  -------------------------
- *
- * The AVR32 doesn't have special IO access instructions; all IO is memory
- * mapped. Note that these are defined to perform little endian accesses
- * only. Their primary purpose is to access PCI and ISA peripherals.
- *
- * Note that for a big endian machine, this implies that the following
- * big endian mode connectivity is in place.
- *
- * The machine specific io.h include defines __io to translate an "IO"
- * address to a memory address.
- *
- * Note that we prevent GCC re-ordering or caching values in expressions
- * by introducing sequence points into the in*() definitions.  Note that
- * __raw_* do not guarantee this behaviour.
- *
- * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
- */
-#define outb(v, p)             __raw_writeb(v, __io(p))
-#define outw(v, p)             __raw_writew(cpu_to_le16(v), __io(p))
-#define outl(v, p)             __raw_writel(cpu_to_le32(v), __io(p))
-
-#define inb(p)                 __raw_readb(__io(p))
-#define inw(p)                 le16_to_cpu(__raw_readw(__io(p)))
-#define inl(p)                 le32_to_cpu(__raw_readl(__io(p)))
-
-static inline void __outsb(unsigned long port, void *addr, unsigned int count)
-{
-       while (count--) {
-               outb(*(u8 *)addr, port);
-               addr++;
-       }
-}
-
-static inline void __insb(unsigned long port, void *addr, unsigned int count)
-{
-       while (count--) {
-               *(u8 *)addr = inb(port);
-               addr++;
-       }
-}
-
-static inline void __outsw(unsigned long port, void *addr, unsigned int count)
-{
-       while (count--) {
-               outw(*(u16 *)addr, port);
-               addr += 2;
-       }
-}
-
-static inline void __insw(unsigned long port, void *addr, unsigned int count)
-{
-       while (count--) {
-               *(u16 *)addr = inw(port);
-               addr += 2;
-       }
-}
-
-static inline void __outsl(unsigned long port, void *addr, unsigned int count)
-{
-       while (count--) {
-               outl(*(u32 *)addr, port);
-               addr += 4;
-       }
-}
-
-static inline void __insl(unsigned long port, void *addr, unsigned int count)
-{
-       while (count--) {
-               *(u32 *)addr = inl(port);
-               addr += 4;
-       }
-}
-
-#define outsb(port, addr, count)       __outsb(port, addr, count)
-#define insb(port, addr, count)                __insb(port, addr, count)
-#define outsw(port, addr, count)       __outsw(port, addr, count)
-#define insw(port, addr, count)                __insw(port, addr, count)
-#define outsl(port, addr, count)       __outsl(port, addr, count)
-#define insl(port, addr, count)                __insl(port, addr, count)
-
 extern void __iomem *__ioremap(unsigned long offset, size_t size,
                               unsigned long flags);
 extern void __iounmap(void __iomem *addr);
@@ -292,6 +314,4 @@ extern void __iounmap(void __iomem *addr);
  */
 #define xlate_dev_kmem_ptr(p)   p
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_AVR32_IO_H */
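To make the generator macros above concrete, this is roughly what BUILDIO_MEM(w, u16) produces for the unprefixed case, expanded by hand (the BUILD_BUG_ON size check is left out of the sketch):

/* Hand expansion (sketch) of __BUILD_MEMORY_SINGLE(, w, u16): readw/writew
 * swizzle the address and swap the value as configured in <asm/arch/io.h>,
 * on top of direct pointer dereferences. */
static inline void writew(u16 val, volatile void __iomem *addr)
{
	volatile u16 *__addr;
	u16 __val;

	__addr = (void *)__swizzle_addr_w((unsigned long)(addr));
	__val = ioswabw(__addr, val);

	*__addr = __val;
}

static inline u16 readw(const volatile void __iomem *addr)
{
	volatile u16 *__addr;
	u16 __val;

	__addr = (void *)__swizzle_addr_w((unsigned long)(addr));

	__val = *__addr;
	return ioswabw(__addr, __val);
}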
index f691377..6a64833 100644 (file)
@@ -40,6 +40,14 @@ enum tlb_config {
        TLB_INVALID
 };
 
+#define AVR32_FEATURE_RMW      (1 << 0)
+#define AVR32_FEATURE_DSP      (1 << 1)
+#define AVR32_FEATURE_SIMD     (1 << 2)
+#define AVR32_FEATURE_OCD      (1 << 3)
+#define AVR32_FEATURE_PCTR     (1 << 4)
+#define AVR32_FEATURE_JAVA     (1 << 5)
+#define AVR32_FEATURE_FPU      (1 << 6)
+
 struct avr32_cpuinfo {
        struct clk *clk;
        unsigned long loops_per_jiffy;
@@ -48,6 +56,7 @@ struct avr32_cpuinfo {
        unsigned short arch_revision;
        unsigned short cpu_revision;
        enum tlb_config tlb_config;
+       unsigned long features;
 
        struct cache_info icache;
        struct cache_info dcache;
@@ -125,10 +134,10 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 #define thread_saved_pc(tsk)    ((tsk)->thread.cpu_context.pc)
 
 struct pt_regs;
-void show_trace(struct task_struct *task, unsigned long *stack,
-               struct pt_regs *regs);
-
 extern unsigned long get_wchan(struct task_struct *p);
+extern void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl);
+extern void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
+                              struct pt_regs *regs, const char *log_lvl);
 
 #define KSTK_EIP(tsk)  ((tsk)->thread.cpu_context.pc)
 #define KSTK_ESP(tsk)  ((tsk)->thread.cpu_context.ksp)
index 0a52242..1ff1a21 100644 (file)
@@ -124,19 +124,12 @@ struct tagtable {
 #define for_each_tag(t,base)                                           \
        for (t = base; t->hdr.size; t = tag_next(t))
 
-extern struct tag_mem_range *mem_phys;
-extern struct tag_mem_range *mem_reserved;
-extern struct tag_mem_range *mem_ramdisk;
-
 extern struct tag *bootloader_tags;
 
-extern void setup_bootmem(void);
-extern void setup_processor(void);
-extern void board_setup_fbmem(unsigned long fbmem_start,
-                             unsigned long fbmem_size);
+extern resource_size_t fbmem_start;
+extern resource_size_t fbmem_size;
 
-/* Chip-specific hook to enable the use of SDRAM */
-void chip_enable_sdram(void);
+void setup_processor(void);
 
 #endif /* !__ASSEMBLY__ */
 
index 543229d..a0d0507 100644 (file)
@@ -49,5 +49,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* __ASM_AVR32_SOCKET_H */
index 84f3d65..0802d74 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* __ASM_AVR32_SOCKIOS_H */
index f91975f..c02bc83 100644 (file)
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#ifndef __ASM_AVR32_SYSREG_H__
-#define __ASM_AVR32_SYSREG_H__
+#ifndef __ASM_AVR32_SYSREG_H
+#define __ASM_AVR32_SYSREG_H
 
 /* sysreg register offsets */
-#define SYSREG_SR                               0x0000
-#define SYSREG_EVBA                             0x0004
-#define SYSREG_ACBA                             0x0008
-#define SYSREG_CPUCR                            0x000c
-#define SYSREG_ECR                              0x0010
-#define SYSREG_RSR_SUP                          0x0014
-#define SYSREG_RSR_INT0                         0x0018
-#define SYSREG_RSR_INT1                         0x001c
-#define SYSREG_RSR_INT2                         0x0020
-#define SYSREG_RSR_INT3                         0x0024
-#define SYSREG_RSR_EX                           0x0028
-#define SYSREG_RSR_NMI                          0x002c
-#define SYSREG_RSR_DBG                          0x0030
-#define SYSREG_RAR_SUP                          0x0034
-#define SYSREG_RAR_INT0                         0x0038
-#define SYSREG_RAR_INT1                         0x003c
-#define SYSREG_RAR_INT2                         0x0040
-#define SYSREG_RAR_INT3                         0x0044
-#define SYSREG_RAR_EX                           0x0048
-#define SYSREG_RAR_NMI                          0x004c
-#define SYSREG_RAR_DBG                          0x0050
-#define SYSREG_JECR                             0x0054
-#define SYSREG_JOSP                             0x0058
-#define SYSREG_JAVA_LV0                         0x005c
-#define SYSREG_JAVA_LV1                         0x0060
-#define SYSREG_JAVA_LV2                         0x0064
-#define SYSREG_JAVA_LV3                         0x0068
-#define SYSREG_JAVA_LV4                         0x006c
-#define SYSREG_JAVA_LV5                         0x0070
-#define SYSREG_JAVA_LV6                         0x0074
-#define SYSREG_JAVA_LV7                         0x0078
-#define SYSREG_JTBA                             0x007c
-#define SYSREG_JBCR                             0x0080
-#define SYSREG_CONFIG0                          0x0100
-#define SYSREG_CONFIG1                          0x0104
-#define SYSREG_COUNT                            0x0108
-#define SYSREG_COMPARE                          0x010c
-#define SYSREG_TLBEHI                           0x0110
-#define SYSREG_TLBELO                           0x0114
-#define SYSREG_PTBR                             0x0118
-#define SYSREG_TLBEAR                           0x011c
-#define SYSREG_MMUCR                            0x0120
-#define SYSREG_TLBARLO                          0x0124
-#define SYSREG_TLBARHI                          0x0128
-#define SYSREG_PCCNT                            0x012c
-#define SYSREG_PCNT0                            0x0130
-#define SYSREG_PCNT1                            0x0134
-#define SYSREG_PCCR                             0x0138
-#define SYSREG_BEAR                             0x013c
+#define SYSREG_SR                              0x0000
+#define SYSREG_EVBA                            0x0004
+#define SYSREG_ACBA                            0x0008
+#define SYSREG_CPUCR                           0x000c
+#define SYSREG_ECR                             0x0010
+#define SYSREG_RSR_SUP                         0x0014
+#define SYSREG_RSR_INT0                                0x0018
+#define SYSREG_RSR_INT1                                0x001c
+#define SYSREG_RSR_INT2                                0x0020
+#define SYSREG_RSR_INT3                                0x0024
+#define SYSREG_RSR_EX                          0x0028
+#define SYSREG_RSR_NMI                         0x002c
+#define SYSREG_RSR_DBG                         0x0030
+#define SYSREG_RAR_SUP                         0x0034
+#define SYSREG_RAR_INT0                                0x0038
+#define SYSREG_RAR_INT1                                0x003c
+#define SYSREG_RAR_INT2                                0x0040
+#define SYSREG_RAR_INT3                                0x0044
+#define SYSREG_RAR_EX                          0x0048
+#define SYSREG_RAR_NMI                         0x004c
+#define SYSREG_RAR_DBG                         0x0050
+#define SYSREG_JECR                            0x0054
+#define SYSREG_JOSP                            0x0058
+#define SYSREG_JAVA_LV0                                0x005c
+#define SYSREG_JAVA_LV1                                0x0060
+#define SYSREG_JAVA_LV2                                0x0064
+#define SYSREG_JAVA_LV3                                0x0068
+#define SYSREG_JAVA_LV4                                0x006c
+#define SYSREG_JAVA_LV5                                0x0070
+#define SYSREG_JAVA_LV6                                0x0074
+#define SYSREG_JAVA_LV7                                0x0078
+#define SYSREG_JTBA                            0x007c
+#define SYSREG_JBCR                            0x0080
+#define SYSREG_CONFIG0                         0x0100
+#define SYSREG_CONFIG1                         0x0104
+#define SYSREG_COUNT                           0x0108
+#define SYSREG_COMPARE                         0x010c
+#define SYSREG_TLBEHI                          0x0110
+#define SYSREG_TLBELO                          0x0114
+#define SYSREG_PTBR                            0x0118
+#define SYSREG_TLBEAR                          0x011c
+#define SYSREG_MMUCR                           0x0120
+#define SYSREG_TLBARLO                         0x0124
+#define SYSREG_TLBARHI                         0x0128
+#define SYSREG_PCCNT                           0x012c
+#define SYSREG_PCNT0                           0x0130
+#define SYSREG_PCNT1                           0x0134
+#define SYSREG_PCCR                            0x0138
+#define SYSREG_BEAR                            0x013c
+#define SYSREG_SABAL                           0x0300
+#define SYSREG_SABAH                           0x0304
+#define SYSREG_SABD                            0x0308
 
 /* Bitfields in SR */
-#define SYSREG_SR_C_OFFSET                      0
-#define SYSREG_SR_C_SIZE                        1
-#define SYSREG_Z_OFFSET                         1
-#define SYSREG_Z_SIZE                           1
-#define SYSREG_SR_N_OFFSET                      2
-#define SYSREG_SR_N_SIZE                        1
-#define SYSREG_SR_V_OFFSET                      3
-#define SYSREG_SR_V_SIZE                        1
-#define SYSREG_Q_OFFSET                         4
-#define SYSREG_Q_SIZE                           1
-#define SYSREG_GM_OFFSET                        16
-#define SYSREG_GM_SIZE                          1
-#define SYSREG_I0M_OFFSET                       17
-#define SYSREG_I0M_SIZE                         1
-#define SYSREG_I1M_OFFSET                       18
-#define SYSREG_I1M_SIZE                         1
-#define SYSREG_I2M_OFFSET                       19
-#define SYSREG_I2M_SIZE                         1
-#define SYSREG_I3M_OFFSET                       20
-#define SYSREG_I3M_SIZE                         1
-#define SYSREG_EM_OFFSET                        21
-#define SYSREG_EM_SIZE                          1
-#define SYSREG_M0_OFFSET                        22
-#define SYSREG_M0_SIZE                          1
-#define SYSREG_M1_OFFSET                        23
-#define SYSREG_M1_SIZE                          1
-#define SYSREG_M2_OFFSET                        24
-#define SYSREG_M2_SIZE                          1
-#define SYSREG_SR_D_OFFSET                      26
-#define SYSREG_SR_D_SIZE                        1
-#define SYSREG_DM_OFFSET                        27
-#define SYSREG_DM_SIZE                          1
-#define SYSREG_SR_J_OFFSET                      28
-#define SYSREG_SR_J_SIZE                        1
-#define SYSREG_R_OFFSET                         29
-#define SYSREG_R_SIZE                           1
-#define SYSREG_H_OFFSET                         30
-#define SYSREG_H_SIZE                           1
-
-/* Bitfields in EVBA */
-
-/* Bitfields in ACBA */
+#define SYSREG_SR_C_OFFSET                     0
+#define SYSREG_SR_C_SIZE                       1
+#define SYSREG_Z_OFFSET                                1
+#define SYSREG_Z_SIZE                          1
+#define SYSREG_SR_N_OFFSET                     2
+#define SYSREG_SR_N_SIZE                       1
+#define SYSREG_SR_V_OFFSET                     3
+#define SYSREG_SR_V_SIZE                       1
+#define SYSREG_Q_OFFSET                                4
+#define SYSREG_Q_SIZE                          1
+#define SYSREG_L_OFFSET                                5
+#define SYSREG_L_SIZE                          1
+#define SYSREG_T_OFFSET                                14
+#define SYSREG_T_SIZE                          1
+#define SYSREG_SR_R_OFFSET                     15
+#define SYSREG_SR_R_SIZE                       1
+#define SYSREG_GM_OFFSET                       16
+#define SYSREG_GM_SIZE                         1
+#define SYSREG_I0M_OFFSET                      17
+#define SYSREG_I0M_SIZE                                1
+#define SYSREG_I1M_OFFSET                      18
+#define SYSREG_I1M_SIZE                                1
+#define SYSREG_I2M_OFFSET                      19
+#define SYSREG_I2M_SIZE                                1
+#define SYSREG_I3M_OFFSET                      20
+#define SYSREG_I3M_SIZE                                1
+#define SYSREG_EM_OFFSET                       21
+#define SYSREG_EM_SIZE                         1
+#define SYSREG_M0_OFFSET                       22
+#define SYSREG_M0_SIZE                         1
+#define SYSREG_M1_OFFSET                       23
+#define SYSREG_M1_SIZE                         1
+#define SYSREG_M2_OFFSET                       24
+#define SYSREG_M2_SIZE                         1
+#define SYSREG_SR_D_OFFSET                     26
+#define SYSREG_SR_D_SIZE                       1
+#define SYSREG_DM_OFFSET                       27
+#define SYSREG_DM_SIZE                         1
+#define SYSREG_SR_J_OFFSET                     28
+#define SYSREG_SR_J_SIZE                       1
+#define SYSREG_H_OFFSET                                29
+#define SYSREG_H_SIZE                          1
 
 /* Bitfields in CPUCR */
-#define SYSREG_BI_OFFSET                        0
-#define SYSREG_BI_SIZE                          1
-#define SYSREG_BE_OFFSET                        1
-#define SYSREG_BE_SIZE                          1
-#define SYSREG_FE_OFFSET                        2
-#define SYSREG_FE_SIZE                          1
-#define SYSREG_RE_OFFSET                        3
-#define SYSREG_RE_SIZE                          1
-#define SYSREG_IBE_OFFSET                       4
-#define SYSREG_IBE_SIZE                         1
-#define SYSREG_IEE_OFFSET                       5
-#define SYSREG_IEE_SIZE                         1
-
-/* Bitfields in ECR */
-#define SYSREG_ECR_OFFSET                       0
-#define SYSREG_ECR_SIZE                         32
-
-/* Bitfields in RSR_SUP */
-
-/* Bitfields in RSR_INT0 */
-
-/* Bitfields in RSR_INT1 */
-
-/* Bitfields in RSR_INT2 */
-
-/* Bitfields in RSR_INT3 */
-
-/* Bitfields in RSR_EX */
-
-/* Bitfields in RSR_NMI */
-
-/* Bitfields in RSR_DBG */
-
-/* Bitfields in RAR_SUP */
-
-/* Bitfields in RAR_INT0 */
-
-/* Bitfields in RAR_INT1 */
-
-/* Bitfields in RAR_INT2 */
-
-/* Bitfields in RAR_INT3 */
-
-/* Bitfields in RAR_EX */
-
-/* Bitfields in RAR_NMI */
-
-/* Bitfields in RAR_DBG */
-
-/* Bitfields in JECR */
-
-/* Bitfields in JOSP */
-
-/* Bitfields in JAVA_LV0 */
-
-/* Bitfields in JAVA_LV1 */
-
-/* Bitfields in JAVA_LV2 */
-
-/* Bitfields in JAVA_LV3 */
-
-/* Bitfields in JAVA_LV4 */
-
-/* Bitfields in JAVA_LV5 */
-
-/* Bitfields in JAVA_LV6 */
-
-/* Bitfields in JAVA_LV7 */
-
-/* Bitfields in JTBA */
-
-/* Bitfields in JBCR */
+#define SYSREG_BI_OFFSET                       0
+#define SYSREG_BI_SIZE                         1
+#define SYSREG_BE_OFFSET                       1
+#define SYSREG_BE_SIZE                         1
+#define SYSREG_FE_OFFSET                       2
+#define SYSREG_FE_SIZE                         1
+#define SYSREG_RE_OFFSET                       3
+#define SYSREG_RE_SIZE                         1
+#define SYSREG_IBE_OFFSET                      4
+#define SYSREG_IBE_SIZE                                1
+#define SYSREG_IEE_OFFSET                      5
+#define SYSREG_IEE_SIZE                                1
 
 /* Bitfields in CONFIG0 */
-#define SYSREG_CONFIG0_D_OFFSET                 1
-#define SYSREG_CONFIG0_D_SIZE                   1
-#define SYSREG_CONFIG0_S_OFFSET                 2
-#define SYSREG_CONFIG0_S_SIZE                   1
-#define SYSREG_O_OFFSET                         3
-#define SYSREG_O_SIZE                           1
-#define SYSREG_P_OFFSET                         4
-#define SYSREG_P_SIZE                           1
-#define SYSREG_CONFIG0_J_OFFSET                 5
-#define SYSREG_CONFIG0_J_SIZE                   1
-#define SYSREG_F_OFFSET                         6
-#define SYSREG_F_SIZE                           1
-#define SYSREG_MMUT_OFFSET                      7
-#define SYSREG_MMUT_SIZE                        3
-#define SYSREG_AR_OFFSET                        10
-#define SYSREG_AR_SIZE                          3
-#define SYSREG_AT_OFFSET                        13
-#define SYSREG_AT_SIZE                          3
-#define SYSREG_PROCESSORREVISION_OFFSET         16
-#define SYSREG_PROCESSORREVISION_SIZE           8
-#define SYSREG_PROCESSORID_OFFSET               24
-#define SYSREG_PROCESSORID_SIZE                 8
+#define SYSREG_CONFIG0_R_OFFSET                        0
+#define SYSREG_CONFIG0_R_SIZE                  1
+#define SYSREG_CONFIG0_D_OFFSET                        1
+#define SYSREG_CONFIG0_D_SIZE                  1
+#define SYSREG_CONFIG0_S_OFFSET                        2
+#define SYSREG_CONFIG0_S_SIZE                  1
+#define SYSREG_CONFIG0_O_OFFSET                        3
+#define SYSREG_CONFIG0_O_SIZE                  1
+#define SYSREG_CONFIG0_P_OFFSET                        4
+#define SYSREG_CONFIG0_P_SIZE                  1
+#define SYSREG_CONFIG0_J_OFFSET                        5
+#define SYSREG_CONFIG0_J_SIZE                  1
+#define SYSREG_CONFIG0_F_OFFSET                        6
+#define SYSREG_CONFIG0_F_SIZE                  1
+#define SYSREG_MMUT_OFFSET                     7
+#define SYSREG_MMUT_SIZE                       3
+#define SYSREG_AR_OFFSET                       10
+#define SYSREG_AR_SIZE                         3
+#define SYSREG_AT_OFFSET                       13
+#define SYSREG_AT_SIZE                         3
+#define SYSREG_PROCESSORREVISION_OFFSET                16
+#define SYSREG_PROCESSORREVISION_SIZE          8
+#define SYSREG_PROCESSORID_OFFSET              24
+#define SYSREG_PROCESSORID_SIZE                        8
 
 /* Bitfields in CONFIG1 */
-#define SYSREG_DASS_OFFSET                      0
-#define SYSREG_DASS_SIZE                        3
-#define SYSREG_DLSZ_OFFSET                      3
-#define SYSREG_DLSZ_SIZE                        3
-#define SYSREG_DSET_OFFSET                      6
-#define SYSREG_DSET_SIZE                        4
-#define SYSREG_IASS_OFFSET                      10
-#define SYSREG_IASS_SIZE                        2
-#define SYSREG_ILSZ_OFFSET                      13
-#define SYSREG_ILSZ_SIZE                        3
-#define SYSREG_ISET_OFFSET                      16
-#define SYSREG_ISET_SIZE                        4
-#define SYSREG_DMMUSZ_OFFSET                    20
-#define SYSREG_DMMUSZ_SIZE                      6
-#define SYSREG_IMMUSZ_OFFSET                    26
-#define SYSREG_IMMUSZ_SIZE                      6
-
-/* Bitfields in COUNT */
-
-/* Bitfields in COMPARE */
+#define SYSREG_DASS_OFFSET                     0
+#define SYSREG_DASS_SIZE                       3
+#define SYSREG_DLSZ_OFFSET                     3
+#define SYSREG_DLSZ_SIZE                       3
+#define SYSREG_DSET_OFFSET                     6
+#define SYSREG_DSET_SIZE                       4
+#define SYSREG_IASS_OFFSET                     10
+#define SYSREG_IASS_SIZE                       3
+#define SYSREG_ILSZ_OFFSET                     13
+#define SYSREG_ILSZ_SIZE                       3
+#define SYSREG_ISET_OFFSET                     16
+#define SYSREG_ISET_SIZE                       4
+#define SYSREG_DMMUSZ_OFFSET                   20
+#define SYSREG_DMMUSZ_SIZE                     6
+#define SYSREG_IMMUSZ_OFFSET                   26
+#define SYSREG_IMMUSZ_SIZE                     6
 
 /* Bitfields in TLBEHI */
-#define SYSREG_ASID_OFFSET                      0
-#define SYSREG_ASID_SIZE                        8
-#define SYSREG_TLBEHI_I_OFFSET                  8
-#define SYSREG_TLBEHI_I_SIZE                    1
-#define SYSREG_TLBEHI_V_OFFSET                  9
-#define SYSREG_TLBEHI_V_SIZE                    1
-#define SYSREG_VPN_OFFSET                       10
-#define SYSREG_VPN_SIZE                         22
+#define SYSREG_ASID_OFFSET                     0
+#define SYSREG_ASID_SIZE                       8
+#define SYSREG_TLBEHI_I_OFFSET                 8
+#define SYSREG_TLBEHI_I_SIZE                   1
+#define SYSREG_TLBEHI_V_OFFSET                 9
+#define SYSREG_TLBEHI_V_SIZE                   1
+#define SYSREG_VPN_OFFSET                      10
+#define SYSREG_VPN_SIZE                                22
 
 /* Bitfields in TLBELO */
-#define SYSREG_W_OFFSET                         0
-#define SYSREG_W_SIZE                           1
-#define SYSREG_TLBELO_D_OFFSET                  1
-#define SYSREG_TLBELO_D_SIZE                    1
-#define SYSREG_SZ_OFFSET                        2
-#define SYSREG_SZ_SIZE                          2
-#define SYSREG_AP_OFFSET                        4
-#define SYSREG_AP_SIZE                          3
-#define SYSREG_B_OFFSET                         7
-#define SYSREG_B_SIZE                           1
-#define SYSREG_G_OFFSET                         8
-#define SYSREG_G_SIZE                           1
-#define SYSREG_TLBELO_C_OFFSET                  9
-#define SYSREG_TLBELO_C_SIZE                    1
-#define SYSREG_PFN_OFFSET                       10
-#define SYSREG_PFN_SIZE                         22
-
-/* Bitfields in PTBR */
-
-/* Bitfields in TLBEAR */
+#define SYSREG_W_OFFSET                                0
+#define SYSREG_W_SIZE                          1
+#define SYSREG_TLBELO_D_OFFSET                 1
+#define SYSREG_TLBELO_D_SIZE                   1
+#define SYSREG_SZ_OFFSET                       2
+#define SYSREG_SZ_SIZE                         2
+#define SYSREG_AP_OFFSET                       4
+#define SYSREG_AP_SIZE                         3
+#define SYSREG_B_OFFSET                                7
+#define SYSREG_B_SIZE                          1
+#define SYSREG_G_OFFSET                                8
+#define SYSREG_G_SIZE                          1
+#define SYSREG_TLBELO_C_OFFSET                 9
+#define SYSREG_TLBELO_C_SIZE                   1
+#define SYSREG_PFN_OFFSET                      10
+#define SYSREG_PFN_SIZE                                22
 
 /* Bitfields in MMUCR */
-#define SYSREG_E_OFFSET                         0
-#define SYSREG_E_SIZE                           1
-#define SYSREG_M_OFFSET                         1
-#define SYSREG_M_SIZE                           1
-#define SYSREG_MMUCR_I_OFFSET                   2
-#define SYSREG_MMUCR_I_SIZE                     1
-#define SYSREG_MMUCR_N_OFFSET                   3
-#define SYSREG_MMUCR_N_SIZE                     1
-#define SYSREG_MMUCR_S_OFFSET                   4
-#define SYSREG_MMUCR_S_SIZE                     1
-#define SYSREG_DLA_OFFSET                       8
-#define SYSREG_DLA_SIZE                         6
-#define SYSREG_DRP_OFFSET                       14
-#define SYSREG_DRP_SIZE                         6
-#define SYSREG_ILA_OFFSET                       20
-#define SYSREG_ILA_SIZE                         6
-#define SYSREG_IRP_OFFSET                       26
-#define SYSREG_IRP_SIZE                         6
-
-/* Bitfields in TLBARLO */
-
-/* Bitfields in TLBARHI */
-
-/* Bitfields in PCCNT */
-
-/* Bitfields in PCNT0 */
-
-/* Bitfields in PCNT1 */
+#define SYSREG_E_OFFSET                                0
+#define SYSREG_E_SIZE                          1
+#define SYSREG_M_OFFSET                                1
+#define SYSREG_M_SIZE                          1
+#define SYSREG_MMUCR_I_OFFSET                  2
+#define SYSREG_MMUCR_I_SIZE                    1
+#define SYSREG_MMUCR_N_OFFSET                  3
+#define SYSREG_MMUCR_N_SIZE                    1
+#define SYSREG_MMUCR_S_OFFSET                  4
+#define SYSREG_MMUCR_S_SIZE                    1
+#define SYSREG_DLA_OFFSET                      8
+#define SYSREG_DLA_SIZE                                6
+#define SYSREG_DRP_OFFSET                      14
+#define SYSREG_DRP_SIZE                                6
+#define SYSREG_ILA_OFFSET                      20
+#define SYSREG_ILA_SIZE                                6
+#define SYSREG_IRP_OFFSET                      26
+#define SYSREG_IRP_SIZE                                6
 
 /* Bitfields in PCCR */
-
-/* Bitfields in BEAR */
+#define SYSREG_PCCR_R_OFFSET                   1
+#define SYSREG_PCCR_R_SIZE                     1
+#define SYSREG_PCCR_C_OFFSET                   2
+#define SYSREG_PCCR_C_SIZE                     1
+#define SYSREG_PCCR_S_OFFSET                   3
+#define SYSREG_PCCR_S_SIZE                     1
+#define SYSREG_IEC_OFFSET                      4
+#define SYSREG_IEC_SIZE                                1
+#define SYSREG_IE0_OFFSET                      5
+#define SYSREG_IE0_SIZE                                1
+#define SYSREG_IE1_OFFSET                      6
+#define SYSREG_IE1_SIZE                                1
+#define SYSREG_FC_OFFSET                       8
+#define SYSREG_FC_SIZE                         1
+#define SYSREG_F0_OFFSET                       9
+#define SYSREG_F0_SIZE                         1
+#define SYSREG_F1_OFFSET                       10
+#define SYSREG_F1_SIZE                         1
+#define SYSREG_CONF0_OFFSET                    12
+#define SYSREG_CONF0_SIZE                      6
+#define SYSREG_CONF1_OFFSET                    18
+#define SYSREG_CONF1_SIZE                      6
 
 /* Constants for ECR */
-#define ECR_UNRECOVERABLE                       0
-#define ECR_TLB_MULTIPLE                        1
-#define ECR_BUS_ERROR_WRITE                     2
-#define ECR_BUS_ERROR_READ                      3
-#define ECR_NMI                                 4
-#define ECR_ADDR_ALIGN_X                        5
-#define ECR_PROTECTION_X                        6
-#define ECR_DEBUG                               7
-#define ECR_ILLEGAL_OPCODE                      8
-#define ECR_UNIMPL_INSTRUCTION                  9
-#define ECR_PRIVILEGE_VIOLATION                 10
-#define ECR_FPE                                 11
-#define ECR_COPROC_ABSENT                       12
-#define ECR_ADDR_ALIGN_R                        13
-#define ECR_ADDR_ALIGN_W                        14
-#define ECR_PROTECTION_R                        15
-#define ECR_PROTECTION_W                        16
-#define ECR_DTLB_MODIFIED                       17
-#define ECR_TLB_MISS_X                          20
-#define ECR_TLB_MISS_R                          24
-#define ECR_TLB_MISS_W                          28
+#define ECR_UNRECOVERABLE                      0
+#define ECR_TLB_MULTIPLE                       1
+#define ECR_BUS_ERROR_WRITE                    2
+#define ECR_BUS_ERROR_READ                     3
+#define ECR_NMI                                        4
+#define ECR_ADDR_ALIGN_X                       5
+#define ECR_PROTECTION_X                       6
+#define ECR_DEBUG                              7
+#define ECR_ILLEGAL_OPCODE                     8
+#define ECR_UNIMPL_INSTRUCTION                 9
+#define ECR_PRIVILEGE_VIOLATION                        10
+#define ECR_FPE                                        11
+#define ECR_COPROC_ABSENT                      12
+#define ECR_ADDR_ALIGN_R                       13
+#define ECR_ADDR_ALIGN_W                       14
+#define ECR_PROTECTION_R                       15
+#define ECR_PROTECTION_W                       16
+#define ECR_DTLB_MODIFIED                      17
+#define ECR_TLB_MISS_X                         20
+#define ECR_TLB_MISS_R                         24
+#define ECR_TLB_MISS_W                         28
 
 /* Bit manipulation macros */
-#define SYSREG_BIT(name)                        (1 << SYSREG_##name##_OFFSET)
-#define SYSREG_BF(name,value)                   (((value) & ((1 << SYSREG_##name##_SIZE) - 1)) << SYSREG_##name##_OFFSET)
-#define SYSREG_BFEXT(name,value)                (((value) >> SYSREG_##name##_OFFSET) & ((1 << SYSREG_##name##_SIZE) - 1))
-#define SYSREG_BFINS(name,value,old)            (((old) & ~(((1 << SYSREG_##name##_SIZE) - 1) << SYSREG_##name##_OFFSET)) | SYSREG_BF(name,value))
+#define SYSREG_BIT(name)                               \
+       (1 << SYSREG_##name##_OFFSET)
+#define SYSREG_BF(name,value)                          \
+       (((value) & ((1 << SYSREG_##name##_SIZE) - 1))  \
+        << SYSREG_##name##_OFFSET)
+#define SYSREG_BFEXT(name,value)\
+       (((value) >> SYSREG_##name##_OFFSET)            \
+        & ((1 << SYSREG_##name##_SIZE) - 1))
+#define SYSREG_BFINS(name,value,old)                   \
+       (((old) & ~(((1 << SYSREG_##name##_SIZE) - 1)   \
+                   << SYSREG_##name##_OFFSET))         \
+        | SYSREG_BF(name,value))
 
+/* Register access macros */
 #ifdef __CHECKER__
 extern unsigned long __builtin_mfsr(unsigned long reg);
 extern void __builtin_mtsr(unsigned long reg, unsigned long value);
 #endif
 
-/* Register access macros */
-#define sysreg_read(reg)                        __builtin_mfsr(SYSREG_##reg)
-#define sysreg_write(reg, value)                __builtin_mtsr(SYSREG_##reg, value)
+#define sysreg_read(reg)               __builtin_mfsr(SYSREG_##reg)
+#define sysreg_write(reg, value)       __builtin_mtsr(SYSREG_##reg, value)
 
-#endif /* __ASM_AVR32_SYSREG_H__ */
+#endif /* __ASM_AVR32_SYSREG_H */
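Illustration only (not part of this patch): a minimal sketch of how the bitfield helpers above combine with sysreg_read(), assuming SYSREG_CONFIG0 names the CONFIG0 register address as defined earlier in this header; config0, cpu_id and cpu_rev are illustrative variable names.

    /* Hypothetical sketch: decode the CPU identification fields of CONFIG0. */
    unsigned long config0 = sysreg_read(CONFIG0);
    unsigned long cpu_id  = SYSREG_BFEXT(PROCESSORID, config0);
    unsigned long cpu_rev = SYSREG_BFEXT(PROCESSORREVISION, config0);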
index ac59605..a8236ba 100644 (file)
@@ -9,6 +9,7 @@
 #define __ASM_AVR32_SYSTEM_H
 
 #include <linux/compiler.h>
+#include <linux/linkage.h>
 #include <linux/types.h>
 
 #include <asm/ptrace.h>
@@ -140,15 +141,9 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                   sizeof(*(ptr))))
 
 struct pt_regs;
-extern void __die(const char *, struct pt_regs *, unsigned long,
-                 const char *, const char *, unsigned long);
-extern void __die_if_kernel(const char *, struct pt_regs *, unsigned long,
-                           const char *, const char *, unsigned long);
-
-#define die(msg, regs, err)                                    \
-       __die(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__)
-#define die_if_kernel(msg, regs, err)                                  \
-       __die_if_kernel(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__)
+void NORET_TYPE die(const char *str, struct pt_regs *regs, long err);
+void _exception(long signr, struct pt_regs *regs, int code,
+               unsigned long addr);
 
 #define arch_align_stack(x)    (x)
 
index d1f5b35..a2e606d 100644 (file)
@@ -83,6 +83,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SINGLE_STEP                6       /* single step after next break */
 #define TIF_MEMDIE             7
 #define TIF_RESTORE_SIGMASK    8       /* restore signal mask in do_signal */
+#define TIF_CPU_GOING_TO_SLEEP 9       /* CPU is entering sleep 0 mode */
 #define TIF_USERSPACE          31      /* true if FS sets userspace */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
@@ -94,6 +95,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SINGLE_STEP       (1 << TIF_SINGLE_STEP)
 #define _TIF_MEMDIE            (1 << TIF_MEMDIE)
 #define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
+#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
 
 /* XXX: These two masks must never span more than 16 bits! */
 /* work to do on interrupt/exception return */
index 74a679e..ed09239 100644 (file)
@@ -181,24 +181,23 @@ extern int __put_user_bad(void);
 
 #define __get_user_nocheck(x, ptr, size)                               \
 ({                                                                     \
-       typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0;            \
+       unsigned long __gu_val = 0;                                     \
        int __gu_err = 0;                                               \
                                                                        \
        switch (size) {                                                 \
        case 1: __get_user_asm("ub", __gu_val, ptr, __gu_err); break;   \
        case 2: __get_user_asm("uh", __gu_val, ptr, __gu_err); break;   \
        case 4: __get_user_asm("w", __gu_val, ptr, __gu_err); break;    \
-       case 8: __get_user_asm("d", __gu_val, ptr, __gu_err); break;    \
        default: __gu_err = __get_user_bad(); break;                    \
        }                                                               \
                                                                        \
-       x = __gu_val;                                                   \
+       x = (typeof(*(ptr)))__gu_val;                                   \
        __gu_err;                                                       \
 })
 
 #define __get_user_check(x, ptr, size)                                 \
 ({                                                                     \
-       typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0;            \
+       unsigned long __gu_val = 0;                                     \
        const typeof(*(ptr)) __user * __gu_addr = (ptr);                \
        int __gu_err = 0;                                               \
                                                                        \
@@ -216,10 +215,6 @@ extern int __put_user_bad(void);
                        __get_user_asm("w", __gu_val, __gu_addr,        \
                                       __gu_err);                       \
                        break;                                          \
-               case 8:                                                 \
-                       __get_user_asm("d", __gu_val, __gu_addr,        \
-                                      __gu_err);                       \
-                       break;                                          \
                default:                                                \
                        __gu_err = __get_user_bad();                    \
                        break;                                          \
@@ -227,7 +222,7 @@ extern int __put_user_bad(void);
        } else {                                                        \
                __gu_err = -EFAULT;                                     \
        }                                                               \
-       x = __gu_val;                                                   \
+       x = (typeof(*(ptr)))__gu_val;                                   \
        __gu_err;                                                       \
 })
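Illustration only (not part of this patch): a minimal caller sketch for the __get_user machinery above; read_user_int() is an illustrative helper, and get_user() is assumed to expand to __get_user_check() as in the usual uaccess.h layout.

    /* Hypothetical caller sketch: fetch an int from a checked user pointer. */
    static int read_user_int(int __user *uptr, int *out)
    {
            int val;

            if (get_user(val, uptr))        /* typically __get_user_check() */
                    return -EFAULT;
            *out = val;
            return 0;
    }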
 
index 01cfdf1..5b18dfd 100644 (file)
@@ -51,6 +51,8 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_SOCKET_H */
 
index 6c4012f..cfe7bfe 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif
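Illustration only (not part of this patch): a minimal userspace sketch of the two nanosecond timestamp interfaces added throughout these headers, assuming the installed kernel headers expose the new constants; error handling is omitted.

    #include <sys/socket.h>
    #include <sys/ioctl.h>
    #include <linux/sockios.h>
    #include <time.h>

    /* Hypothetical sketch: request nanosecond-resolution packet timestamps. */
    static void enable_ns_timestamps(int fd)
    {
            int on = 1;
            struct timespec ts;

            /* Ancillary-data path: recvmsg() will carry SCM_TIMESTAMPNS cmsgs. */
            setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &on, sizeof(on));

            /* ioctl path: timespec timestamp of the last packet received on fd. */
            ioctl(fd, SIOCGSTAMPNS, &ts);
    }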
index 31db18f..a823bef 100644 (file)
@@ -49,6 +49,8 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_SOCKET_H */
 
index 8a6e4b2..5dbdd13 100644 (file)
@@ -7,7 +7,8 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* _ASM_SOCKIOS__ */
 
index 8f4e319..a4a4937 100644 (file)
        __rem;                                                  \
  })
 
+static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+{
+       return dividend / divisor;
+}
+
 #elif BITS_PER_LONG == 32
 
 extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
@@ -49,6 +54,8 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
        __rem;                                          \
  })
 
+extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
+
 #else /* BITS_PER_LONG == ?? */
 
 # error do_div() does not yet support the C64
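Illustration only (not part of this patch): a minimal sketch contrasting the new div64_64() helper, which divides two full 64-bit values, with do_div(), which only accepts a 32-bit divisor; total_bytes and total_packets are illustrative variables.

    /* Hypothetical sketch: 64-by-64-bit division vs. the 32-bit-divisor do_div(). */
    uint64_t avg = div64_64(total_bytes, total_packets);

    uint64_t tmp = total_bytes;
    uint32_t rem = do_div(tmp, 1000);       /* tmp becomes total_bytes / 1000 */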
index 6d7e279..dc8f99e 100644 (file)
@@ -139,8 +139,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pte_same(A,B)  (pte_val(A) == pte_val(B))
 #endif
 
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define page_test_and_clear_dirty(page) (0)
+#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
+#define page_test_dirty(page)          (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
+#define page_clear_dirty(page)         do { } while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
 #define pte_maybe_dirty(pte)           pte_dirty(pte)
 #else
 #define pte_maybe_dirty(pte)           (1)
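Illustration only (not part of this patch): the old page_test_and_clear_dirty() is split so callers can test and clear the storage-key style dirty state in two steps; a minimal sketch of the resulting caller pattern.

    /* Hypothetical caller sketch, e.g. when tearing down a reverse mapping. */
    if (page_test_dirty(page)) {
            page_clear_dirty(page);
            set_page_dirty(page);   /* propagate the hardware dirty state */
    }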
index ebc830f..39911d8 100644 (file)
@@ -49,5 +49,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_SOCKET_H */
index d005d95..e9c7ec8 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* __ARCH_H8300_SOCKIOS__ */
index 75c67c7..438e980 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __I386_DIV64
 #define __I386_DIV64
 
+#include <linux/types.h>
+
 /*
  * do_div() is NOT a C function. It wants to return
  * two values (the quotient and the remainder), but
@@ -45,4 +47,6 @@ div_ll_X_l_rem(long long divs, long div, long *rem)
        return dum2;
 
 }
+
+extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif
index 5755d57..99ca648 100644 (file)
@@ -49,5 +49,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_SOCKET_H */
index 6b747f8..ff528c7 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif
index d638ef3..9e42ce4 100644 (file)
@@ -58,5 +58,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_IA64_SOCKET_H */
index cf94857..15c9246 100644 (file)
@@ -14,6 +14,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* _ASM_IA64_SOCKIOS_H */
index acdf748..793d5d3 100644 (file)
@@ -49,5 +49,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_M32R_SOCKET_H */
index f89962e..6c1fb9b 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif  /* _ASM_M32R_SOCKIOS_H */
index 9f65de1..33caad1 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _M68K_DIV64_H
 #define _M68K_DIV64_H
 
+#include <linux/types.h>
+
 /* n = n / base; return rem; */
 
 #define do_div(n, base) ({                                     \
@@ -23,4 +25,5 @@
        __rem;                                                  \
 })
 
+extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif /* _M68K_DIV64_H */
index a5966ec..6d21b90 100644 (file)
@@ -49,5 +49,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_SOCKET_H */
index 9b9ed97..c04a239 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* __ARCH_M68K_SOCKIOS__ */
index 4d560a5..7eb63de 100644 (file)
@@ -18,7 +18,8 @@ do {                                                                  \
 
 #define BUG_ON(condition)                                              \
 do {                                                                   \
-       __asm__ __volatile__("tne $0, %0" : : "r" (condition));         \
+       __asm__ __volatile__("tne $0, %0, %1"                           \
+                            : : "r" (condition), "i" (BRK_BUG));       \
 } while (0)
 
 #define HAVE_ARCH_BUG_ON
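Illustration only (not part of this patch): the second operand of the conditional trap embeds BRK_BUG as the trap code, so the MIPS trap handler can distinguish a failed BUG_ON() from other conditional traps; usage is unchanged.

    /* Hypothetical sketch: skb is an illustrative pointer. */
    BUG_ON(skb == NULL);            /* now traps with code BRK_BUG on failure */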
index 28d907d..4933b49 100644 (file)
@@ -96,6 +96,6 @@ extern void (*flush_data_cache_page)(unsigned long addr);
 unsigned long __init run_uncached(void *func);
 
 extern void *kmap_coherent(struct page *page, unsigned long addr);
-extern void kunmap_coherent(struct page *page);
+extern void kunmap_coherent(void);
 
 #endif /* _ASM_CACHEFLUSH_H */
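Illustration only (not part of this patch): kunmap_coherent() no longer takes the page argument; a minimal sketch of the map/copy/unmap pattern used by its callers, with page, vaddr, src and len as illustrative variables.

    /* Hypothetical caller sketch: coherent temporary mapping of a user page. */
    void *vto = kmap_coherent(page, vaddr);
    memcpy(vto, src, len);
    kunmap_coherent();              /* note: no page argument any more */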
index 20a81e1..290485a 100644 (file)
@@ -166,7 +166,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr,
 #else
          "r" (proto + len),
 #endif
-         "r" (sum));
+         "r" ((__force unsigned long)sum));
 
        return sum;
 }
index d107832..66189f5 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2000, 2004  Maciej W. Rozycki
- * Copyright (C) 2003 Ralf Baechle
+ * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -9,6 +9,8 @@
 #ifndef _ASM_DIV64_H
 #define _ASM_DIV64_H
 
+#include <linux/types.h>
+
 #if (_MIPS_SZLONG == 32)
 
 #include <asm/compiler.h>
@@ -78,6 +80,8 @@
        __quot = __quot << 32 | __low; \
        (n) = __quot; \
        __mod; })
+
+extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif /* (_MIPS_SZLONG == 32) */
 
 #if (_MIPS_SZLONG == 64)
        (n) = __quot; \
        __mod; })
 
+static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+{
+       return dividend / divisor;
+}
+
 #endif /* (_MIPS_SZLONG == 64) */
 
 #endif /* _ASM_DIV64_H */
index 4e12d1f..b414a7d 100644 (file)
@@ -68,8 +68,6 @@ do {                                                                  \
        /* We don't care about the c0 hazard here  */                   \
 } while (0)
 
-#define __fpu_enabled()        (read_c0_status() & ST0_CU1)
-
 #define enable_fpu()                                                   \
 do {                                                                   \
        if (cpu_has_fpu)                                                \
@@ -102,14 +100,19 @@ static inline void __own_fpu(void)
        set_thread_flag(TIF_USEDFPU);
 }
 
-static inline void own_fpu(int restore)
+static inline void own_fpu_inatomic(int restore)
 {
-       preempt_disable();
        if (cpu_has_fpu && !__is_fpu_owner()) {
                __own_fpu();
                if (restore)
                        _restore_fp(current);
        }
+}
+
+static inline void own_fpu(int restore)
+{
+       preempt_disable();
+       own_fpu_inatomic(restore);
        preempt_enable();
 }
 
@@ -162,18 +165,4 @@ static inline fpureg_t *get_fpu_regs(struct task_struct *tsk)
        return tsk->thread.fpu.fpr;
 }
 
-static inline void enable_fp_in_kernel(void)
-{
-       set_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
-       /* make sure CU1 and FPU ownership are consistent */
-       if (!__is_fpu_owner() && __fpu_enabled())
-               __disable_fpu();
-}
-
-static inline void disable_fp_in_kernel(void)
-{
-       BUG_ON(!__is_fpu_owner() && __fpu_enabled());
-       clear_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
-}
-
 #endif /* _ASM_FPU_H */
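Illustration only (not part of this patch): own_fpu() now wraps own_fpu_inatomic(), so code that already runs with preemption disabled can take FPU ownership without nesting preempt_disable(); a minimal sketch.

    /* Hypothetical caller sketch of the non-preemptible variant. */
    preempt_disable();
    own_fpu_inatomic(1);            /* claim the FPU and restore current's FP context */
    /* ... touch the FP context while non-preemptible ... */
    preempt_enable();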
diff --git a/include/asm-mips/jmr3927/irq.h b/include/asm-mips/jmr3927/irq.h
deleted file mode 100644 (file)
index e3e7ed3..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- *  linux/include/asm-mips/tx3927/irq.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 Toshiba Corporation
- */
-#ifndef __ASM_TX3927_IRQ_H
-#define __ASM_TX3927_IRQ_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/irq.h>
-
-struct tb_irq_space {
-       struct tb_irq_space* next;
-       int start_irqno;
-       int nr_irqs;
-       void (*mask_func)(int irq_nr, int space_id);
-       void (*unmask_func)(int irq_no, int space_id);
-       const char *name;
-       int space_id;
-       int can_share;
-};
-extern struct tb_irq_space* tb_irq_spaces;
-
-static __inline__ void add_tb_irq_space(struct tb_irq_space* sp)
-{
-       sp->next = tb_irq_spaces;
-       tb_irq_spaces = sp;
-}
-
-
-struct pt_regs;
-extern void
-toshibaboards_spurious(struct pt_regs *regs, int irq);
-extern void
-toshibaboards_irqdispatch(struct pt_regs *regs, int irq);
-
-extern struct irqaction *
-toshibaboards_get_irq_action(int irq);
-extern int
-toshibaboards_setup_irq(int irq, struct irqaction * new);
-
-
-extern int (*toshibaboards_gen_iack)(void);
-
-#endif /* !__ASSEMBLY__ */
-
-#define NR_ISA_IRQS 16
-#define TB_IRQ_IS_ISA(irq)     \
-       (0 <= (irq) && (irq) < NR_ISA_IRQS)
-#define TB_IRQ_TO_ISA_IRQ(irq) (irq)
-
-#endif /* __ASM_TX3927_IRQ_H */
index c50e68f..958e297 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Defines for the TJSYS JMR-TX3927/JMI-3927IO2/JMY-1394IF.
+ * Defines for the TJSYS JMR-TX3927
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
 
 #include <asm/jmr3927/tx3927.h>
 #include <asm/addrspace.h>
-#include <asm/jmr3927/irq.h>
-#ifndef __ASSEMBLY__
 #include <asm/system.h>
-#endif
 
 /* CS */
 #define JMR3927_ROMCE0 0x1fc00000      /* 4M */
 #define JMR3927_SDRAM_SIZE     0x02000000      /* 32M */
 #define JMR3927_PORT_BASE      KSEG1
 
-/* select indirect initiator access per errata */
-#define JMR3927_INIT_INDIRECT_PCI
-#define PCI_ISTAT_IDICC           0x1000
-#define PCI_IPCIBE_IBE_LONG       0
-#define PCI_IPCIBE_ICMD_IOREAD    2
-#define PCI_IPCIBE_ICMD_IOWRITE   3
-#define PCI_IPCIBE_ICMD_MEMREAD   6
-#define PCI_IPCIBE_ICMD_MEMWRITE  7
-#define PCI_IPCIBE_ICMD_SHIFT     4
-
 /* Address map (virtual address) */
 #define JMR3927_ROM0_BASE      (KSEG1 + JMR3927_ROMCE0)
 #define JMR3927_ROM1_BASE      (KSEG1 + JMR3927_ROMCE1)
 #define JMR3927_IOC_BASE       (KSEG1 + JMR3927_ROMCE2)
-#define JMR3927_IOB_BASE       (KSEG1 + JMR3927_ROMCE3)
-#define JMR3927_ISAMEM_BASE    (JMR3927_IOB_BASE)
-#define JMR3927_ISAIO_BASE     (JMR3927_IOB_BASE + 0x01000000)
-#define JMR3927_ISAC_BASE      (JMR3927_IOB_BASE + 0x02000000)
-#define JMR3927_LCDVGA_REG_BASE        (JMR3927_IOB_BASE + 0x03000000)
-#define JMR3927_LCDVGA_MEM_BASE        (JMR3927_IOB_BASE + 0x03800000)
-#define JMR3927_JMY1394_BASE   (KSEG1 + JMR3927_ROMCE5)
-#define JMR3927_PREMIER3_BASE  (JMR3927_JMY1394_BASE + 0x00100000)
 #define JMR3927_PCIMEM_BASE    (KSEG1 + JMR3927_PCIMEM)
 #define JMR3927_PCIIO_BASE     (KSEG1 + JMR3927_PCIIO)
 
 #define JMR3927_IOC_INTP_ADDR  (JMR3927_IOC_BASE + 0x000b0000)
 #define JMR3927_IOC_RESET_ADDR (JMR3927_IOC_BASE + 0x000f0000)
 
-#define JMR3927_ISAC_REV_ADDR  (JMR3927_ISAC_BASE + 0x00000000)
-#define JMR3927_ISAC_EINTS_ADDR        (JMR3927_ISAC_BASE + 0x00200000)
-#define JMR3927_ISAC_EINTM_ADDR        (JMR3927_ISAC_BASE + 0x00300000)
-#define JMR3927_ISAC_NMI_ADDR  (JMR3927_ISAC_BASE + 0x00400000)
-#define JMR3927_ISAC_LED_ADDR  (JMR3927_ISAC_BASE + 0x00500000)
-#define JMR3927_ISAC_INTP_ADDR (JMR3927_ISAC_BASE + 0x00800000)
-#define JMR3927_ISAC_INTS1_ADDR        (JMR3927_ISAC_BASE + 0x00900000)
-#define JMR3927_ISAC_INTS2_ADDR        (JMR3927_ISAC_BASE + 0x00a00000)
-#define JMR3927_ISAC_INTM_ADDR (JMR3927_ISAC_BASE + 0x00b00000)
-
 /* Flash ROM */
 #define JMR3927_FLASH_BASE     (JMR3927_ROM0_BASE)
 #define JMR3927_FLASH_SIZE     0x00400000
 
-/* bits for IOC_REV/IOC_BREV/ISAC_REV (high byte) */
+/* bits for IOC_REV/IOC_BREV (high byte) */
 #define JMR3927_IDT_MASK       0xfc
 #define JMR3927_REV_MASK       0x03
 #define JMR3927_IOC_IDT                0xe0
-#define JMR3927_ISAC_IDT       0x20
 
 /* bits for IOC_INTS1/IOC_INTS2/IOC_INTM/IOC_INTP (high byte) */
 #define JMR3927_IOC_INTB_PCIA  0
 #define JMR3927_IOC_RESET_CPU  1
 #define JMR3927_IOC_RESET_PCI  2
 
-/* bits for ISAC_EINTS/ISAC_EINTM (high byte) */
-#define JMR3927_ISAC_EINTB_IOCHK       2
-#define JMR3927_ISAC_EINTB_BWTH        4
-#define JMR3927_ISAC_EINTF_IOCHK       (1 << JMR3927_ISAC_EINTB_IOCHK)
-#define JMR3927_ISAC_EINTF_BWTH        (1 << JMR3927_ISAC_EINTB_BWTH)
-
-/* bits for ISAC_LED (high byte) */
-#define JMR3927_ISAC_LED_ISALED        0x01
-#define JMR3927_ISAC_LED_USRLED        0x02
-
-/* bits for ISAC_INTS/ISAC_INTM/ISAC_INTP (high byte) */
-#define JMR3927_ISAC_INTB_IRQ5 0
-#define JMR3927_ISAC_INTB_IRQKB        1
-#define JMR3927_ISAC_INTB_IRQMOUSE     2
-#define JMR3927_ISAC_INTB_IRQ4 3
-#define JMR3927_ISAC_INTB_IRQ12        4
-#define JMR3927_ISAC_INTB_IRQ3 5
-#define JMR3927_ISAC_INTB_IRQ10        6
-#define JMR3927_ISAC_INTB_ISAER        7
-#define JMR3927_ISAC_INTF_IRQ5 (1 << JMR3927_ISAC_INTB_IRQ5)
-#define JMR3927_ISAC_INTF_IRQKB        (1 << JMR3927_ISAC_INTB_IRQKB)
-#define JMR3927_ISAC_INTF_IRQMOUSE     (1 << JMR3927_ISAC_INTB_IRQMOUSE)
-#define JMR3927_ISAC_INTF_IRQ4 (1 << JMR3927_ISAC_INTB_IRQ4)
-#define JMR3927_ISAC_INTF_IRQ12        (1 << JMR3927_ISAC_INTB_IRQ12)
-#define JMR3927_ISAC_INTF_IRQ3 (1 << JMR3927_ISAC_INTB_IRQ3)
-#define JMR3927_ISAC_INTF_IRQ10        (1 << JMR3927_ISAC_INTB_IRQ10)
-#define JMR3927_ISAC_INTF_ISAER        (1 << JMR3927_ISAC_INTB_ISAER)
-
-#ifndef __ASSEMBLY__
-
-#if 0
-#define jmr3927_ioc_reg_out(d, a)      ((*(volatile unsigned short *)(a)) = (d) << 8)
-#define jmr3927_ioc_reg_in(a)          (((*(volatile unsigned short *)(a)) >> 8) & 0xff)
-#else
 #if defined(__BIG_ENDIAN)
 #define jmr3927_ioc_reg_out(d, a)      ((*(volatile unsigned char *)(a)) = (d))
 #define jmr3927_ioc_reg_in(a)          (*(volatile unsigned char *)(a))
 #else
 #error "No Endian"
 #endif
-#endif
-#define jmr3927_isac_reg_out(d, a)     ((*(volatile unsigned char *)(a)) = (d))
-#define jmr3927_isac_reg_in(a)         (*(volatile unsigned char *)(a))
-
-static inline int jmr3927_have_isac(void)
-{
-       unsigned char idt;
-       unsigned long flags;
-       unsigned long romcr3;
-
-       local_irq_save(flags);
-       romcr3 = tx3927_romcptr->cr[3];
-       tx3927_romcptr->cr[3] &= 0xffffefff;    /* do not wait infinitely */
-       idt = jmr3927_isac_reg_in(JMR3927_ISAC_REV_ADDR) & JMR3927_IDT_MASK;
-       tx3927_romcptr->cr[3] = romcr3;
-       local_irq_restore(flags);
-
-       return idt == JMR3927_ISAC_IDT;
-}
-#define jmr3927_have_nvram() \
-       ((jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR) & JMR3927_IDT_MASK) == JMR3927_IOC_IDT)
 
 /* LED macro */
 #define jmr3927_led_set(n/*0-16*/)     jmr3927_ioc_reg_out(~(n), JMR3927_IOC_LED_ADDR)
-#define jmr3927_io_led_set(n/*0-3*/)   jmr3927_isac_reg_out((n), JMR3927_ISAC_LED_ADDR)
 
 #define jmr3927_led_and_set(n/*0-16*/) jmr3927_ioc_reg_out((~(n)) & jmr3927_ioc_reg_in(JMR3927_IOC_LED_ADDR), JMR3927_IOC_LED_ADDR)
 
@@ -190,10 +102,6 @@ static inline int jmr3927_have_isac(void)
 #define jmr3927_dipsw2()       ((tx3927_pioptr->din & (1 << 10)) == 0)
 #define jmr3927_dipsw3()       ((jmr3927_ioc_reg_in(JMR3927_IOC_DIPSW_ADDR) & 2) == 0)
 #define jmr3927_dipsw4()       ((jmr3927_ioc_reg_in(JMR3927_IOC_DIPSW_ADDR) & 1) == 0)
-#define jmr3927_io_dipsw()     (jmr3927_isac_reg_in(JMR3927_ISAC_LED_ADDR) >> 4)
-
-
-#endif /* !__ASSEMBLY__ */
 
 /*
  * IRQ mappings
@@ -206,16 +114,10 @@ static inline int jmr3927_have_isac(void)
  */
 #define JMR3927_NR_IRQ_IRC     16      /* On-Chip IRC */
 #define JMR3927_NR_IRQ_IOC     8       /* PCI/MODEM/INT[6:7] */
-#define JMR3927_NR_IRQ_ISAC    8       /* ISA */
 
-
-#define JMR3927_IRQ_IRC        NR_ISA_IRQS
+#define JMR3927_IRQ_IRC        16
 #define JMR3927_IRQ_IOC        (JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC)
-#define JMR3927_IRQ_ISAC       (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC)
-#define JMR3927_IRQ_END        (JMR3927_IRQ_ISAC + JMR3927_NR_IRQ_ISAC)
-#define JMR3927_IRQ_IS_IRC(irq)        (JMR3927_IRQ_IRC <= (irq) && (irq) < JMR3927_IRQ_IOC)
-#define JMR3927_IRQ_IS_IOC(irq)                (JMR3927_IRQ_IOC <= (irq) && (irq) < JMR3927_IRQ_ISAC)
-#define JMR3927_IRQ_IS_ISAC(irq)       (JMR3927_IRQ_ISAC <= (irq) && (irq) < JMR3927_IRQ_END)
+#define JMR3927_IRQ_END        (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC)
 
 #define JMR3927_IRQ_IRC_INT0   (JMR3927_IRQ_IRC + TX3927_IR_INT0)
 #define JMR3927_IRQ_IRC_INT1   (JMR3927_IRQ_IRC + TX3927_IR_INT1)
@@ -240,37 +142,13 @@ static inline int jmr3927_have_isac(void)
 #define JMR3927_IRQ_IOC_INT6   (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_INT6)
 #define JMR3927_IRQ_IOC_INT7   (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_INT7)
 #define JMR3927_IRQ_IOC_SOFT   (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_SOFT)
-#define JMR3927_IRQ_ISAC_IRQ5  (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQ5)
-#define JMR3927_IRQ_ISAC_IRQKB (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQKB)
-#define JMR3927_IRQ_ISAC_IRQMOUSE      (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQMOUSE)
-#define JMR3927_IRQ_ISAC_IRQ4  (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQ4)
-#define JMR3927_IRQ_ISAC_IRQ12 (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQ12)
-#define JMR3927_IRQ_ISAC_IRQ3  (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQ3)
-#define JMR3927_IRQ_ISAC_IRQ10 (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQ10)
-#define JMR3927_IRQ_ISAC_ISAER (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_ISAER)
 
-#if 0  /* auto detect */
-/* RTL8019AS 10M Ether (JMI-3927IO2:JPW2:1-2 Short) */
-#define JMR3927_IRQ_ETHER1     JMR3927_IRQ_IRC_INT0
-#endif
 /* IOC (PCI, MODEM) */
 #define JMR3927_IRQ_IOCINT     JMR3927_IRQ_IRC_INT1
-/* ISAC (ISA, PCMCIA, KEYBOARD, MOUSE) */
-#define JMR3927_IRQ_ISACINT    JMR3927_IRQ_IRC_INT2
 /* TC35815 100M Ether (JMR-TX3912:JPW4:2-3 Short) */
 #define JMR3927_IRQ_ETHER0     JMR3927_IRQ_IRC_INT3
 /* Clock Tick (10ms) */
 #define JMR3927_IRQ_TICK       JMR3927_IRQ_IRC_TMR0
-#define JMR3927_IRQ_IDE                JMR3927_IRQ_ISAC_IRQ12
-
-/* IEEE1394 (Note that this may conflicts with RTL8019AS 10M Ether...) */
-#define JMR3927_IRQ_PREMIER3   JMR3927_IRQ_IRC_INT0
-
-/* I/O Ports */
-/* RTL8019AS 10M Ether */
-#define JMR3927_ETHER1_PORT    (JMR3927_ISAIO_BASE - JMR3927_PORT_BASE + 0x280)
-#define JMR3927_KBD_PORT       (JMR3927_ISAIO_BASE - JMR3927_PORT_BASE + 0x00800060)
-#define JMR3927_IDE_PORT       (JMR3927_ISAIO_BASE - JMR3927_PORT_BASE + 0x001001f0)
 
 /* Clocks */
 #define JMR3927_CORECLK        132710400       /* 132.7MHz */
index b3d67c7..0b9073b 100644 (file)
@@ -22,8 +22,6 @@
 #define TX3927_SIO_REG(ch)     (0xfffef300 + (ch) * 0x100)
 #define TX3927_PIO_REG         0xfffef500
 
-#ifndef __ASSEMBLY__
-
 struct tx3927_sdramc_reg {
        volatile unsigned long cr[8];
        volatile unsigned long tr[3];
@@ -164,8 +162,6 @@ struct tx3927_ccfg_reg {
        volatile unsigned long pdcr;
 };
 
-#endif /* !__ASSEMBLY__ */
-
 /*
  * SDRAMC
  */
@@ -348,8 +344,6 @@ struct tx3927_ccfg_reg {
 #define TX3927_PCFG_SELDMA_ALL 0x0000000f
 #define TX3927_PCFG_SELDMA(ch) (0x00000001<<(ch))
 
-#ifndef __ASSEMBLY__
-
 #define tx3927_sdramcptr       ((struct tx3927_sdramc_reg *)TX3927_SDRAMC_REG)
 #define tx3927_romcptr         ((struct tx3927_romc_reg *)TX3927_ROMC_REG)
 #define tx3927_dmaptr          ((struct tx3927_dma_reg *)TX3927_DMA_REG)
@@ -360,6 +354,4 @@ struct tx3927_ccfg_reg {
 #define tx3927_sioptr(ch)      ((struct txx927_sio_reg *)TX3927_SIO_REG(ch))
 #define tx3927_pioptr          ((struct txx927_pio_reg *)TX3927_PIO_REG)
 
-#endif /* !__ASSEMBLY__ */
-
 #endif /* __ASM_TX3927_H */
index 9d5792e..58a8ff6 100644 (file)
@@ -10,8 +10,6 @@
 #ifndef __ASM_TXX927_H
 #define __ASM_TXX927_H
 
-#ifndef __ASSEMBLY__
-
 struct txx927_tmr_reg {
        volatile unsigned long tcr;
        volatile unsigned long tisr;
@@ -52,9 +50,6 @@ struct txx927_pio_reg {
        volatile unsigned long maskext;
 };
 
-#endif /* !__ASSEMBLY__ */
-
-
 /*
  * TMR
  */
index 147844e..8c08fa9 100644 (file)
@@ -34,7 +34,7 @@ struct __large_pstruct { unsigned long buf[100]; };
 #define __get_dbe(x,ptr,size)                                          \
 ({                                                                     \
        long __gu_err;                                                  \
-       __typeof(*(ptr)) __gu_val;                                      \
+       __typeof__(*(ptr)) __gu_val;                                    \
        unsigned long __gu_addr;                                        \
        __asm__("":"=r" (__gu_val));                                    \
        __gu_addr = (unsigned long) (ptr);                              \
index fcec52b..c4729f5 100644 (file)
@@ -206,7 +206,7 @@ struct hpc3_regs {
 #define HPC3_GIOMISC_ERTIME    0x1     /* Enable external timer real time. */
 #define HPC3_GIOMISC_DENDIAN   0x2     /* dma descriptor endian, 1=lit 0=big */
 
-       volatile u32 eeprom;            /* EEPROM data reg. */
+       u32 eeprom;                     /* EEPROM data reg. */
 #define HPC3_EEPROM_EPROT      0x01    /* Protect register enable */
 #define HPC3_EEPROM_CSEL       0x02    /* Chip select */
 #define HPC3_EEPROM_ECLK       0x04    /* EEPROM clock */
index 6592f3b..f4981c4 100644 (file)
@@ -72,7 +72,7 @@
 
 #define ip22_is_fullhouse()    (sgioc->sysid & SGIOC_SYSID_FULLHOUSE)
 
-extern unsigned short ip22_eeprom_read(volatile unsigned int *ctrl, int reg);
+extern unsigned short ip22_eeprom_read(unsigned int *ctrl, int reg);
 extern unsigned short ip22_nvram_read(int reg);
 
 #endif
index c52f783..1576c23 100644 (file)
@@ -57,7 +57,7 @@ struct sgimc_regs {
        volatile u32 divider;   /* Divider reg for RPSS */
 
        u32 _unused5;
-       volatile u32 eeprom;    /* EEPROM byte reg for r4k */
+       u32 eeprom;             /* EEPROM byte reg for r4k */
 #define SGIMC_EEPROM_PRE       0x00000001 /* eeprom chip PRE pin assertion */
 #define SGIMC_EEPROM_CSEL      0x00000002 /* Active high, eeprom chip select */
 #define SGIMC_EEPROM_SECLOCK   0x00000004 /* EEPROM serial clock */
index 42d4cf0..c0d5206 100644 (file)
  * Mask values for each interrupt
  */
 
+#define _BCM1480_INT_MASK(w,n)              _SB_MAKEMASK(w,((n) & 0x3F))
 #define _BCM1480_INT_MASK1(n)               _SB_MAKEMASK1(((n) & 0x3F))
 #define _BCM1480_INT_OFFSET(n)              (((n) & 0x40) << 6)
 
 #define M_BCM1480_INT_PMI_HIGH              _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_HIGH)
 #define M_BCM1480_INT_PMO_LOW               _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_LOW)
 #define M_BCM1480_INT_PMO_HIGH              _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_HIGH)
+#define M_BCM1480_INT_MBOX_ALL              _BCM1480_INT_MASK(8,K_BCM1480_INT_MBOX_0_0)
 #define M_BCM1480_INT_MBOX_0_0              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_0)
 #define M_BCM1480_INT_MBOX_0_1              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_1)
 #define M_BCM1480_INT_MBOX_0_2              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_2)
index 6bdc941..a6a4374 100644 (file)
 #define M_BCM1480_MC_CS6                    _SB_MAKEMASK1(10)
 #define M_BCM1480_MC_CS7                    _SB_MAKEMASK1(11)
 
+#define M_BCM1480_MC_CS                  _SB_MAKEMASK(8,S_BCM1480_MC_CS0)
+#define V_BCM1480_MC_CS(x)               _SB_MAKEVALUE(x,S_BCM1480_MC_CS0)
+#define G_BCM1480_MC_CS(x)               _SB_GETVALUE(x,S_BCM1480_MC_CS0,M_BCM1480_MC_CS0)
+
 #define M_BCM1480_MC_CMD_ACTIVE             _SB_MAKEMASK1(16)
 
 /*
 #define K_BCM1480_MC_DRAM_TYPE_DDR2        2
 #endif
 
+#define K_BCM1480_MC_DRAM_TYPE_DDR2_PASS1   0
+
 #define V_BCM1480_MC_DRAM_TYPE_JEDEC        V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_JEDEC)
 #define V_BCM1480_MC_DRAM_TYPE_FCRAM        V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_FCRAM)
 
 #define M_BCM1480_MC_WR_ODT6_CS6           _SB_MAKEMASK1(31)
 
 #define M_BCM1480_MC_CS_ODD_ODT_EN         _SB_MAKEMASK1(32)
+
+#define S_BCM1480_MC_ODT0                  0
+#define M_BCM1480_MC_ODT0                  _SB_MAKEMASK(8,S_BCM1480_MC_ODT0)
+#define V_BCM1480_MC_ODT0(x)               _SB_MAKEVALUE(x,S_BCM1480_MC_ODT0)
+
+#define S_BCM1480_MC_ODT2                  8
+#define M_BCM1480_MC_ODT2                  _SB_MAKEMASK(8,S_BCM1480_MC_ODT2)
+#define V_BCM1480_MC_ODT2(x)               _SB_MAKEVALUE(x,S_BCM1480_MC_ODT2)
+
+#define S_BCM1480_MC_ODT4                  16
+#define M_BCM1480_MC_ODT4                  _SB_MAKEMASK(8,S_BCM1480_MC_ODT4)
+#define V_BCM1480_MC_ODT4(x)               _SB_MAKEVALUE(x,S_BCM1480_MC_ODT4)
+
+#define S_BCM1480_MC_ODT6                  24
+#define M_BCM1480_MC_ODT6                  _SB_MAKEMASK(8,S_BCM1480_MC_ODT6)
+#define V_BCM1480_MC_ODT6(x)               _SB_MAKEVALUE(x,S_BCM1480_MC_ODT6)
 #endif
 
 /*
 #define        M_BCM1480_MC_DQO_SHIFT            _SB_MAKEMASK1(47)
 #endif
 
-#define S_BCM1480_MC_DLL_DEFAULT            48
-#define M_BCM1480_MC_DLL_DEFAULT            _SB_MAKEMASK(6,S_BCM1480_MC_DLL_DEFAULT)
-#define V_BCM1480_MC_DLL_DEFAULT(x)         _SB_MAKEVALUE(x,S_BCM1480_MC_DLL_DEFAULT)
-#define G_BCM1480_MC_DLL_DEFAULT(x)         _SB_GETVALUE(x,S_BCM1480_MC_DLL_DEFAULT,M_BCM1480_MC_DLL_DEFAULT)
-#define V_BCM1480_MC_DLL_DEFAULT_DEFAULT    V_BCM1480_MC_DLL_DEFAULT(0x10)
+#define S_BCM1480_MC_DLL_DEFAULT           48
+#define M_BCM1480_MC_DLL_DEFAULT           _SB_MAKEMASK(6,S_BCM1480_MC_DLL_DEFAULT)
+#define V_BCM1480_MC_DLL_DEFAULT(x)        _SB_MAKEVALUE(x,S_BCM1480_MC_DLL_DEFAULT)
+#define G_BCM1480_MC_DLL_DEFAULT(x)        _SB_GETVALUE(x,S_BCM1480_MC_DLL_DEFAULT,M_BCM1480_MC_DLL_DEFAULT)
+#define V_BCM1480_MC_DLL_DEFAULT_DEFAULT   V_BCM1480_MC_DLL_DEFAULT(0x10)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
 #define S_BCM1480_MC_DLL_REGCTRL         54
index c2dd2fe..bda391d 100644 (file)
 
 #define A_BCM1480_DUART_IMRREG(chan)       (A_BCM1480_DUART(chan) + R_BCM1480_DUART_IMRREG(chan))
 #define A_BCM1480_DUART_ISRREG(chan)       (A_BCM1480_DUART(chan) + R_BCM1480_DUART_ISRREG(chan))
+#define A_BCM1480_DUART_IN_PORT(chan)       (A_BCM1480_DUART(chan) + R_DUART_INP_ORT)
 
 /*
  * These constants are the absolute addresses.
 #define R_BCM1480_IMR_ALIAS_MAILBOX_0           0x0000         /* 0x0x0 */
 #define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET       0x0008         /* 0x0x8 */
 
+/*
+ * these macros work together to build the address of a mailbox
+ * register, e.g., A_BCM1480_MAILBOX_REGISTER(0,R_BCM1480_IMR_MAILBOX_SET,2)
+ * for mbox_0_set_cpu2 returns 0x00100240C8
+ */
+#define R_BCM1480_IMR_MAILBOX_CPU         0x00
+#define R_BCM1480_IMR_MAILBOX_SET         0x08
+#define R_BCM1480_IMR_MAILBOX_CLR         0x10
+#define R_BCM1480_IMR_MAILBOX_NUM_SPACING 0x20
+#define A_BCM1480_MAILBOX_REGISTER(num,reg,cpu) \
+    (A_BCM1480_IMR_CPU0_BASE + \
+     (num * R_BCM1480_IMR_MAILBOX_NUM_SPACING) + \
+     (cpu * BCM1480_IMR_REGISTER_SPACING) + \
+     (R_BCM1480_IMR_MAILBOX_0_CPU + reg))
+
 /*  *********************************************************************
     * System Performance Counter Registers (Section 4.7)
     ********************************************************************* */
 #define A_BCM1480_SCD_PERF_CNT_6            0x0010020500
 #define A_BCM1480_SCD_PERF_CNT_7            0x0010020508
 
+#define BCM1480_SCD_NUM_PERF_CNT 8
+#define BCM1480_SCD_PERF_CNT_SPACING 8
+#define A_BCM1480_SCD_PERF_CNT(n) (A_SCD_PERF_CNT_0+(n*BCM1480_SCD_PERF_CNT_SPACING))
+
 /*  *********************************************************************
     * System Bus Watcher Registers (Section 4.8)
     ********************************************************************* */
index 648bed9..6111d6d 100644 (file)
@@ -10,7 +10,7 @@
     *
     *********************************************************************
     *
-    *  Copyright 2000,2001,2002,2003
+    *  Copyright 2000,2001,2002,2003,2004,2005
     *  Broadcom Corporation. All rights reserved.
     *
     *  This program is free software; you can redistribute it and/or
@@ -78,6 +78,7 @@
 #define K_SYS_PART_BCM1280          0x1206
 #define K_SYS_PART_BCM1455          0x1407
 #define K_SYS_PART_BCM1255          0x1257
+#define K_SYS_PART_BCM1158          0x1156
 
 /*
  * Manufacturing Information Register (Table 14)
  * System Performance Counter Configuration Register (Table 31)
  * Register: PERF_CNT_CFG_0
  *
- * Since the clear/enable bits are moved compared to the
- * 1250 and there are more fields, this register will be BCM1480 specific.
+ * SPC_CFG_SRC[0-3] is the same as the 1250.
+ * SPC_CFG_SRC[4-7] only exist on the 1480
+ * The clear/enable bits are in different locations on the 1250 and 1480.
  */
 
-#define S_BCM1480_SPC_CFG_SRC0              0
-#define M_BCM1480_SPC_CFG_SRC0              _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC0)
-#define V_BCM1480_SPC_CFG_SRC0(x)           _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC0)
-#define G_BCM1480_SPC_CFG_SRC0(x)           _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC0,M_BCM1480_SPC_CFG_SRC0)
-
-#define S_BCM1480_SPC_CFG_SRC1              8
-#define M_BCM1480_SPC_CFG_SRC1              _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC1)
-#define V_BCM1480_SPC_CFG_SRC1(x)           _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC1)
-#define G_BCM1480_SPC_CFG_SRC1(x)           _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC1,M_BCM1480_SPC_CFG_SRC1)
-
-#define S_BCM1480_SPC_CFG_SRC2              16
-#define M_BCM1480_SPC_CFG_SRC2              _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC2)
-#define V_BCM1480_SPC_CFG_SRC2(x)           _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC2)
-#define G_BCM1480_SPC_CFG_SRC2(x)           _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC2,M_BCM1480_SPC_CFG_SRC2)
-
-#define S_BCM1480_SPC_CFG_SRC3              24
-#define M_BCM1480_SPC_CFG_SRC3              _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC3)
-#define V_BCM1480_SPC_CFG_SRC3(x)           _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC3)
-#define G_BCM1480_SPC_CFG_SRC3(x)           _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC3,M_BCM1480_SPC_CFG_SRC3)
-
-#define S_BCM1480_SPC_CFG_SRC4              32
-#define M_BCM1480_SPC_CFG_SRC4              _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC4)
-#define V_BCM1480_SPC_CFG_SRC4(x)           _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC4)
-#define G_BCM1480_SPC_CFG_SRC4(x)           _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC4,M_BCM1480_SPC_CFG_SRC4)
-
-#define S_BCM1480_SPC_CFG_SRC5              40
-#define M_BCM1480_SPC_CFG_SRC5              _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC5)
-#define V_BCM1480_SPC_CFG_SRC5(x)           _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC5)
-#define G_BCM1480_SPC_CFG_SRC5(x)           _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC5,M_BCM1480_SPC_CFG_SRC5)
-
-#define S_BCM1480_SPC_CFG_SRC6              48
-#define M_BCM1480_SPC_CFG_SRC6              _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC6)
-#define V_BCM1480_SPC_CFG_SRC6(x)           _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC6)
-#define G_BCM1480_SPC_CFG_SRC6(x)           _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC6,M_BCM1480_SPC_CFG_SRC6)
-
-#define S_BCM1480_SPC_CFG_SRC7              56
-#define M_BCM1480_SPC_CFG_SRC7              _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC7)
-#define V_BCM1480_SPC_CFG_SRC7(x)           _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC7)
-#define G_BCM1480_SPC_CFG_SRC7(x)           _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC7,M_BCM1480_SPC_CFG_SRC7)
+#define S_SPC_CFG_SRC4              32
+#define M_SPC_CFG_SRC4              _SB_MAKEMASK(8,S_SPC_CFG_SRC4)
+#define V_SPC_CFG_SRC4(x)           _SB_MAKEVALUE(x,S_SPC_CFG_SRC4)
+#define G_SPC_CFG_SRC4(x)           _SB_GETVALUE(x,S_SPC_CFG_SRC4,M_SPC_CFG_SRC4)
+
+#define S_SPC_CFG_SRC5              40
+#define M_SPC_CFG_SRC5              _SB_MAKEMASK(8,S_SPC_CFG_SRC5)
+#define V_SPC_CFG_SRC5(x)           _SB_MAKEVALUE(x,S_SPC_CFG_SRC5)
+#define G_SPC_CFG_SRC5(x)           _SB_GETVALUE(x,S_SPC_CFG_SRC5,M_SPC_CFG_SRC5)
+
+#define S_SPC_CFG_SRC6              48
+#define M_SPC_CFG_SRC6              _SB_MAKEMASK(8,S_SPC_CFG_SRC6)
+#define V_SPC_CFG_SRC6(x)           _SB_MAKEVALUE(x,S_SPC_CFG_SRC6)
+#define G_SPC_CFG_SRC6(x)           _SB_GETVALUE(x,S_SPC_CFG_SRC6,M_SPC_CFG_SRC6)
+
+#define S_SPC_CFG_SRC7              56
+#define M_SPC_CFG_SRC7              _SB_MAKEMASK(8,S_SPC_CFG_SRC7)
+#define V_SPC_CFG_SRC7(x)           _SB_MAKEVALUE(x,S_SPC_CFG_SRC7)
+#define G_SPC_CFG_SRC7(x)           _SB_GETVALUE(x,S_SPC_CFG_SRC7,M_SPC_CFG_SRC7)
 
 /*
  * System Performance Counter Control Register (Table 32)
  * Register: PERF_CNT_CFG_1
  * BCM1480 specific
  */
-
-#define M_BCM1480_SPC_CFG_CLEAR             _SB_MAKEMASK1(0)
-#define M_BCM1480_SPC_CFG_ENABLE            _SB_MAKEMASK1(1)
+#define M_BCM1480_SPC_CFG_CLEAR     _SB_MAKEMASK1(0)
+#define M_BCM1480_SPC_CFG_ENABLE    _SB_MAKEMASK1(1)
+#if SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_SPC_CFG_CLEAR                        M_BCM1480_SPC_CFG_CLEAR
+#define M_SPC_CFG_ENABLE               M_BCM1480_SPC_CFG_ENABLE
+#endif
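Illustration only (not part of this patch): a minimal sketch of programming the shared SPC_CFG_SRC* event-source fields together with the BCM1480 clear/enable bits; A_BCM1480_SCD_PERF_CNT_CFG0/CFG1, ev4 and ev5 are hypothetical names standing in for the two configuration register addresses and the event selectors.

    /* Hypothetical sketch: select events for counters 4 and 5, then clear and start. */
    __raw_writeq(V_SPC_CFG_SRC4(ev4) | V_SPC_CFG_SRC5(ev5),
                 IOADDR(A_BCM1480_SCD_PERF_CNT_CFG0));  /* hypothetical register name */
    __raw_writeq(M_SPC_CFG_CLEAR | M_SPC_CFG_ENABLE,
                 IOADDR(A_BCM1480_SCD_PERF_CNT_CFG1));  /* hypothetical register name */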
 
 /*
  * System Performance Counters (Table 33)
  * Trace Control Register (Table 49)
  * Register: TRACE_CFG
  *
- * Bits 0..8 are the same as the BCM1250, rest are different.
- * Entire register is redefined below.
+ * BCM1480 changes to this register (other than location of the CUR_ADDR field)
+ * are defined below.
  */
 
-#define M_BCM1480_SCD_TRACE_CFG_RESET       _SB_MAKEMASK1(0)
-#define M_BCM1480_SCD_TRACE_CFG_START_READ  _SB_MAKEMASK1(1)
-#define M_BCM1480_SCD_TRACE_CFG_START       _SB_MAKEMASK1(2)
-#define M_BCM1480_SCD_TRACE_CFG_STOP        _SB_MAKEMASK1(3)
-#define M_BCM1480_SCD_TRACE_CFG_FREEZE      _SB_MAKEMASK1(4)
-#define M_BCM1480_SCD_TRACE_CFG_FREEZE_FULL _SB_MAKEMASK1(5)
-#define M_BCM1480_SCD_TRACE_CFG_DEBUG_FULL  _SB_MAKEMASK1(6)
-#define M_BCM1480_SCD_TRACE_CFG_FULL        _SB_MAKEMASK1(7)
-#define M_BCM1480_SCD_TRACE_CFG_FORCE_CNT   _SB_MAKEMASK1(8)
-
 #define S_BCM1480_SCD_TRACE_CFG_MODE        16
 #define M_BCM1480_SCD_TRACE_CFG_MODE        _SB_MAKEMASK(2,S_BCM1480_SCD_TRACE_CFG_MODE)
 #define V_BCM1480_SCD_TRACE_CFG_MODE(x)     _SB_MAKEVALUE(x,S_BCM1480_SCD_TRACE_CFG_MODE)
 #define K_BCM1480_SCD_TRACE_CFG_MODE_BYTEEN_INT        1
 #define K_BCM1480_SCD_TRACE_CFG_MODE_FLOW_ID   2
 
-#define S_BCM1480_SCD_TRACE_CFG_CUR_ADDR    24
-#define M_BCM1480_SCD_TRACE_CFG_CUR_ADDR    _SB_MAKEMASK(8,S_BCM1480_SCD_TRACE_CFG_CUR_ADDR)
-#define V_BCM1480_SCD_TRACE_CFG_CUR_ADDR(x) _SB_MAKEVALUE(x,S_BCM1480_SCD_TRACE_CFG_CUR_ADDR)
-#define G_BCM1480_SCD_TRACE_CFG_CUR_ADDR(x) _SB_GETVALUE(x,S_BCM1480_SCD_TRACE_CFG_CUR_ADDR,M_BCM1480_SCD_TRACE_CFG_CUR_ADDR)
-
 #endif /* _BCM1480_SCD_H */
index 3dfe29e..73bce90 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
+ * Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -19,8 +19,8 @@
 #ifndef _SIBYTE_BOARD_H
 #define _SIBYTE_BOARD_H
 
-
 #if defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_PTSWARM) || \
+    defined(CONFIG_SIBYTE_PT1120) || defined(CONFIG_SIBYTE_PT1125) || \
     defined(CONFIG_SIBYTE_CRHONE) || defined(CONFIG_SIBYTE_CRHINE) || \
     defined(CONFIG_SIBYTE_LITTLESUR)
 #include <asm/sibyte/swarm.h>
 #define setleds(t0,t1,c0,c1,c2,c3)
 #endif /* LEDS_PHYS */
 
+#else
+
+void swarm_setup(void);
+
+#ifdef LEDS_PHYS
+extern void setleds(char *str);
+#else
+#define setleds(s) do { } while (0)
+#endif /* LEDS_PHYS */
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _SIBYTE_BOARD_H */
index 57c53e6..11cad71 100644 (file)
@@ -18,7 +18,6 @@
 #ifndef __ASM_SIBYTE_CARMEL_H
 #define __ASM_SIBYTE_CARMEL_H
 
-
 #include <asm/sibyte/sb1250.h>
 #include <asm/sibyte/sb1250_int.h>
 
index 05c7b39..94e8299 100644 (file)
@@ -45,8 +45,6 @@
  * First, the interrupt numbers.
  */
 
-#if SIBYTE_HDR_FEATURE_1250_112x
-
 #define K_INT_SOURCES               64
 
 #define K_INT_WATCHDOG_TIMER_0      0
 #define M_INT_MBOX_1                _SB_MAKEMASK1(K_INT_MBOX_1)
 #define M_INT_MBOX_2                _SB_MAKEMASK1(K_INT_MBOX_2)
 #define M_INT_MBOX_3                _SB_MAKEMASK1(K_INT_MBOX_3)
+#define M_INT_MBOX_ALL              _SB_MAKEMASK(4,K_INT_MBOX_0)
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define M_INT_CYCLE_CP0_INT        _SB_MAKEMASK1(K_INT_CYCLE_CP0_INT)
 #define M_INT_CYCLE_CP1_INT        _SB_MAKEMASK1(K_INT_CYCLE_CP1_INT)
 
 
 #endif /* 1250/112x */
-
-#endif
index adfc688..833c8b5 100644 (file)
 #define M_MAC_BYPASS_16             _SB_MAKEMASK1(42)
 #define M_MAC_BYPASS_FCS_CHK       _SB_MAKEMASK1(43)
 
-#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_MAC_RX_CH_SEL_MSB        _SB_MAKEMASK1(44)
-#endif /* 1250 PASS2 || 112x PASS1 */
+#endif /* 1250 PASS2 || 112x PASS1 || 1480*/
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_MAC_SPLIT_CH_SEL         _SB_MAKEMASK1(45)
 /* XXX: Can't enable, as it has the same name as a pass2+ define below.  */
 /* #define M_MAC_TX_WR_THRSH           _SB_MAKEMASK(6,S_MAC_TX_WR_THRSH) */
 #endif /* up to 1250 PASS1 */
-#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_MAC_TX_WR_THRSH           _SB_MAKEMASK(7,S_MAC_TX_WR_THRSH)
-#endif /* 1250 PASS2 || 112x PASS1 */
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 #define V_MAC_TX_WR_THRSH(x)        _SB_MAKEVALUE(x,S_MAC_TX_WR_THRSH)
 #define G_MAC_TX_WR_THRSH(x)        _SB_GETVALUE(x,S_MAC_TX_WR_THRSH,M_MAC_TX_WR_THRSH)
 
 /* XXX: Can't enable, as it has the same name as a pass2+ define below.  */
 /* #define M_MAC_TX_RD_THRSH           _SB_MAKEMASK(6,S_MAC_TX_RD_THRSH) */
 #endif /* up to 1250 PASS1 */
-#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_MAC_TX_RD_THRSH           _SB_MAKEMASK(7,S_MAC_TX_RD_THRSH)
-#endif /* 1250 PASS2 || 112x PASS1 */
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 #define V_MAC_TX_RD_THRSH(x)        _SB_MAKEVALUE(x,S_MAC_TX_RD_THRSH)
 #define G_MAC_TX_RD_THRSH(x)        _SB_GETVALUE(x,S_MAC_TX_RD_THRSH,M_MAC_TX_RD_THRSH)
 
 #define V_MAC_RX_RL_THRSH(x)        _SB_MAKEVALUE(x,S_MAC_RX_RL_THRSH)
 #define G_MAC_RX_RL_THRSH(x)        _SB_GETVALUE(x,S_MAC_RX_RL_THRSH,M_MAC_RX_RL_THRSH)
 
-#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define S_MAC_ENC_FC_THRSH           _SB_MAKE64(56)
 #define M_MAC_ENC_FC_THRSH           _SB_MAKEMASK(6,S_MAC_ENC_FC_THRSH)
 #define V_MAC_ENC_FC_THRSH(x)        _SB_MAKEVALUE(x,S_MAC_ENC_FC_THRSH)
 #define G_MAC_ENC_FC_THRSH(x)        _SB_GETVALUE(x,S_MAC_ENC_FC_THRSH,M_MAC_ENC_FC_THRSH)
-#endif /* 1250 PASS2 || 112x PASS1 */
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 /*
  * MAC Frame Configuration Registers (Table 9-15)
 #define M_MAC_LTCOL_ERR             _SB_MAKEMASK1(44)
 #define M_MAC_EXCOL_ERR             _SB_MAKEMASK1(45)
 #define M_MAC_CNTR_OVRFL_ERR        _SB_MAKEMASK1(46)
-#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_MAC_SPLIT_EN             _SB_MAKEMASK1(47)   /* interrupt mask only */
-#endif /* 1250 PASS2 || 112x PASS1 */
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 #define S_MAC_COUNTER_ADDR          _SB_MAKE64(47)
 #define M_MAC_COUNTER_ADDR          _SB_MAKEMASK(5,S_MAC_COUNTER_ADDR)
 #define M_MAC_MCAST_INV         _SB_MAKEMASK1(4)
 #define M_MAC_BCAST_EN          _SB_MAKEMASK1(5)
 #define M_MAC_DIRECT_INV        _SB_MAKEMASK1(6)
-#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_MAC_ALLMCAST_EN      _SB_MAKEMASK1(7)
-#endif /* 1250 PASS2 || 112x PASS1 */
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 #define S_MAC_IPHDR_OFFSET      _SB_MAKE64(8)
 #define M_MAC_IPHDR_OFFSET      _SB_MAKEMASK(8,S_MAC_IPHDR_OFFSET)
index 26e4214..4fe848f 100644 (file)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define M_MC_PRE_ON_A8              _SB_MAKEMASK1(36)
-#define M_MC_RAM_WITH_A13           _SB_MAKEMASK1(38)
+#define M_MC_RAM_WITH_A13           _SB_MAKEMASK1(37)
 #endif /* 1250 PASS3 || 112x PASS1 */
 
 
index bab3a45..da7c188 100644 (file)
 
 #endif
 
+
 /*  *********************************************************************
     * PCI Interface Registers
     ********************************************************************* */
 #define R_MAC_VLANTAG                   0x00000110
 #define R_MAC_FRAMECFG                  0x00000118
 #define R_MAC_EOPCNT                    0x00000120
-#define R_MAC_FIFO_PTRS                 0x00000130
+#define R_MAC_FIFO_PTRS                 0x00000128
 #define R_MAC_ADFILTER_CFG              0x00000200
 #define R_MAC_ETHERNET_ADDR             0x00000208
 #define R_MAC_PKT_TYPE                  0x00000210
-#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define R_MAC_ADMASK0                  0x00000218
 #define R_MAC_ADMASK1                  0x00000220
-#endif /* 1250 PASS3 || 112x PASS1 */
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 #define R_MAC_HASH_BASE                 0x00000240
 #define R_MAC_ADDR_BASE                 0x00000280
 #define R_MAC_CHLO0_BASE                0x00000300
 #define R_MAC_INT_MASK                  0x00000410
 #define R_MAC_TXD_CTL                   0x00000420
 #define R_MAC_MDIO                      0x00000428
-#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define R_MAC_STATUS1                  0x00000430
-#endif /* 1250 PASS2 || 112x PASS1 */
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 #define R_MAC_DEBUG_STATUS              0x00000448
 
 #define MAC_HASH_COUNT                 8
 #define R_DUART_RX_HOLD             0x160
 #define R_DUART_TX_HOLD             0x170
 
-#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define R_DUART_FULL_CTL           0x140
 #define R_DUART_OPCR_X             0x180
 #define R_DUART_AUXCTL_X           0x190
-#endif /* 1250 PASS2 || 112x PASS1 */
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 
 /*
 #define R_DUART_IMR_B               0x350
 #define R_DUART_OUT_PORT            0x360
 #define R_DUART_OPCR                0x370
+#define R_DUART_IN_PORT             0x380
 
 #define R_DUART_SET_OPR                    0x3B0
 #define R_DUART_CLEAR_OPR          0x3C0
 #define A_ADDR_TRAP_REG_DEBUG      0x0010020460
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
+#define ADDR_TRAP_SPACING 8
+#define NUM_ADDR_TRAP 4
+#define A_ADDR_TRAP_UP(n) (A_ADDR_TRAP_UP_0 + ((n) * ADDR_TRAP_SPACING))
+#define A_ADDR_TRAP_DOWN(n) (A_ADDR_TRAP_DOWN_0 + ((n) * ADDR_TRAP_SPACING))
+#define A_ADDR_TRAP_CFG(n) (A_ADDR_TRAP_CFG_0 + ((n) * ADDR_TRAP_SPACING))
+
 
 /*  *********************************************************************
     * System Interrupt Mapper Registers
     ********************************************************************* */
 
-#if SIBYTE_HDR_FEATURE_1250_112x
 #define A_IMR_CPU0_BASE                 0x0010020000
 #define A_IMR_CPU1_BASE                 0x0010022000
 #define IMR_REGISTER_SPACING            0x2000
 #define A_IMR_REGISTER(cpu,reg) (A_IMR_MAPPER(cpu)+(reg))
 
 #define R_IMR_INTERRUPT_DIAG            0x0010
+#define R_IMR_INTERRUPT_LDT             0x0018
 #define R_IMR_INTERRUPT_MASK            0x0028
 #define R_IMR_INTERRUPT_TRACE           0x0038
 #define R_IMR_INTERRUPT_SOURCE_STATUS   0x0040
 #define R_IMR_INTERRUPT_STATUS_COUNT    7
 #define R_IMR_INTERRUPT_MAP_BASE        0x0200
 #define R_IMR_INTERRUPT_MAP_COUNT       64
-#endif /* 1250/112x */
+
+/*
+ * These macros work together to build the address of a mailbox
+ * register, e.g., A_MAILBOX_REGISTER(R_IMR_MAILBOX_SET_CPU, 2)
+ * for mbox_0_set_cpu2 returns 0x00100240C8.
+ */
+#define A_MAILBOX_REGISTER(reg, cpu) \
+    (A_IMR_CPU0_BASE + ((cpu) * IMR_REGISTER_SPACING) + (reg))
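The arithmetic is easy to check in isolation. The sketch below is standalone: A_IMR_CPU0_BASE and IMR_REGISTER_SPACING come from the definitions above, while the 0x00C8 offset for R_IMR_MAILBOX_SET_CPU is an assumption inferred from the example address in the comment.

#include <stdio.h>

#define A_IMR_CPU0_BASE       0x0010020000ULL
#define IMR_REGISTER_SPACING  0x2000

/* assumed offset; only used to reproduce the example address above */
#define R_IMR_MAILBOX_SET_CPU 0x00C8

#define A_MAILBOX_REGISTER(reg, cpu) \
        (A_IMR_CPU0_BASE + ((cpu) * IMR_REGISTER_SPACING) + (reg))

int main(void)
{
        /* CPU 2: 0x0010020000 + 2 * 0x2000 + 0xC8 = 0x00100240C8 */
        printf("0x%010llx\n",
               (unsigned long long)A_MAILBOX_REGISTER(R_IMR_MAILBOX_SET_CPU, 2));
        return 0;
}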
 
 /*  *********************************************************************
     * System Performance Counter Registers
 #define A_SCD_PERF_CNT_2            0x00100204E0
 #define A_SCD_PERF_CNT_3            0x00100204E8
 
+#define SCD_NUM_PERF_CNT 4
+#define SCD_PERF_CNT_SPACING 8
+#define A_SCD_PERF_CNT(n) (A_SCD_PERF_CNT_0 + ((n) * SCD_PERF_CNT_SPACING))
+
 /*  *********************************************************************
     * System Bus Watcher Registers
     ********************************************************************* */
 #define A_SCD_TRACE_SEQUENCE_6      0x0010020A90
 #define A_SCD_TRACE_SEQUENCE_7      0x0010020A98
 
+#define TRACE_REGISTER_SPACING 8
+#define TRACE_NUM_REGISTERS    8
+#define A_SCD_TRACE_EVENT(n) (((n) & 4) ? \
+   (A_SCD_TRACE_EVENT_4 + (((n) & 3) * TRACE_REGISTER_SPACING)) : \
+   (A_SCD_TRACE_EVENT_0 + ((n) * TRACE_REGISTER_SPACING)))
+#define A_SCD_TRACE_SEQUENCE(n) (((n) & 4) ? \
+   (A_SCD_TRACE_SEQUENCE_4 + (((n) & 3) * TRACE_REGISTER_SPACING)) : \
+   (A_SCD_TRACE_SEQUENCE_0 + ((n) * TRACE_REGISTER_SPACING)))
+
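The trace event and sequence registers sit in two four-register banks, which is why the helpers test bit 2 of the index and use only the low two bits within a bank. A small model of that indexing follows; the bank bases are placeholders standing in for A_SCD_TRACE_EVENT_0 and A_SCD_TRACE_EVENT_4, not the real SCD addresses.

#include <stdio.h>

#define TRACE_REGISTER_SPACING 8
#define BANK0_BASE 0x1000ULL    /* placeholder for A_SCD_TRACE_EVENT_0 */
#define BANK1_BASE 0x2000ULL    /* placeholder for A_SCD_TRACE_EVENT_4 */

#define TRACE_EVENT(n) (((n) & 4) ? \
        (BANK1_BASE + (((n) & 3) * TRACE_REGISTER_SPACING)) : \
        (BANK0_BASE + ((n) * TRACE_REGISTER_SPACING)))

int main(void)
{
        /* indices 0-3 land in bank 0, indices 4-7 in bank 1 */
        for (int n = 0; n < 8; n++)
                printf("event %d -> 0x%llx\n", n,
                       (unsigned long long)TRACE_EVENT(n));
        return 0;
}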
 /*  *********************************************************************
     * System Generic DMA Registers
     ********************************************************************* */
index 7ed0bb6..9ea3da3 100644 (file)
@@ -10,7 +10,7 @@
     *
     *********************************************************************
     *
-    *  Copyright 2000,2001,2002,2003
+    *  Copyright 2000,2001,2002,2003,2004,2005
     *  Broadcom Corporation. All rights reserved.
     *
     *  This program is free software; you can redistribute it and/or
@@ -84,6 +84,7 @@
 #define K_SYS_REVISION_BCM112x_A2      0x21
 #define K_SYS_REVISION_BCM112x_A3      0x22
 #define K_SYS_REVISION_BCM112x_A4      0x23
+#define K_SYS_REVISION_BCM112x_B0      0x30
 
 #define K_SYS_REVISION_BCM1480_S0      0x01
 #define K_SYS_REVISION_BCM1480_A1      0x02
  * (For the assembler version, sysrev and dest may be the same register.
  * Also, it clobbers AT.)
  */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
 #define SYS_SOC_TYPE(dest, sysrev)                                     \
        .set push ;                                                     \
        .set reorder ;                                                  \
 #define G_SYS_YPOS(x)             _SB_GETVALUE(x,S_SYS_YPOS,M_SYS_YPOS)
 #endif
 
+
 /*
  * System Config Register (Table 4-2)
  * Register: SCD_SYSTEM_CFG
  */
 
 #define V_SCD_TIMER_FREQ            1000000
-#define V_SCD_TIMER_WIDTH           23
 
 #define S_SCD_TIMER_INIT            0
-#define M_SCD_TIMER_INIT            _SB_MAKEMASK(V_SCD_TIMER_WIDTH,S_SCD_TIMER_INIT)
+#define M_SCD_TIMER_INIT            _SB_MAKEMASK(23,S_SCD_TIMER_INIT)
 #define V_SCD_TIMER_INIT(x)         _SB_MAKEVALUE(x,S_SCD_TIMER_INIT)
 #define G_SCD_TIMER_INIT(x)         _SB_GETVALUE(x,S_SCD_TIMER_INIT,M_SCD_TIMER_INIT)
 
+#define V_SCD_TIMER_WIDTH          23
 #define S_SCD_TIMER_CNT             0
 #define M_SCD_TIMER_CNT             _SB_MAKEMASK(V_SCD_TIMER_WIDTH,S_SCD_TIMER_CNT)
 #define V_SCD_TIMER_CNT(x)         _SB_MAKEVALUE(x,S_SCD_TIMER_CNT)
  * System Performance Counters
  */
 
-#if SIBYTE_HDR_FEATURE_1250_112x
 #define S_SPC_CFG_SRC0            0
 #define M_SPC_CFG_SRC0            _SB_MAKEMASK(8,S_SPC_CFG_SRC0)
 #define V_SPC_CFG_SRC0(x)         _SB_MAKEVALUE(x,S_SPC_CFG_SRC0)
 #define V_SPC_CFG_SRC3(x)         _SB_MAKEVALUE(x,S_SPC_CFG_SRC3)
 #define G_SPC_CFG_SRC3(x)         _SB_GETVALUE(x,S_SPC_CFG_SRC3,M_SPC_CFG_SRC3)
 
+#if SIBYTE_HDR_FEATURE_1250_112x
 #define M_SPC_CFG_CLEAR                _SB_MAKEMASK1(32)
 #define M_SPC_CFG_ENABLE       _SB_MAKEMASK1(33)
 #endif
  * Trace Buffer Config register
  */
 
-#if SIBYTE_HDR_FEATURE_1250_112x
-
 #define M_SCD_TRACE_CFG_RESET           _SB_MAKEMASK1(0)
 #define M_SCD_TRACE_CFG_START_READ      _SB_MAKEMASK1(1)
 #define M_SCD_TRACE_CFG_START           _SB_MAKEMASK1(2)
 #define M_SCD_TRACE_CFG_FREEZE_FULL     _SB_MAKEMASK1(5)
 #define M_SCD_TRACE_CFG_DEBUG_FULL      _SB_MAKEMASK1(6)
 #define M_SCD_TRACE_CFG_FULL            _SB_MAKEMASK1(7)
-#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_SCD_TRACE_CFG_FORCECNT        _SB_MAKEMASK1(8)
-#endif /* 1250 PASS2 || 112x PASS1 */
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
+/*
+ * This field is the same on the 1250/112x and 1480, just located in
+ * a slightly different place in the register.
+ */
+#if SIBYTE_HDR_FEATURE_1250_112x
 #define S_SCD_TRACE_CFG_CUR_ADDR        10
+#else
+#if SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_SCD_TRACE_CFG_CUR_ADDR        24
+#endif /* 1480 */
+#endif  /* 1250/112x */
+
 #define M_SCD_TRACE_CFG_CUR_ADDR        _SB_MAKEMASK(8,S_SCD_TRACE_CFG_CUR_ADDR)
 #define V_SCD_TRACE_CFG_CUR_ADDR(x)     _SB_MAKEVALUE(x,S_SCD_TRACE_CFG_CUR_ADDR)
 #define G_SCD_TRACE_CFG_CUR_ADDR(x)     _SB_GETVALUE(x,S_SCD_TRACE_CFG_CUR_ADDR,M_SCD_TRACE_CFG_CUR_ADDR)
 
-#endif /* 1250/112x */
-
 /*
  * Trace Event registers
  */
index 86db37e..540865f 100644 (file)
 #define SIBYTE_HAVE_IDE    1
 #define SIBYTE_DEFAULT_CONSOLE "ttyS0,115200"
 #endif
+#ifdef CONFIG_SIBYTE_PT1120
+#define SIBYTE_BOARD_NAME "PT1120"
+#define SIBYTE_HAVE_PCMCIA 1
+#define SIBYTE_HAVE_IDE    1
+#define SIBYTE_DEFAULT_CONSOLE "ttyS0,115200"
+#endif
+#ifdef CONFIG_SIBYTE_PT1125
+#define SIBYTE_BOARD_NAME "PT1125"
+#define SIBYTE_HAVE_PCMCIA 1
+#define SIBYTE_HAVE_IDE    1
+#define SIBYTE_DEFAULT_CONSOLE "ttyS0,115200"
+#endif
 #ifdef CONFIG_SIBYTE_LITTLESUR
 #define SIBYTE_BOARD_NAME "BCM91250C2 (LittleSur)"
 #define SIBYTE_HAVE_PCMCIA 0
index 36ebe4e..9594568 100644 (file)
@@ -70,6 +70,8 @@ To add: #define SO_REUSEPORT 0x0200   /* Allow local address and port reuse.  */
 #define SO_SNDBUFFORCE         31
 #define SO_RCVBUFFORCE         33
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #ifdef __KERNEL__
 
index 87a50bf..ed1a5f7 100644 (file)
@@ -20,6 +20,7 @@
 #define SIOCSPGRP      _IOW('s', 8, pid_t)
 #define SIOCGPGRP      _IOR('s', 9, pid_t)
 
-#define SIOCGSTAMP     0x8906                  /* Get stamp - linux-specific */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* _ASM_SOCKIOS_H */
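The SO_TIMESTAMPNS/SCM_TIMESTAMPNS socket option and the SIOCGSTAMPNS ioctl added across the architectures are the nanosecond counterparts of SO_TIMESTAMP and SIOCGSTAMP. A userspace sketch of how they would be consumed; it assumes a libc whose headers already expose these constants (otherwise they can be pulled from the kernel uapi headers directly).

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <linux/sockios.h>      /* SIOCGSTAMPNS */
#include <time.h>

static void print_rx_timestamp(int fd)
{
        int on = 1;
        char data[2048], ctrl[512];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                              .msg_control = ctrl, .msg_controllen = sizeof(ctrl) };
        struct cmsghdr *cm;
        struct timespec ts;

        /* ask the kernel to attach a timespec to every received packet */
        setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &on, sizeof(on));
        if (recvmsg(fd, &msg, 0) < 0)
                return;

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
                if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_TIMESTAMPNS) {
                        memcpy(&ts, CMSG_DATA(cm), sizeof(ts));
                        printf("rx at %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
                }

        /* alternative: fetch the last packet's timestamp after the fact */
        if (ioctl(fd, SIOCGSTAMPNS, &ts) == 0)
                printf("last rx at %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
}

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in sa = { .sin_family = AF_INET, .sin_port = htons(9999) };

        bind(fd, (struct sockaddr *)&sa, sizeof(sa));
        print_rx_timestamp(fd);         /* e.g. feed it with: echo hi | nc -u localhost 9999 */
        return 0;
}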
index 6cf05f4..fbcda82 100644 (file)
@@ -119,7 +119,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_POLLING_NRFLAG     17      /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE             18
 #define TIF_FREEZE             19
-#define TIF_ALLOW_FP_IN_KERNEL 20
 #define TIF_SYSCALL_TRACE      31      /* syscall trace active */
 
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
index ce2eae1..99e868f 100644 (file)
@@ -33,6 +33,8 @@
 #define SO_PEERCRED    0x4011
 #define SO_TIMESTAMP   0x4012
 #define SCM_TIMESTAMP  SO_TIMESTAMP
+#define SO_TIMESTAMPNS 0x4013
+#define SCM_TIMESTAMPNS        SO_TIMESTAMPNS
 
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             0x4016
index aace496..dabfbc7 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif
index c8b1da5..403e9fd 100644 (file)
@@ -56,5 +56,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_POWERPC_SOCKET_H */
index 590078d..55cef76 100644 (file)
@@ -14,6 +14,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* _ASM_POWERPC_SOCKIOS_H */
index faa407f..aa40f92 100644 (file)
@@ -14,8 +14,6 @@
 #define __HAVE_ARCH_MEMCMP
 #define __HAVE_ARCH_MEMCHR
 
-extern int strcasecmp(const char *, const char *);
-extern int strncasecmp(const char *, const char *, __kernel_size_t);
 extern char * strcpy(char *,const char *);
 extern char * strncpy(char *,const char *, __kernel_size_t);
 extern __kernel_size_t strlen(const char *);
index 8768983..838684d 100644 (file)
@@ -1,27 +1,70 @@
-#ifndef _S390_BUG_H
-#define _S390_BUG_H
+#ifndef _ASM_S390_BUG_H
+#define _ASM_S390_BUG_H
 
 #include <linux/kernel.h>
 
 #ifdef CONFIG_BUG
 
-static inline __attribute__((noreturn)) void __do_illegal_op(void)
-{
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
-       __builtin_trap();
+#ifdef CONFIG_64BIT
+#define S390_LONG ".quad"
 #else
-       asm volatile(".long 0");
+#define S390_LONG ".long"
 #endif
-}
 
-#define BUG() do { \
-       printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
-       __do_illegal_op(); \
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define __EMIT_BUG(x) do {                                     \
+       asm volatile(                                           \
+               "0:     j       0b+2\n"                         \
+               "1:\n"                                          \
+               ".section .rodata.str,\"aMS\",@progbits,1\n"    \
+               "2:     .asciz  \""__FILE__"\"\n"               \
+               ".previous\n"                                   \
+               ".section __bug_table,\"a\"\n"                  \
+               "3:\t"  S390_LONG "\t1b,2b\n"                   \
+               "       .short  %0,%1\n"                        \
+               "       .org    3b+%2\n"                        \
+               ".previous\n"                                   \
+               : : "i" (__LINE__),                             \
+                   "i" (x),                                    \
+                   "i" (sizeof(struct bug_entry)));            \
 } while (0)
 
+#else /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define __EMIT_BUG(x) do {                             \
+       asm volatile(                                   \
+               "0:     j       0b+2\n"                 \
+               "1:\n"                                  \
+               ".section __bug_table,\"a\"\n"          \
+               "2:\t"  S390_LONG "\t1b\n"              \
+               "       .short  %0\n"                   \
+               "       .org    2b+%1\n"                \
+               ".previous\n"                           \
+               : : "i" (x),                            \
+                   "i" (sizeof(struct bug_entry)));    \
+} while (0)
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define BUG()  __EMIT_BUG(0)
+
+#define WARN_ON(x) ({                                  \
+       typeof(x) __ret_warn_on = (x);                  \
+       if (__builtin_constant_p(__ret_warn_on)) {      \
+               if (__ret_warn_on)                      \
+                       __EMIT_BUG(BUGFLAG_WARNING);    \
+       } else {                                        \
+               if (unlikely(__ret_warn_on))            \
+                       __EMIT_BUG(BUGFLAG_WARNING);    \
+       }                                               \
+       unlikely(__ret_warn_on);                        \
+})
+
 #define HAVE_ARCH_BUG
-#endif
+#define HAVE_ARCH_WARN_ON
+#endif /* CONFIG_BUG */
 
 #include <asm-generic/bug.h>
 
-#endif
+#endif /* _ASM_S390_BUG_H */
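Because the new WARN_ON() evaluates to the tested condition, callers can warn and bail out in one expression. A minimal userland model of just that property; the real macro additionally records the call site in the __bug_table section, which is omitted here, and the driver-style names are purely illustrative.

#include <stdio.h>

#define WARN_ON(x) ({                                           \
        int __ret_warn_on = !!(x);                              \
        if (__ret_warn_on)                                      \
                fprintf(stderr, "WARNING at %s:%d\n",           \
                        __FILE__, __LINE__);                    \
        __ret_warn_on;                                          \
})

static int start_device(void *dma_base)
{
        /* warn once in the log, but still fail gracefully */
        if (WARN_ON(dma_base == NULL))
                return -1;
        return 0;
}

int main(void)
{
        return start_device(NULL) ? 1 : 0;
}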
index d2f9c0d..925b3dd 100644 (file)
@@ -11,6 +11,7 @@ struct ccwgroup_device {
                CCWGROUP_ONLINE,
        } state;
        atomic_t onoff;
+       struct mutex reg_mutex;
        unsigned int count;             /* number of attached slave devices */
        struct device   dev;            /* master device                    */
        struct ccw_device *cdev[0];     /* variable number, allocate as needed */
diff --git a/include/asm-s390/chpid.h b/include/asm-s390/chpid.h
new file mode 100644 (file)
index 0000000..b203336
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ *  include/asm-s390/chpid.h
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_CHPID_H
+#define _ASM_S390_CHPID_H _ASM_S390_CHPID_H
+
+#include <linux/string.h>
+#include <asm/types.h>
+#include <asm/cio.h>
+
+#define __MAX_CHPID 255
+
+struct chp_id {
+       u8 reserved1;
+       u8 cssid;
+       u8 reserved2;
+       u8 id;
+} __attribute__((packed));
+
+static inline void chp_id_init(struct chp_id *chpid)
+{
+       memset(chpid, 0, sizeof(struct chp_id));
+}
+
+static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b)
+{
+       return (a->id == b->id) && (a->cssid == b->cssid);
+}
+
+static inline void chp_id_next(struct chp_id *chpid)
+{
+       if (chpid->id < __MAX_CHPID)
+               chpid->id++;
+       else {
+               chpid->id = 0;
+               chpid->cssid++;
+       }
+}
+
+static inline int chp_id_is_valid(struct chp_id *chpid)
+{
+       return (chpid->cssid <= __MAX_CSSID);
+}
+
+#define chp_id_for_each(c) \
+       for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c))
+
+#endif /* _ASM_S390_CHPID_H */
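chp_id_for_each() walks every possible channel-path ID in the lowest CSS and stops once chp_id_next() rolls the CSS id past __MAX_CSSID. A standalone model of that iteration, with the struct and helpers reduced to what the loop needs:

#include <stdio.h>
#include <string.h>

#define __MAX_CHPID 255
#define __MAX_CSSID 0

struct chp_id { unsigned char cssid; unsigned char id; };

static void chp_id_init(struct chp_id *c)     { memset(c, 0, sizeof(*c)); }
static int  chp_id_is_valid(struct chp_id *c) { return c->cssid <= __MAX_CSSID; }
static void chp_id_next(struct chp_id *c)
{
        if (c->id < __MAX_CHPID)
                c->id++;
        else {
                c->id = 0;
                c->cssid++;     /* rolls into the next CSS, ending the loop here */
        }
}

#define chp_id_for_each(c) \
        for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c))

int main(void)
{
        struct chp_id chpid;
        int n = 0;

        chp_id_for_each(&chpid)
                n++;
        printf("%d channel-path IDs visited\n", n);     /* 256 with a single CSS */
        return 0;
}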
index d927850..f738d28 100644 (file)
@@ -13,6 +13,7 @@
 #ifdef __KERNEL__
 
 #define LPM_ANYPATH 0xff
+#define __MAX_CSSID 0
 
 /*
  * subchannel status word
@@ -292,6 +293,13 @@ extern void css_schedule_reprobe(void);
 
 extern void reipl_ccw_dev(struct ccw_dev_id *id);
 
+struct cio_iplinfo {
+       u16 devno;
+       int is_qdio;
+};
+
+extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
+
 #endif
 
 #endif
index 0eb6408..bdcd448 100644 (file)
@@ -8,6 +8,8 @@
 #define _ASM_S390_IPL_H
 
 #include <asm/types.h>
+#include <asm/cio.h>
+#include <asm/setup.h>
 
 #define IPL_PARMBLOCK_ORIGIN   0x2000
 
@@ -74,12 +76,12 @@ struct ipl_parameter_block {
 } __attribute__((packed));
 
 /*
- * IPL validity flags and parameters as detected in head.S
+ * IPL validity flags
  */
 extern u32 ipl_flags;
-extern u16 ipl_devno;
 
 extern u32 dump_prefix_page;
+
 extern void do_reipl(void);
 extern void ipl_save_parameters(void);
 
@@ -89,6 +91,35 @@ enum {
        IPL_NSS_VALID           = 4,
 };
 
+enum ipl_type {
+       IPL_TYPE_UNKNOWN        = 1,
+       IPL_TYPE_CCW            = 2,
+       IPL_TYPE_FCP            = 4,
+       IPL_TYPE_FCP_DUMP       = 8,
+       IPL_TYPE_NSS            = 16,
+};
+
+struct ipl_info
+{
+       enum ipl_type type;
+       union {
+               struct {
+                       struct ccw_dev_id dev_id;
+               } ccw;
+               struct {
+                       struct ccw_dev_id dev_id;
+                       u64 wwpn;
+                       u64 lun;
+               } fcp;
+               struct {
+                       char name[NSS_NAME_SIZE + 1];
+               } nss;
+       } data;
+};
+
+extern struct ipl_info ipl_info;
+extern void setup_ipl_info(void);
+
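Consumers of the new ipl_info structure switch on ipl_info.type and then read only the matching union member. A reduced, hypothetical model of that pattern; the CCW member is simplified to a plain device number here rather than a full ccw_dev_id.

#include <stdint.h>
#include <stdio.h>

enum ipl_type { IPL_TYPE_UNKNOWN = 1, IPL_TYPE_CCW = 2, IPL_TYPE_FCP = 4 };

struct ipl_info_model {
        enum ipl_type type;
        union {
                struct { uint16_t devno; } ccw;
                struct { uint64_t wwpn, lun; } fcp;
        } data;
};

static void show_ipl(const struct ipl_info_model *info)
{
        switch (info->type) {
        case IPL_TYPE_CCW:
                printf("booted from CCW device 0.0.%04x\n",
                       (unsigned)info->data.ccw.devno);
                break;
        case IPL_TYPE_FCP:
                printf("booted from FCP wwpn=0x%016llx lun=0x%016llx\n",
                       (unsigned long long)info->data.fcp.wwpn,
                       (unsigned long long)info->data.fcp.lun);
                break;
        default:
                printf("unknown IPL type\n");
        }
}

int main(void)
{
        struct ipl_info_model info = {
                .type = IPL_TYPE_CCW,
                .data.ccw.devno = 0x1234,
        };
        show_ipl(&info);
        return 0;
}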
 /*
  * DIAG 308 support
  */
index 4a31d0a..ffc9788 100644 (file)
@@ -147,6 +147,52 @@ void pgm_check_handler(void);
 void mcck_int_handler(void);
 void io_int_handler(void);
 
+struct save_area_s390 {
+       u32     ext_save;
+       u64     timer;
+       u64     clk_cmp;
+       u8      pad1[24];
+       u8      psw[8];
+       u32     pref_reg;
+       u8      pad2[20];
+       u32     acc_regs[16];
+       u64     fp_regs[4];
+       u32     gp_regs[16];
+       u32     ctrl_regs[16];
+}  __attribute__((packed));
+
+struct save_area_s390x {
+       u64     fp_regs[16];
+       u64     gp_regs[16];
+       u8      psw[16];
+       u8      pad1[8];
+       u32     pref_reg;
+       u32     fp_ctrl_reg;
+       u8      pad2[4];
+       u32     tod_reg;
+       u64     timer;
+       u64     clk_cmp;
+       u8      pad3[8];
+       u32     acc_regs[16];
+       u64     ctrl_regs[16];
+}  __attribute__((packed));
+
+union save_area {
+       struct save_area_s390   s390;
+       struct save_area_s390x  s390x;
+};
+
+#define SAVE_AREA_BASE_S390    0xd4
+#define SAVE_AREA_BASE_S390X   0x1200
+
+#ifndef __s390x__
+#define SAVE_AREA_SIZE sizeof(struct save_area_s390)
+#define SAVE_AREA_BASE SAVE_AREA_BASE_S390
+#else
+#define SAVE_AREA_SIZE sizeof(struct save_area_s390x)
+#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X
+#endif
+
 struct _lowcore
 {
 #ifndef __s390x__
index 13c1654..8fe8d42 100644 (file)
@@ -753,14 +753,14 @@ ptep_establish(struct vm_area_struct *vma,
  * should therefore only be called if it is not mapped in any
  * address space.
  */
-static inline int page_test_and_clear_dirty(struct page *page)
+static inline int page_test_dirty(struct page *page)
 {
-       unsigned long physpage = page_to_phys(page);
-       int skey = page_get_storage_key(physpage);
+       return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
+}
 
-       if (skey & _PAGE_CHANGED)
-               page_set_storage_key(physpage, skey & ~_PAGE_CHANGED);
-       return skey & _PAGE_CHANGED;
+static inline void page_clear_dirty(struct page *page)
+{
+       page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
 }
 
 /*
@@ -953,7 +953,8 @@ extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PAGE_TEST_DIRTY
+#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
 #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
 #include <asm-generic/pgtable.h>
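With the old test-and-clear primitive split into page_test_dirty() and page_clear_dirty(), a caller can record the dirty state elsewhere before resetting the storage key, so the bit is not lost in between. A toy model of that calling pattern; the code around the two helpers is illustrative, not the actual mm code.

#include <stdbool.h>
#include <stdio.h>

struct page { bool hw_dirty; bool sw_dirty; };

static bool page_test_dirty(struct page *p)  { return p->hw_dirty; }
static void page_clear_dirty(struct page *p) { p->hw_dirty = false; }

static void sync_dirty_bit(struct page *p)
{
        if (page_test_dirty(p)) {       /* 1: observe the hardware state     */
                p->sw_dirty = true;     /* 2: record it where mm will see it */
                page_clear_dirty(p);    /* 3: only then reset the key        */
        }
}

int main(void)
{
        struct page p = { .hw_dirty = true, .sw_dirty = false };

        sync_dirty_bit(&p);
        printf("sw_dirty=%d hw_dirty=%d\n", p.sw_dirty, p.hw_dirty);
        return 0;
}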
 
index 33b80ce..e0fcea8 100644 (file)
@@ -57,6 +57,7 @@ struct cpuinfo_S390
 
 extern void s390_adjust_jiffies(void);
 extern void print_cpu_info(struct cpuinfo_S390 *);
+extern int get_cpu_capability(unsigned int *);
 
 /* Lazy FPU handling on uni-processor */
 extern struct task_struct *last_task_used_math;
@@ -196,6 +197,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
 extern char *task_show_regs(struct task_struct *task, char *buffer);
 
 extern void show_registers(struct pt_regs *regs);
+extern void show_code(struct pt_regs *regs);
 extern void show_trace(struct task_struct *task, unsigned long *sp);
 
 unsigned long get_wchan(struct task_struct *p);
index 468b970..21ed647 100644 (file)
@@ -9,6 +9,7 @@
 #define _ASM_S390_SCLP_H
 
 #include <linux/types.h>
+#include <asm/chpid.h>
 
 struct sccb_header {
        u16     length;
@@ -33,7 +34,20 @@ struct sclp_readinfo_sccb {
        u8      _reserved3[4096 - 112]; /* 112-4095 */
 } __attribute__((packed, aligned(4096)));
 
+#define SCLP_CHP_INFO_MASK_SIZE                32
+
+struct sclp_chp_info {
+       u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+       u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+       u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+};
+
 extern struct sclp_readinfo_sccb s390_readinfo_sccb;
 extern void sclp_readinfo_early(void);
+extern int sclp_sdias_blk_count(void);
+extern int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
+extern int sclp_chp_configure(struct chp_id chpid);
+extern int sclp_chp_deconfigure(struct chp_id chpid);
+extern int sclp_chp_read_info(struct sclp_chp_info *info);
 
 #endif /* _ASM_S390_SCLP_H */
index 44c7aee..a76a6b8 100644 (file)
@@ -40,6 +40,7 @@ struct mem_chunk {
 };
 
 extern struct mem_chunk memory_chunk[];
+extern unsigned long real_memory_size;
 
 #ifdef CONFIG_S390_SWITCH_AMODE
 extern unsigned int switch_amode;
@@ -77,6 +78,7 @@ extern unsigned long machine_flags;
 #endif /* __s390x__ */
 
 #define MACHINE_HAS_SCLP       (!MACHINE_IS_P390)
+#define ZFCPDUMP_HSA_SIZE      (32UL<<20)
 
 /*
  * Console mode. Override with conmode=
index b957e4c..0a28e6d 100644 (file)
@@ -54,9 +54,6 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
 
 #define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
 
-extern int smp_get_cpu(cpumask_t cpu_map);
-extern void smp_put_cpu(int cpu);
-
 static inline __u16 hard_smp_processor_id(void)
 {
         __u16 cpu_address;
@@ -114,9 +111,8 @@ static inline void smp_send_stop(void)
 }
 
 #define smp_cpu_not_running(cpu)       1
-#define smp_get_cpu(cpu) ({ 0; })
-#define smp_put_cpu(cpu) ({ 0; })
 #define smp_setup_cpu_possible_map()   do { } while (0)
 #endif
 
+extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
 #endif
index 1778a49..1161ebe 100644 (file)
@@ -57,5 +57,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_SOCKET_H */
index 412aeb4..f4fc16c 100644 (file)
@@ -15,6 +15,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif
index ca70362..c48d6fc 100644 (file)
@@ -49,5 +49,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* __ASM_SH_SOCKET_H */
index 08a71df..cf8b96b 100644 (file)
@@ -9,5 +9,6 @@
 #define SIOCSPGRP      _IOW('s', 8, pid_t)
 #define SIOCGPGRP      _IOR('s', 9, pid_t)
 
-#define SIOCGSTAMP     _IOR('s', 100, struct timeval) /* Get stamp - linux-specific */
+#define SIOCGSTAMP     _IOR('s', 100, struct timeval) /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   _IOR('s', 101, struct timespec) /* Get stamp (timespec) */
 #endif /* __ASM_SH_SOCKIOS_H */
index 95bc7db..55f8db6 100644 (file)
@@ -126,9 +126,6 @@ extern void *memchr(const void *__s, int __c, size_t __n);
 #define __HAVE_ARCH_STRLEN
 extern size_t strlen(const char *);
 
-/* arch/sh/lib/strcasecmp.c */
-extern int strcasecmp(const char *, const char *);
-
 #endif /* __KERNEL__ */
 
 #endif /* __ASM_SH_STRING_H */
index 1ae23ae..419e76f 100644 (file)
@@ -20,5 +20,6 @@
 #define SIOCSPGRP      _IOW('s', 8, pid_t)
 #define SIOCGPGRP      _IOR('s', 9, pid_t)
 
-#define SIOCGSTAMP     _IOR('s', 100, struct timeval) /* Get stamp - linux-specific */
+#define SIOCGSTAMP     _IOR('s', 100, struct timeval) /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   _IOR('s', 101, struct timespec) /* Get stamp (timespec) */
 #endif /* __ASM_SH64_SOCKIOS_H */
index 274868d..9ea105e 100644 (file)
@@ -35,8 +35,8 @@ struct property {
 };
 
 struct device_node {
-       char    *name;
-       char    *type;
+       const char      *name;
+       const char      *type;
        phandle node;
        char    *path_component_name;
        char    *full_name;
@@ -85,12 +85,14 @@ extern struct device_node *of_find_node_by_phandle(phandle handle);
 extern struct device_node *of_get_parent(const struct device_node *node);
 extern struct device_node *of_get_next_child(const struct device_node *node,
                                             struct device_node *prev);
-extern struct property *of_find_property(struct device_node *np,
+extern struct property *of_find_property(const struct device_node *np,
                                         const char *name,
                                         int *lenp);
-extern int of_device_is_compatible(struct device_node *device, const char *);
-extern void *of_get_property(struct device_node *node, const char *name,
-                            int *lenp);
+extern int of_device_is_compatible(const struct device_node *device,
+                                  const char *);
+extern const void *of_get_property(const struct device_node *node,
+                                  const char *name,
+                                  int *lenp);
 #define get_property(node,name,lenp) of_get_property(node,name,lenp)
 extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
 extern int of_getintprop_default(struct device_node *np,
index f6c4e5b..7c14239 100644 (file)
@@ -49,6 +49,8 @@
 
 #define SO_PEERSEC             0x001e
 #define SO_PASSSEC             0x001f
+#define SO_TIMESTAMPNS         0x0021
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             0x5001
index 0c01b59..990ea74 100644 (file)
@@ -7,7 +7,8 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* !(_ASM_SPARC_SOCKIOS_H) */
 
index f2cc941..e89922d 100644 (file)
@@ -17,8 +17,8 @@
 typedef struct {
        /* Dcache line 1 */
        unsigned int    __softirq_pending; /* must be 1st, see rtrap.S */
-       unsigned int    multiplier;
-       unsigned int    counter;
+       unsigned int    __pad0_1;
+       unsigned int    __pad0_2;
        unsigned int    __pad1;
        unsigned long   clock_tick;     /* %tick's per second */
        unsigned long   udelay_val;
index d8f9872..d5a4559 100644 (file)
@@ -3,5 +3,21 @@
  *
  * This file is released under the GPLv2
  */
-#include <asm-generic/device.h>
+#ifndef _ASM_SPARC64_DEVICE_H
+#define _ASM_SPARC64_DEVICE_H
 
+struct device_node;
+struct of_device;
+
+struct dev_archdata {
+       void                    *iommu;
+       void                    *stc;
+       void                    *host_controller;
+
+       struct device_node      *prom_node;
+       struct of_device        *op;
+
+       unsigned int            msi_num;
+};
+
+#endif /* _ASM_SPARC64_DEVICE_H */
index a4afe9d..9c1c6db 100644 (file)
@@ -8,7 +8,6 @@
 #ifndef __SPARC64_EBUS_H
 #define __SPARC64_EBUS_H
 
-#include <asm/pbm.h>
 #include <asm/oplib.h>
 #include <asm/prom.h>
 #include <asm/of_device.h>
@@ -41,7 +40,6 @@ struct linux_ebus {
        struct of_device                ofdev;
        struct linux_ebus               *next;
        struct linux_ebus_device        *devices;
-       struct pci_pbm_info             *parent;
        struct pci_dev                  *self;
        int                              index;
        int                              is_rio;
index 331013a..4aa0925 100644 (file)
@@ -549,7 +549,7 @@ static int __init ebus_fdthree_p(struct linux_ebus_device *edev)
        if (!strcmp(edev->prom_node->name, "fdthree"))
                return 1;
        if (!strcmp(edev->prom_node->name, "floppy")) {
-               char *compat;
+               const char *compat;
 
                compat = of_get_property(edev->prom_node,
                                         "compatible", NULL);
@@ -661,7 +661,7 @@ static unsigned long __init sun_floppy_init(void)
                struct linux_ebus_device *edev = NULL;
                unsigned long config = 0;
                void __iomem *auxio_reg;
-               char *state_prop;
+               const char *state_prop;
 
                for_each_ebus(ebus) {
                        for_each_ebusdev(edev, ebus) {
index 30b912d..ad595b6 100644 (file)
@@ -24,14 +24,6 @@ extern unsigned long kern_base, kern_size;
 #define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
 #define BIO_VMERGE_BOUNDARY    8192
 
-/* Different PCI controllers we support have their PCI MEM space
- * mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area,
- * so need to chop off the top 33 or 32 bits.
- */
-extern unsigned long pci_memspace_mask;
-
-#define bus_dvma_to_mem(__vaddr) ((__vaddr) & pci_memspace_mask)
-
 static __inline__ u8 _inb(unsigned long addr)
 {
        u8 ret;
index 0de7a3d..e199594 100644 (file)
@@ -7,15 +7,50 @@
 #define _SPARC64_IOMMU_H
 
 /* The format of an iopte in the page tables. */
-#define IOPTE_VALID   0x8000000000000000UL /* IOPTE is valid                  */
-#define IOPTE_64K     0x2000000000000000UL /* IOPTE is for 64k page           */
-#define IOPTE_STBUF   0x1000000000000000UL /* DVMA can use streaming buffer   */
-#define IOPTE_INTRA   0x0800000000000000UL /* SBUS slot-->slot direct transfer*/
-#define IOPTE_CONTEXT 0x07ff800000000000UL /* Context number                 */
-#define IOPTE_PAGE    0x00007fffffffe000UL /* Physical page number (PA[42:13])*/
-#define IOPTE_CACHE   0x0000000000000010UL /* Cached (in UPA E-cache)         */
-#define IOPTE_WRITE   0x0000000000000002UL /* Writeable                       */
+#define IOPTE_VALID   0x8000000000000000UL
+#define IOPTE_64K     0x2000000000000000UL
+#define IOPTE_STBUF   0x1000000000000000UL
+#define IOPTE_INTRA   0x0800000000000000UL
+#define IOPTE_CONTEXT 0x07ff800000000000UL
+#define IOPTE_PAGE    0x00007fffffffe000UL
+#define IOPTE_CACHE   0x0000000000000010UL
+#define IOPTE_WRITE   0x0000000000000002UL
 
 #define IOMMU_NUM_CTXS 4096
 
+struct iommu_arena {
+       unsigned long   *map;
+       unsigned int    hint;
+       unsigned int    limit;
+};
+
+struct iommu {
+       spinlock_t              lock;
+       struct iommu_arena      arena;
+       iopte_t                 *page_table;
+       u32                     page_table_map_base;
+       unsigned long           iommu_control;
+       unsigned long           iommu_tsbbase;
+       unsigned long           iommu_flush;
+       unsigned long           iommu_ctxflush;
+       unsigned long           write_complete_reg;
+       unsigned long           dummy_page;
+       unsigned long           dummy_page_pa;
+       unsigned long           ctx_lowest_free;
+       DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS);
+       u32                     dma_addr_mask;
+};
+
+struct strbuf {
+       int                     strbuf_enabled;
+       unsigned long           strbuf_control;
+       unsigned long           strbuf_pflush;
+       unsigned long           strbuf_fsync;
+       unsigned long           strbuf_ctxflush;
+       unsigned long           strbuf_ctxmatch_base;
+       unsigned long           strbuf_flushflag_pa;
+       volatile unsigned long *strbuf_flushflag;
+       volatile unsigned long  __flushflag_buf[(64+(64-1)) / sizeof(long)];
+};
+
 #endif /* !(_SPARC_IOMMU_H) */
index d9728b9..ecd9290 100644 (file)
@@ -7,7 +7,6 @@
 #ifndef __SPARC64_ISA_H
 #define __SPARC64_ISA_H
 
-#include <asm/pbm.h>
 #include <asm/oplib.h>
 #include <asm/prom.h>
 #include <asm/of_device.h>
@@ -29,7 +28,6 @@ struct sparc_isa_bridge {
        struct of_device        ofdev;
        struct sparc_isa_bridge *next;
        struct sparc_isa_device *devices;
-       struct pci_pbm_info     *parent;
        struct pci_dev          *self;
        int                     index;
        struct device_node      *prom_node;
index 284dfd0..6340a52 100644 (file)
@@ -103,7 +103,7 @@ static int ebus_ecpp_p(struct linux_ebus_device *edev)
        if (!strcmp(edev->prom_node->name, "ecpp"))
                return 1;
        if (!strcmp(edev->prom_node->name, "parallel")) {
-               char *compat;
+               const char *compat;
 
                compat = of_get_property(edev->prom_node,
                                         "compatible", NULL);
index 7a246d8..c008cec 100644 (file)
@@ -1,7 +1,6 @@
-/* $Id: pbm.h,v 1.27 2001/08/12 13:18:23 davem Exp $
- * pbm.h: UltraSparc PCI controller software state.
+/* pbm.h: UltraSparc PCI controller software state.
  *
- * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #ifndef __SPARC64_PBM_H
  * PCI bus.
  */
 
-struct pci_controller_info;
-
-/* This contains the software state necessary to drive a PCI
- * controller's IOMMU.
- */
-struct pci_iommu_arena {
-       unsigned long   *map;
-       unsigned int    hint;
-       unsigned int    limit;
-};
-
-struct pci_iommu {
-       /* This protects the controller's IOMMU and all
-        * streaming buffers underneath.
-        */
-       spinlock_t      lock;
-
-       struct pci_iommu_arena arena;
-
-       /* IOMMU page table, a linear array of ioptes. */
-       iopte_t         *page_table;            /* The page table itself. */
-
-       /* Base PCI memory space address where IOMMU mappings
-        * begin.
-        */
-       u32             page_table_map_base;
-
-       /* IOMMU Controller Registers */
-       unsigned long   iommu_control;          /* IOMMU control register */
-       unsigned long   iommu_tsbbase;          /* IOMMU page table base register */
-       unsigned long   iommu_flush;            /* IOMMU page flush register */
-       unsigned long   iommu_ctxflush;         /* IOMMU context flush register */
-
-       /* This is a register in the PCI controller, which if
-        * read will have no side-effects but will guarantee
-        * completion of all previous writes into IOMMU/STC.
-        */
-       unsigned long   write_complete_reg;
-
-       /* In order to deal with some buggy third-party PCI bridges that
-        * do wrong prefetching, we never mark valid mappings as invalid.
-        * Instead we point them at this dummy page.
-        */
-       unsigned long   dummy_page;
-       unsigned long   dummy_page_pa;
-
-       /* CTX allocation. */
-       unsigned long ctx_lowest_free;
-       unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)];
-
-       /* Here a PCI controller driver describes the areas of
-        * PCI memory space where DMA to/from physical memory
-        * are addressed.  Drivers interrogate the PCI layer
-        * if their device has addressing limitations.  They
-        * do so via pci_dma_supported, and pass in a mask of
-        * DMA address bits their device can actually drive.
-        *
-        * The test for being usable is:
-        *      (device_mask & dma_addr_mask) == dma_addr_mask
-        */
-       u32 dma_addr_mask;
-};
-
-extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask);
-
-/* This describes a PCI bus module's streaming buffer. */
-struct pci_strbuf {
-       int             strbuf_enabled;         /* Present and using it? */
-
-       /* Streaming Buffer Control Registers */
-       unsigned long   strbuf_control;         /* STC control register */
-       unsigned long   strbuf_pflush;          /* STC page flush register */
-       unsigned long   strbuf_fsync;           /* STC flush synchronization reg */
-       unsigned long   strbuf_ctxflush;        /* STC context flush register */
-       unsigned long   strbuf_ctxmatch_base;   /* STC context flush match reg */
-       unsigned long   strbuf_flushflag_pa;    /* Physical address of flush flag */
-       volatile unsigned long *strbuf_flushflag; /* The flush flag itself */
-
-       /* And this is the actual flush flag area.
-        * We allocate extra because the chips require
-        * a 64-byte aligned area.
-        */
-       volatile unsigned long  __flushflag_buf[(64 + (64 - 1)) / sizeof(long)];
-};
+extern void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask);
 
 #define PCI_STC_FLUSHFLAG_INIT(STC) \
        (*((STC)->strbuf_flushflag) = 0UL)
@@ -126,6 +42,8 @@ struct pci_strbuf {
 #define PROM_PCIRNG_MAX                64
 #define PROM_PCIIMAP_MAX       64
 
+struct pci_controller_info;
+
 struct pci_pbm_info {
        /* PCI controller we sit under. */
        struct pci_controller_info      *parent;
@@ -160,11 +78,6 @@ struct pci_pbm_info {
 
        /* OBP specific information. */
        struct device_node              *prom_node;
-       struct linux_prom_pci_ranges    *pbm_ranges;
-       int                             num_pbm_ranges;
-       struct linux_prom_pci_intmap    *pbm_intmap;
-       int                             num_pbm_intmap;
-       struct linux_prom_pci_intmask   *pbm_intmask;
        u64                             ino_bitmap;
 
        /* PBM I/O and Memory space resources. */
@@ -197,13 +110,10 @@ struct pci_pbm_info {
 #endif /* !(CONFIG_PCI_MSI) */
 
        /* This PBM's streaming buffer. */
-       struct pci_strbuf               stc;
+       struct strbuf                   stc;
 
        /* IOMMU state, potentially shared by both PBM segments. */
-       struct pci_iommu                *iommu;
-
-       /* PCI slot mapping. */
-       unsigned int                    pci_first_slot;
+       struct iommu                    *iommu;
 
        /* Now things for the actual PCI bus probes. */
        unsigned int                    pci_first_busno;
@@ -220,17 +130,12 @@ struct pci_controller_info {
         */
        int                             index;
 
-       /* Do the PBMs both exist in the same PCI domain? */
-       int                             pbms_same_domain;
-
        /* The PCI bus modules controlled by us. */
        struct pci_pbm_info             pbm_A;
        struct pci_pbm_info             pbm_B;
 
        /* Operations which are controller specific. */
        void (*scan_bus)(struct pci_controller_info *);
-       void (*base_address_update)(struct pci_dev *, int);
-       void (*resource_adjust)(struct pci_dev *, struct resource *, struct resource *);
 
 #ifdef CONFIG_PCI_MSI
        int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
@@ -244,27 +149,4 @@ struct pci_controller_info {
        unsigned int                    pci_last_busno;
 };
 
-/* PCI devices which are not bridges have this placed in their pci_dev
- * sysdata member.  This makes OBP aware PCI device drivers easier to
- * code.
- */
-struct pcidev_cookie {
-       struct pci_pbm_info             *pbm;
-       struct device_node              *prom_node;
-       struct of_device                *op;
-       struct linux_prom_pci_registers prom_regs[PROMREG_MAX];
-       int num_prom_regs;
-       struct linux_prom_pci_registers prom_assignments[PROMREG_MAX];
-       int num_prom_assignments;
-#ifdef CONFIG_PCI_MSI
-       unsigned int                    msi_num;
-#endif
-};
-
-/* Currently these are the same across all PCI controllers
- * we support.  Someday they may not be...
- */
-#define PCI_IRQ_IGN    0x000007c0      /* Interrupt Group Number */
-#define PCI_IRQ_INO    0x0000003f      /* Interrupt Number */
-
 #endif /* !(__SPARC64_PBM_H) */
index b14a725..47cea16 100644 (file)
@@ -54,7 +54,7 @@ struct pci_iommu_ops {
        void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int);
 };
 
-extern struct pci_iommu_ops *pci_iommu_ops;
+extern const struct pci_iommu_ops *pci_iommu_ops;
 
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
index b12be7a..46705ef 100644 (file)
@@ -737,20 +737,6 @@ extern unsigned long pte_file(pte_t);
 extern pte_t pgoff_to_pte(unsigned long);
 #define PTE_FILE_MAX_BITS      (64UL - PAGE_SHIFT - 1UL)
 
-extern unsigned long prom_virt_to_phys(unsigned long, int *);
-
-extern unsigned long sun4u_get_pte(unsigned long);
-
-static inline unsigned long __get_phys(unsigned long addr)
-{
-       return sun4u_get_pte(addr);
-}
-
-static inline int __get_iospace(unsigned long addr)
-{
-       return ((sun4u_get_pte(addr) & 0xf0000000) >> 28);
-}
-
 extern unsigned long *sparc64_valid_addr_bitmap;
 
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
@@ -791,6 +777,8 @@ extern void pgtable_cache_init(void);
 extern void sun4v_register_fault_status(void);
 extern void sun4v_ktsb_register(void);
 
+extern unsigned long cmdline_memory_size;
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(_SPARC64_PGTABLE_H) */
index 0eca2d9..ddad5f9 100644 (file)
@@ -36,8 +36,8 @@ struct property {
 
 struct of_irq_controller;
 struct device_node {
-       char    *name;
-       char    *type;
+       const char      *name;
+       const char      *type;
        phandle node;
        char    *path_component_name;
        char    *full_name;
@@ -93,11 +93,13 @@ extern struct device_node *of_find_node_by_phandle(phandle handle);
 extern struct device_node *of_get_parent(const struct device_node *node);
 extern struct device_node *of_get_next_child(const struct device_node *node,
                                             struct device_node *prev);
-extern struct property *of_find_property(struct device_node *np,
+extern struct property *of_find_property(const struct device_node *np,
                                         const char *name,
                                         int *lenp);
-extern int of_device_is_compatible(struct device_node *device, const char *);
-extern void *of_get_property(struct device_node *node, const char *name,
+extern int of_device_is_compatible(const struct device_node *device,
+                                  const char *);
+extern const void *of_get_property(const struct device_node *node,
+                            const char *name,
                             int *lenp);
 #define get_property(node,name,lenp) of_get_property(node,name,lenp)
 extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
index 388249b..cca5480 100644 (file)
@@ -42,15 +42,15 @@ extern int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern void smp_setup_cpu_possible_map(void);
+extern unsigned char boot_cpu_id;
 
 #endif /* !(__ASSEMBLY__) */
 
 #else
 
 #define smp_setup_cpu_possible_map() do { } while (0)
+#define boot_cpu_id    (0)
 
 #endif /* !(CONFIG_SMP) */
 
-#define NO_PROC_ID             0xFF
-
 #endif /* !(_SPARC64_SMP_H) */
index 754d46a..986441d 100644 (file)
@@ -49,6 +49,8 @@
 
 #define SO_PEERSEC             0x001e
 #define SO_PASSSEC             0x001f
+#define SO_TIMESTAMPNS         0x0021
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             0x5001
index 6735bab..c7d9900 100644 (file)
@@ -7,7 +7,8 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* !(_ASM_SPARC64_SOCKIOS_H) */
 
index ed5c9d8..77bcd2b 100644 (file)
@@ -3,7 +3,7 @@
 
 #ifdef __KERNEL__
 
-#define SECTION_SIZE_BITS       26
+#define SECTION_SIZE_BITS       31
 #define MAX_PHYSADDR_BITS       42
 #define MAX_PHYSMEM_BITS        42
 
index d435594..ccbd694 100644 (file)
 
 
 struct sparc64_tick_ops {
-       void (*init_tick)(unsigned long);
        unsigned long (*get_tick)(void);
-       unsigned long (*get_compare)(void);
-       unsigned long (*add_tick)(unsigned long, unsigned long);
-       unsigned long (*add_compare)(unsigned long);
+       int (*add_compare)(unsigned long);
        unsigned long softint_mask;
+       void (*disable_irq)(void);
+
+       void (*init_tick)(void);
+       unsigned long (*add_tick)(unsigned long);
+
+       char *name;
 };
 
 extern struct sparc64_tick_ops *tick_ops;
 
-#ifdef CONFIG_SMP
-extern unsigned long timer_tick_offset;
-struct pt_regs;
-extern void timer_tick_interrupt(struct pt_regs *);
-#endif
-
 extern unsigned long sparc64_get_clock_tick(unsigned int cpu);
 
 #endif /* _SPARC64_TIMER_H */
index c2a16e1..bbb9c8f 100644 (file)
        ba,a,pt %xcc, rtrap_irq;                        \
        .previous;
 
-#define TICK_SMP_IRQ                                   \
-       rdpr    %pil, %g2;                              \
-       wrpr    %g0, 15, %pil;                          \
-       sethi   %hi(1f-4), %g7;                         \
-       ba,pt   %xcc, etrap_irq;                        \
-        or     %g7, %lo(1f-4), %g7;                    \
-       nop;                                            \
-       nop;                                            \
-       nop;                                            \
-       .subsection     2;                              \
-1:     call    trace_hardirqs_off;                     \
-        nop;                                           \
-       call    smp_percpu_timer_interrupt;             \
-        add    %sp, PTREGS_OFF, %o0;                   \
-       ba,a,pt %xcc, rtrap_irq;                        \
-       .previous;
-
 #else
 
 #define TRAP_IRQ(routine, level)                       \
         add    %sp, PTREGS_OFF, %o1;                   \
        ba,a,pt %xcc, rtrap_irq;
        
-#define TICK_SMP_IRQ                                   \
-       rdpr    %pil, %g2;                              \
-       wrpr    %g0, 15, %pil;                          \
-       sethi   %hi(109f), %g7;                         \
-       ba,pt   %xcc, etrap_irq;                        \
-109:    or     %g7, %lo(109b), %g7;                    \
-       call    smp_percpu_timer_interrupt;             \
-        add    %sp, PTREGS_OFF, %o0;                   \
-       ba,a,pt %xcc, rtrap_irq;
-
 #endif
 
 #define TRAP_IVEC TRAP_NOSAVE(do_ivec)
index 1e17f74..7b73b2c 100644 (file)
@@ -3,4 +3,5 @@
 
 #include "asm/arch/div64.h"
 
+extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif
index 0dfe55a..a4c2493 100644 (file)
@@ -49,5 +49,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* __V850_SOCKET_H__ */
index cf4874c..823e106 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* __V850_SOCKIOS_H__ */
index b467026..90af60c 100644 (file)
@@ -49,5 +49,7 @@
 
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _ASM_SOCKET_H */
index 2eefd10..d726ba2 100644 (file)
@@ -7,6 +7,7 @@
 #define FIOGETOWN      0x8903
 #define SIOCGPGRP      0x8904
 #define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif
index c4a1057..20965e3 100644 (file)
 #ifndef _XTENSA_DIV64_H
 #define _XTENSA_DIV64_H
 
+#include <linux/types.h>
+
 #define do_div(n,base) ({ \
        int __res = n % ((unsigned int) base); \
        n /= (unsigned int) base; \
        __res; })
 
+static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+{
+       return dividend / divisor;
+}
 #endif
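div64_64() gives common code a single spelling for a full 64-by-64 division; the Xtensa version above simply lets the compiler do it. A trivial usage sketch:

#include <stdint.h>
#include <stdio.h>

static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
{
        return dividend / divisor;
}

int main(void)
{
        uint64_t bytes = 123456789012ULL, seconds = 37;

        /* average throughput, computed entirely in 64-bit arithmetic */
        printf("%llu bytes/s\n",
               (unsigned long long)div64_64(bytes, seconds));
        return 0;
}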
index 971d231..1f5aeac 100644 (file)
@@ -60,5 +60,7 @@
 #define SO_ACCEPTCONN          30
 #define SO_PEERSEC             31
 #define SO_PASSSEC             34
+#define SO_TIMESTAMPNS         35
+#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
 
 #endif /* _XTENSA_SOCKET_H */
index 20d2ba1..efe0af3 100644 (file)
@@ -25,6 +25,7 @@
 #define SIOCSPGRP      _IOW('s', 8, pid_t)
 #define SIOCGPGRP      _IOR('s', 9, pid_t)
 
-#define SIOCGSTAMP     0x8906          /* Get stamp - linux-specific */
+#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
+#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
 
 #endif /* _XTENSA_SOCKIOS_H */
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
new file mode 100644 (file)
index 0000000..e2ee73a
--- /dev/null
@@ -0,0 +1,22 @@
+/* RxRPC key type
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _KEYS_RXRPC_TYPE_H
+#define _KEYS_RXRPC_TYPE_H
+
+#include <linux/key.h>
+
+/*
+ * key type for AF_RXRPC keys
+ */
+extern struct key_type key_type_rxrpc;
+
+#endif /* _KEYS_RXRPC_TYPE_H */
index e81e301..4ff0f57 100644 (file)
@@ -69,9 +69,7 @@ header-y += hdsmart.h
 header-y += hysdn_if.h
 header-y += i2c-dev.h
 header-y += i8k.h
-header-y += icmp.h
 header-y += if_arcnet.h
-header-y += if_arp.h
 header-y += if_bonding.h
 header-y += if_cablemodem.h
 header-y += if_fc.h
@@ -88,7 +86,6 @@ header-y += if_tunnel.h
 header-y += in6.h
 header-y += in_route.h
 header-y += ioctl.h
-header-y += ip.h
 header-y += ipmi_msgdefs.h
 header-y += ip_mp_alg.h
 header-y += ipsec.h
@@ -116,6 +113,7 @@ header-y += netrom.h
 header-y += nfs2.h
 header-y += nfs4_mount.h
 header-y += nfs_mount.h
+header-y += nl80211.h
 header-y += oom.h
 header-y += param.h
 header-y += pci_regs.h
@@ -210,8 +208,10 @@ unifdef-y += hiddev.h
 unifdef-y += hpet.h
 unifdef-y += i2c.h
 unifdef-y += i2o-dev.h
+unifdef-y += icmp.h
 unifdef-y += icmpv6.h
 unifdef-y += if_addr.h
+unifdef-y += if_arp.h
 unifdef-y += if_bridge.h
 unifdef-y += if_ec.h
 unifdef-y += if_eql.h
@@ -231,6 +231,7 @@ unifdef-y += inet_diag.h
 unifdef-y += in.h
 unifdef-y += inotify.h
 unifdef-y += input.h
+unifdef-y += ip.h
 unifdef-y += ipc.h
 unifdef-y += ipmi.h
 unifdef-y += ipv6.h
index d12984d..ced8a1e 100644 (file)
@@ -101,7 +101,7 @@ struct ddpehdr {
 
 static __inline__ struct ddpehdr *ddp_hdr(struct sk_buff *skb)
 {
-       return (struct ddpehdr *)skb->h.raw;
+       return (struct ddpehdr *)skb_transport_header(skb);
 }
 
 /* AppleTalk AARP headers */
@@ -129,7 +129,7 @@ struct elapaarp {
 
 static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
 {
-       return (struct elapaarp *)skb->h.raw;
+       return (struct elapaarp *)skb_transport_header(skb);
 }
 
 /* Not specified - how long till we drop a resolved entry */
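The conversions above replace direct pokes at skb->h.raw with the skb_transport_header() accessor, so protocol code no longer depends on how sk_buff stores that offset. A toy model of the accessor pattern; the struct layout is made up for illustration and is not the real sk_buff.

#include <stdio.h>

struct ddpehdr_model { unsigned char deh_len; };

struct sk_buff_model {
        unsigned char data[64];
        unsigned int  transport_off;    /* offset of the transport header */
};

static unsigned char *skb_transport_header(struct sk_buff_model *skb)
{
        return skb->data + skb->transport_off;
}

static struct ddpehdr_model *ddp_hdr(struct sk_buff_model *skb)
{
        /* protocol code only ever goes through the accessor */
        return (struct ddpehdr_model *)skb_transport_header(skb);
}

int main(void)
{
        struct sk_buff_model skb = { .transport_off = 14 };     /* e.g. after an Ethernet header */

        skb.data[14] = 42;
        printf("deh_len=%u\n", ddp_hdr(&skb)->deh_len);
        return 0;
}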
index 1cb054b..fda2148 100644 (file)
@@ -260,19 +260,20 @@ enum {
 
 static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
 {
-       return (struct dccp_hdr *)skb->h.raw;
+       return (struct dccp_hdr *)skb_transport_header(skb);
 }
 
 static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
 {
-       skb->h.raw = skb_push(skb, headlen);
-       memset(skb->h.raw, 0, headlen);
-       return dccp_hdr(skb);
+       skb_push(skb, headlen);
+       skb_reset_transport_header(skb);
+       return memset(skb_transport_header(skb), 0, headlen);
 }
 
 static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb)
 {
-       return (struct dccp_hdr_ext *)(skb->h.raw + sizeof(struct dccp_hdr));
+       return (struct dccp_hdr_ext *)(skb_transport_header(skb) +
+                                      sizeof(struct dccp_hdr));
 }
 
 static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
@@ -301,12 +302,14 @@ static inline __u64 dccp_hdr_seq(const struct sk_buff *skb)
 
 static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb)
 {
-       return (struct dccp_hdr_request *)(skb->h.raw + dccp_basic_hdr_len(skb));
+       return (struct dccp_hdr_request *)(skb_transport_header(skb) +
+                                          dccp_basic_hdr_len(skb));
 }
 
 static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb)
 {
-       return (struct dccp_hdr_ack_bits *)(skb->h.raw + dccp_basic_hdr_len(skb));
+       return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) +
+                                           dccp_basic_hdr_len(skb));
 }
 
 static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
@@ -317,12 +320,14 @@ static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
 
 static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
 {
-       return (struct dccp_hdr_response *)(skb->h.raw + dccp_basic_hdr_len(skb));
+       return (struct dccp_hdr_response *)(skb_transport_header(skb) +
+                                           dccp_basic_hdr_len(skb));
 }
 
 static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb)
 {
-       return (struct dccp_hdr_reset *)(skb->h.raw + dccp_basic_hdr_len(skb));
+       return (struct dccp_hdr_reset *)(skb_transport_header(skb) +
+                                        dccp_basic_hdr_len(skb));
 }
 
 static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh)
@@ -460,26 +465,27 @@ struct dccp_ackvec;
  * @dccps_service_list - second .. last service code on passive socket
  * @dccps_timestamp_time - time of latest TIMESTAMP option
  * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option
- * @dccps_l_ack_ratio -
- * @dccps_r_ack_ratio -
+ * @dccps_l_ack_ratio - feature-local Ack Ratio
+ * @dccps_r_ack_ratio - feature-remote Ack Ratio
  * @dccps_pcslen - sender   partial checksum coverage (via sockopt)
  * @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
  * @dccps_ndp_count - number of Non Data Packets since last data packet
- * @dccps_mss_cache -
- * @dccps_minisock -
+ * @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
+ * @dccps_minisock - associated minisock (accessed via dccp_msk)
  * @dccps_hc_rx_ackvec - rx half connection ack vector
- * @dccps_hc_rx_ccid -
- * @dccps_hc_tx_ccid -
- * @dccps_options_received -
- * @dccps_epoch -
- * @dccps_role - Role of this sock, one of %dccp_role
- * @dccps_hc_rx_insert_options -
- * @dccps_hc_tx_insert_options -
+ * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
+ * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
+ * @dccps_options_received - parsed set of retrieved options
+ * @dccps_role - role of this sock, one of %dccp_role
+ * @dccps_hc_rx_insert_options - receiver wants to add options when acking
+ * @dccps_hc_tx_insert_options - sender wants to add options when sending
  * @dccps_xmit_timer - timer for when CCID is not ready to send
+ * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
  */
 struct dccp_sock {
        /* inet_connection_sock has to be the first member of dccp_sock */
        struct inet_connection_sock     dccps_inet_connection;
+#define dccps_syn_rtt                  dccps_inet_connection.icsk_ack.lrcvtime
        __u64                           dccps_swl;
        __u64                           dccps_swh;
        __u64                           dccps_awl;
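
The DCCP helpers above are thin wrappers: each per-message accessor is just the transport header plus dccp_basic_hdr_len(). A hedged equivalence sketch, assuming an skb whose transport header offset has already been set; the function name is hypothetical:

    #include <linux/dccp.h>
    #include <linux/skbuff.h>

    /* Points at the same bytes as dccp_hdr_ack_bits(skb). */
    static struct dccp_hdr_ack_bits *ack_bits_by_hand(const struct sk_buff *skb)
    {
        unsigned char *base = skb_transport_header(skb);

        return (struct dccp_hdr_ack_bits *)(base + dccp_basic_hdr_len(skb));
    }
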
index 8270aac..87b606b 100644 (file)
@@ -5,8 +5,13 @@
 #include <linux/rtnetlink.h>
 
 /* rule is permanent, and cannot be deleted */
-#define FIB_RULE_PERMANENT     1
-#define FIB_RULE_INVERT                2
+#define FIB_RULE_PERMANENT     0x00000001
+#define FIB_RULE_INVERT                0x00000002
+#define FIB_RULE_UNRESOLVED    0x00000004
+#define FIB_RULE_DEV_DETACHED  0x00000008
+
+/* try to find source address in routing lookups */
+#define FIB_RULE_FIND_SADDR    0x00010000
 
 struct fib_rule_hdr
 {
@@ -29,7 +34,7 @@ enum
        FRA_DST,        /* destination address */
        FRA_SRC,        /* source address */
        FRA_IFNAME,     /* interface name */
-       FRA_UNUSED1,
+       FRA_GOTO,       /* target to jump to (FR_ACT_GOTO) */
        FRA_UNUSED2,
        FRA_PRIORITY,   /* priority/preference */
        FRA_UNUSED3,
@@ -51,8 +56,8 @@ enum
 {
        FR_ACT_UNSPEC,
        FR_ACT_TO_TBL,          /* Pass to fixed table */
-       FR_ACT_RES1,
-       FR_ACT_RES2,
+       FR_ACT_GOTO,            /* Jump to another rule */
+       FR_ACT_NOP,             /* No operation */
        FR_ACT_RES3,
        FR_ACT_RES4,
        FR_ACT_BLACKHOLE,       /* Drop without notification */
index d4b3339..0fe562a 100644 (file)
@@ -132,8 +132,8 @@ static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
 {
        hdlc_device *hdlc = dev_to_hdlc(dev);
 
-       skb->mac.raw  = skb->data;
-       skb->dev      = dev;
+       skb->dev = dev;
+       skb_reset_mac_header(skb);
 
        if (hdlc->proto->type_trans)
                return hdlc->proto->type_trans(skb, dev);
index 24da4fb..474f2a5 100644 (file)
@@ -82,6 +82,15 @@ struct icmphdr {
   } un;
 };
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
+{
+       return (struct icmphdr *)skb_transport_header(skb);
+}
+#endif
+
 /*
  *     constants for (set|get)sockopt
  */
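
The new icmp_hdr() accessor replaces open-coded casts of the raw transport pointer in ICMP consumers. A minimal kernel-style sketch, assuming the IP receive path has already set the transport header; the function name is hypothetical:

    #include <linux/types.h>
    #include <linux/icmp.h>
    #include <linux/skbuff.h>

    /* Returns true if this skb carries an ICMP echo request. */
    static bool is_echo_request(const struct sk_buff *skb)
    {
        const struct icmphdr *icmph = icmp_hdr(skb);

        return icmph->type == ICMP_ECHO;
    }
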
index 68d3526..7c5e981 100644 (file)
@@ -75,6 +75,15 @@ struct icmp6hdr {
 #define icmp6_router_pref      icmp6_dataun.u_nd_ra.router_pref
 };
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+{
+       return (struct icmp6hdr *)skb_transport_header(skb);
+}
+#endif
+
 #define ICMPV6_ROUTER_PREF_LOW         0x3
 #define ICMPV6_ROUTER_PREF_MEDIUM      0x0
 #define ICMPV6_ROUTER_PREF_HIGH                0x1
index d557e4c..43f3bed 100644 (file)
@@ -39,6 +39,7 @@ enum
 #define IFA_F_TEMPORARY                IFA_F_SECONDARY
 
 #define        IFA_F_NODAD             0x02
+#define IFA_F_OPTIMISTIC       0x04
 #define        IFA_F_HOMEADDRESS       0x10
 #define IFA_F_DEPRECATED       0x20
 #define IFA_F_TENTATIVE                0x40
index 7f57142..ed7b93c 100644 (file)
@@ -148,4 +148,13 @@ struct arphdr
 
 };
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
+{
+       return (struct arphdr *)skb_network_header(skb);
+}
+#endif
+
 #endif /* _LINUX_IF_ARP_H */
index fd1b6eb..4ff211d 100644 (file)
@@ -105,7 +105,8 @@ struct __fdb_entry
 #include <linux/netdevice.h>
 
 extern void brioctl_set(int (*ioctl_hook)(unsigned int, void __user *));
-extern int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);
+extern struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
+                                              struct sk_buff *skb);
 extern int (*br_should_route_hook)(struct sk_buff **pskb);
 
 #endif
index ab08f35..1db774c 100644 (file)
@@ -61,6 +61,7 @@
 #define ETH_P_8021Q    0x8100          /* 802.1Q VLAN Extended Header  */
 #define ETH_P_IPX      0x8137          /* IPX over DIX                 */
 #define ETH_P_IPV6     0x86DD          /* IPv6 over bluebook           */
+#define ETH_P_PAUSE    0x8808          /* IEEE Pause frames. See 802.3 31B */
 #define ETH_P_SLOW     0x8809          /* Slow Protocol. See 802.3ad 43B */
 #define ETH_P_WCCP     0x883E          /* Web-cache coordination protocol
                                         * defined in draft-wilson-wrec-wccp-v2-00.txt */
@@ -112,7 +113,7 @@ struct ethhdr {
 
 static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
 {
-       return (struct ethhdr *)skb->mac.raw;
+       return (struct ethhdr *)skb_mac_header(skb);
 }
 
 #ifdef CONFIG_SYSCTL
index 35ed3b5..604c243 100644 (file)
@@ -126,6 +126,7 @@ enum
        IFLA_INET6_STATS,       /* statistics                   */
        IFLA_INET6_MCAST,       /* MC things. What of them?     */
        IFLA_INET6_CACHEINFO,   /* time values and max reasm size */
+       IFLA_INET6_ICMP6STATS,  /* statistics (icmpv6)          */
        __IFLA_INET6_MAX
 };
 
index f3de05c..ad09609 100644 (file)
@@ -42,6 +42,7 @@ struct sockaddr_ll
 #define PACKET_STATISTICS              6
 #define PACKET_COPY_THRESH             7
 #define PACKET_AUXDATA                 8
+#define PACKET_ORIGDEV                 9
 
 struct tpacket_stats
 {
index e33ee76..6f987be 100644 (file)
@@ -111,7 +111,17 @@ struct pppoe_hdr {
        struct pppoe_tag tag[0];
 } __attribute__ ((packed));
 
+/* Length of entire PPPoE + PPP header */
+#define PPPOE_SES_HLEN 8
+
 #ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb)
+{
+       return (struct pppoe_hdr *)skb_network_header(skb);
+}
+
 struct pppoe_opt {
        struct net_device      *dev;      /* device associated with socket*/
        int                     ifindex;  /* ifindex of device associated with socket */
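
PPPOE_SES_HLEN is 8 because a PPPoE session frame carries the 6-byte PPPoE header (version/type, code, session id, length) followed by the 2-byte PPP protocol field. A hedged compile-time sanity sketch of that arithmetic; the wrapper function is hypothetical:

    #include <linux/kernel.h>
    #include <linux/if_pppox.h>

    /* 6-byte PPPoE header + 2-byte PPP protocol id == PPPOE_SES_HLEN. */
    static inline void pppoe_hlen_check(void)
    {
        BUILD_BUG_ON(sizeof(struct pppoe_hdr) + 2 != PPPOE_SES_HLEN);
    }
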
index 2f94cf2..046e9d9 100644 (file)
@@ -47,7 +47,7 @@ struct trh_hdr {
 
 static inline struct trh_hdr *tr_hdr(const struct sk_buff *skb)
 {
-       return (struct trh_hdr *)skb->mac.raw;
+       return (struct trh_hdr *)skb_mac_header(skb);
 }
 #ifdef CONFIG_SYSCTL
 extern struct ctl_table tr_table[];
index d103580..81e9bc9 100644 (file)
@@ -51,7 +51,7 @@ struct vlan_ethhdr {
 
 static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 {
-       return (struct vlan_ethhdr *)skb->mac.raw;
+       return (struct vlan_ethhdr *)skb_mac_header(skb);
 }
 
 struct vlan_hdr {
@@ -275,8 +275,8 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short
        veth->h_vlan_TCI = htons(tag);
 
        skb->protocol = __constant_htons(ETH_P_8021Q);
-       skb->mac.raw -= VLAN_HLEN;
-       skb->nh.raw -= VLAN_HLEN;
+       skb->mac_header -= VLAN_HLEN;
+       skb->network_header -= VLAN_HLEN;
 
        return skb;
 }
diff --git a/include/linux/if_wanpipe_common.h b/include/linux/if_wanpipe_common.h
deleted file mode 100644 (file)
index 6e5461d..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*****************************************************************************
-* if_wanipe_common.h   Sangoma Driver/Socket common area definitions.
-*
-* Author:       Nenad Corbic <ncorbic@sangoma.com>
-*
-* Copyright:    (c) 2000 Sangoma Technologies Inc.
-*
-*               This program is free software; you can redistribute it and/or
-*               modify it under the terms of the GNU General Public License
-*               as published by the Free Software Foundation; either version
-*               2 of the License, or (at your option) any later version.
-* ============================================================================
-* Jan 13, 2000  Nenad Corbic      Initial version
-*****************************************************************************/
-
-
-#ifndef _WANPIPE_SOCK_DRIVER_COMMON_H
-#define _WANPIPE_SOCK_DRIVER_COMMON_H
-
-typedef struct {
-       struct net_device *slave;
-       atomic_t packet_sent;
-       atomic_t receive_block;
-       atomic_t command;
-       atomic_t disconnect;
-       atomic_t driver_busy;
-       long common_critical;
-       struct timer_list *tx_timer;
-       struct sock *sk;                /* Wanpipe Sock bind's here */ 
-       int (*func)(struct sk_buff *skb, struct net_device *dev, 
-                   struct sock *sk);
-
-       struct work_struct wanpipe_work;    /* deferred keventd work */
-       unsigned char rw_bind;                    /* Sock bind state */
-       unsigned char usedby;
-       unsigned char state;
-       unsigned char svc;
-       unsigned short lcn;
-       void *mbox;
-} wanpipe_common_t;
-
-
-enum {
-       WANSOCK_UNCONFIGURED,   /* link/channel is not configured */
-       WANSOCK_DISCONNECTED,   /* link/channel is disconnected */
-       WANSOCK_CONNECTING,             /* connection is in progress */
-       WANSOCK_CONNECTED,              /* link/channel is operational */
-       WANSOCK_LIMIT,          /* for verification only */
-       WANSOCK_DUALPORT,               /* for Dual Port cards */
-       WANSOCK_DISCONNECTING,
-       WANSOCK_BINDED,
-       WANSOCK_BIND_LISTEN,
-       WANSOCK_LISTEN
-};
-
-#endif
-
-
index a113fe6..f510e7e 100644 (file)
@@ -80,6 +80,27 @@ struct igmpv3_query {
        __be32 srcs[0];
 };
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
+{
+       return (struct igmphdr *)skb_transport_header(skb);
+}
+
+static inline struct igmpv3_report *
+                       igmpv3_report_hdr(const struct sk_buff *skb)
+{
+       return (struct igmpv3_report *)skb_transport_header(skb);
+}
+
+static inline struct igmpv3_query *
+                       igmpv3_query_hdr(const struct sk_buff *skb)
+{
+       return (struct igmpv3_query *)skb_transport_header(skb);
+}
+#endif
+
 #define IGMP_HOST_MEMBERSHIP_QUERY     0x11    /* From RFC1112 */
 #define IGMP_HOST_MEMBERSHIP_REPORT    0x12    /* Ditto */
 #define IGMP_DVMRP                     0x13    /* DVMRP routing */
index 1912e7c..3975cbf 100644 (file)
@@ -83,6 +83,7 @@ struct in_addr {
 #define IP_PMTUDISC_DONT               0       /* Never send DF frames */
 #define IP_PMTUDISC_WANT               1       /* Use per route hints  */
 #define IP_PMTUDISC_DO                 2       /* Always DF            */
+#define IP_PMTUDISC_PROBE              3       /* Ignore dst pmtu      */
 
 #define IP_MULTICAST_IF                        32
 #define IP_MULTICAST_TTL               33
index 4e8350a..2a61c82 100644 (file)
@@ -44,10 +44,8 @@ struct in6_addr
  * NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined
  * in network byte order, not in host byte order as are the IPv4 equivalents
  */
-#if 0
 extern const struct in6_addr in6addr_any;
 #define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } }
-#endif
 extern const struct in6_addr in6addr_loopback;
 #define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
 
@@ -179,6 +177,7 @@ struct in6_flowlabel_req
 #define IPV6_PMTUDISC_DONT             0
 #define IPV6_PMTUDISC_WANT             1
 #define IPV6_PMTUDISC_DO               2
+#define IPV6_PMTUDISC_PROBE            3
 
 /* Flowlabel */
 #define IPV6_FLOWLABEL_MGR     32
index 1d36b97..bd0a2a8 100644 (file)
@@ -104,6 +104,20 @@ struct iphdr {
        /*The options start here. */
 };
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
+{
+       return (struct iphdr *)skb_network_header(skb);
+}
+
+static inline struct iphdr *ipip_hdr(const struct sk_buff *skb)
+{
+       return (struct iphdr *)skb_transport_header(skb);
+}
+#endif
+
 struct ip_auth_hdr {
        __u8  nexthdr;
        __u8  hdrlen;           /* This one is measured in 32 bit units! */
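
ip_hdr() and ipip_hdr() give typed access to the network header and, for IP-in-IP, the inner header at the transport offset. A minimal hedged consumer, assuming the network header offset is already set; the function name is hypothetical:

    #include <linux/ip.h>
    #include <linux/skbuff.h>

    /* Hedged sketch: on-the-wire total length of an IPv4 packet. */
    static unsigned int ipv4_tot_len(const struct sk_buff *skb)
    {
        return ntohs(ip_hdr(skb)->tot_len);
    }
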
index f824113..09ea01a 100644 (file)
@@ -177,6 +177,10 @@ struct ipv6_devconf {
 #endif
 #endif
        __s32           proxy_ndp;
+       __s32           accept_source_route;
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+       __s32           optimistic_dad;
+#endif
        void            *sysctl;
 };
 
@@ -205,6 +209,9 @@ enum {
        DEVCONF_RTR_PROBE_INTERVAL,
        DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN,
        DEVCONF_PROXY_NDP,
+       __DEVCONF_OPTIMISTIC_DAD,
+       DEVCONF_ACCEPT_SOURCE_ROUTE,
+       DEVCONF_OPTIMISTIC_DAD,
        DEVCONF_MAX
 };
 
@@ -216,6 +223,16 @@ enum {
 #include <net/if_inet6.h>       /* struct ipv6_mc_socklist */
 #include <net/inet_sock.h>
 
+static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
+{
+       return (struct ipv6hdr *)skb_network_header(skb);
+}
+
+static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
+{
+       return (struct ipv6hdr *)skb_transport_header(skb);
+}
+
 /* 
    This structure contains results of exthdrs parsing
    as offsets from skb->nh.
index 82c7ae4..2a2f99f 100644 (file)
@@ -84,7 +84,7 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
 /* A special optimized version that handles 1 or more of u32s.
  * The length parameter here is the number of u32s in the key.
  */
-static inline u32 jhash2(u32 *k, u32 length, u32 initval)
+static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
 {
        u32 a, b, c, len;
 
index 169f05e..a9220e7 100644 (file)
@@ -160,6 +160,8 @@ struct key {
         */
        union {
                struct list_head        link;
+               unsigned long           x[2];
+               void                    *p[2];
        } type_data;
 
        /* key data
index 248305b..81bb9c7 100644 (file)
@@ -259,6 +259,12 @@ static inline s64 ktime_to_ns(const ktime_t kt)
 
 #endif
 
+static inline s64 ktime_to_us(const ktime_t kt)
+{
+       struct timeval tv = ktime_to_timeval(kt);
+       return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
+}
+
 /*
  * The resolution of the clocks. The resolution value is returned in
  * the clock_getres() system call to give application programmers an
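
The new ktime_to_us() rounds a ktime down to whole microseconds via the timeval conversion. A hedged sketch of timing an interval with it, assuming ktime_get() and ktime_sub() from the existing ktime API; the function name is hypothetical:

    #include <linux/ktime.h>
    #include <linux/hrtimer.h>

    /* Measure an interval in microseconds. */
    static s64 elapsed_us_example(void)
    {
        ktime_t start = ktime_get();

        /* ... work being timed ... */

        return ktime_to_us(ktime_sub(ktime_get(), start));
    }
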
index 4db21e6..efc4517 100644 (file)
@@ -24,7 +24,7 @@
 struct poll_table_struct;
 struct inode;
 
-#define NPROTO         33              /* should be enough for now..   */
+#define NPROTO         34              /* should be enough for now..   */
 
 #define SYS_SOCKET     1               /* sys_socket(2)                */
 #define SYS_BIND       2               /* sys_bind(2)                  */
index 1a52854..e027a37 100644 (file)
@@ -42,6 +42,8 @@
 struct vlan_group;
 struct ethtool_ops;
 struct netpoll_info;
+/* 802.11 specific */
+struct wireless_dev;
                                        /* source back-compat hooks */
 #define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )
@@ -323,6 +325,7 @@ struct net_device
 #define NETIF_F_VLAN_CHALLENGED        1024    /* Device cannot handle VLAN packets */
 #define NETIF_F_GSO            2048    /* Enable software GSO. */
 #define NETIF_F_LLTX           4096    /* LockLess TX */
+#define NETIF_F_INTERNAL_STATS 8192    /* Use stats structure in net_device */
 
        /* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT      16
@@ -347,13 +350,15 @@ struct net_device
 
 
        struct net_device_stats* (*get_stats)(struct net_device *dev);
+       struct net_device_stats stats;
 
+#ifdef CONFIG_WIRELESS_EXT
        /* List of functions to handle Wireless Extensions (instead of ioctl).
         * See <net/iw_handler.h> for details. Jean II */
        const struct iw_handler_def *   wireless_handlers;
        /* Instance data managed by the core of Wireless Extensions. */
        struct iw_public_data * wireless_data;
-
+#endif
        const struct ethtool_ops *ethtool_ops;
 
        /*
@@ -398,6 +403,8 @@ struct net_device
        void                    *ip6_ptr;       /* IPv6 specific data */
        void                    *ec_ptr;        /* Econet specific data */
        void                    *ax25_ptr;      /* AX.25 specific data */
+       struct wireless_dev     *ieee80211_ptr; /* IEEE 802.11 specific data,
+                                                  assign before registering */
 
 /*
  * Cache line mostly used on receive path (including eth_type_trans())
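
With a stats structure now embedded in struct net_device, a driver can account into dev->stats directly instead of keeping a private copy. A hedged sketch of that pattern; the function names are hypothetical, and the exact core fallback behaviour behind NETIF_F_INTERNAL_STATS is not shown in this hunk, so a trivial get_stats() is kept:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Count a received frame in the embedded stats. */
    static void demo_count_rx(struct net_device *dev, struct sk_buff *skb)
    {
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
    }

    static struct net_device_stats *demo_get_stats(struct net_device *dev)
    {
        return &dev->stats;
    }
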
index 70d3b4f..10b5c62 100644 (file)
@@ -281,9 +281,6 @@ extern void nf_reinject(struct sk_buff *skb,
                        struct nf_info *info,
                        unsigned int verdict);
 
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
-extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
-
 /* FIXME: Before cache is ever used, this must be implemented for real. */
 extern void nf_invalidate_cache(int pf);
 
@@ -388,11 +385,18 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb,
 {
        return 1;
 }
-static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
 struct flowi;
 static inline void
 nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family) {}
 #endif /*CONFIG_NETFILTER*/
 
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
+extern void (*nf_ct_destroy)(struct nf_conntrack *);
+#else
+static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+#endif
+
 #endif /*__KERNEL__*/
 #endif /*__LINUX_NETFILTER_H*/
index 007af4c..22ce299 100644 (file)
@@ -30,6 +30,11 @@ enum tcp_conntrack {
 /* Be liberal in window checking */
 #define IP_CT_TCP_FLAG_BE_LIBERAL              0x08
 
+struct nf_ct_tcp_flags {
+       u_int8_t flags;
+       u_int8_t mask;
+};
+
 #ifdef __KERNEL__
 
 struct ip_ct_tcp_state {
index 1e9c821..0f9311d 100644 (file)
@@ -62,11 +62,11 @@ struct nfattr
 #define NFA_DATA(nfa)   ((void *)(((char *)(nfa)) + NFA_LENGTH(0)))
 #define NFA_PAYLOAD(nfa) ((int)((nfa)->nfa_len) - NFA_LENGTH(0))
 #define NFA_NEST(skb, type) \
-({     struct nfattr *__start = (struct nfattr *) (skb)->tail; \
+({     struct nfattr *__start = (struct nfattr *)skb_tail_pointer(skb); \
        NFA_PUT(skb, (NFNL_NFA_NEST | type), 0, NULL); \
        __start;  })
 #define NFA_NEST_END(skb, start) \
-({      (start)->nfa_len = ((skb)->tail - (unsigned char *) (start)); \
+({      (start)->nfa_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
         (skb)->len; })
 #define NFA_NEST_CANCEL(skb, start) \
 ({      if (start) \
@@ -111,7 +111,7 @@ struct nfgenmsg {
 struct nfnl_callback
 {
        int (*call)(struct sock *nl, struct sk_buff *skb, 
-               struct nlmsghdr *nlh, struct nfattr *cda[], int *errp);
+               struct nlmsghdr *nlh, struct nfattr *cda[]);
        u_int16_t attr_count;   /* number of nfattr's */
 };
 
@@ -129,19 +129,6 @@ extern void __nfa_fill(struct sk_buff *skb, int attrtype,
 ({ if (skb_tailroom(skb) < (int)NFA_SPACE(attrlen)) goto nfattr_failure; \
    __nfa_fill(skb, attrtype, attrlen, data); })
 
-extern struct semaphore nfnl_sem;
-
-#define nfnl_shlock()          down(&nfnl_sem)
-#define nfnl_shlock_nowait()   down_trylock(&nfnl_sem)
-
-#define nfnl_shunlock()                do { up(&nfnl_sem); \
-                                    if(nfnl && nfnl->sk_receive_queue.qlen) \
-                                           nfnl->sk_data_ready(nfnl, 0); \
-                               } while(0)
-
-extern void nfnl_lock(void);
-extern void nfnl_unlock(void);
-
 extern int nfnetlink_subsys_register(struct nfnetlink_subsystem *n);
 extern int nfnetlink_subsys_unregister(struct nfnetlink_subsystem *n);
 
index b5883cc..d7c3503 100644 (file)
@@ -83,6 +83,10 @@ enum ctattr_protoinfo {
 enum ctattr_protoinfo_tcp {
        CTA_PROTOINFO_TCP_UNSPEC,
        CTA_PROTOINFO_TCP_STATE,
+       CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
+       CTA_PROTOINFO_TCP_WSCALE_REPLY,
+       CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
+       CTA_PROTOINFO_TCP_FLAGS_REPLY,
        __CTA_PROTOINFO_TCP_MAX
 };
 #define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1)
index 55689f3..1906003 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/netfilter.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/if_pppox.h>
 
 /* Bridge Hooks */
 /* After promisc drops, checksum checks. */
@@ -58,8 +59,14 @@ static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
  * enough room for the encapsulating header (if there is one). */
 static inline int nf_bridge_pad(const struct sk_buff *skb)
 {
-       return (skb->nf_bridge && skb->protocol == htons(ETH_P_8021Q))
-               ? VLAN_HLEN : 0;
+       int padding = 0;
+
+       if (skb->nf_bridge && skb->protocol == htons(ETH_P_8021Q))
+               padding = VLAN_HLEN;
+       else if (skb->nf_bridge && skb->protocol == htons(ETH_P_PPP_SES))
+               padding = PPPOE_SES_HLEN;
+
+       return padding;
 }
 
 struct bridge_skb_cb {
index 07f044f..a11b0c2 100644 (file)
@@ -54,7 +54,7 @@ struct ebt_802_3_hdr {
 
 static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
 {
-       return (struct ebt_802_3_hdr *)skb->mac.raw;
+       return (struct ebt_802_3_hdr *)skb_mac_header(skb);
 }
 #endif
 
index 97e4dbd..cbf4843 100644 (file)
@@ -8,8 +8,10 @@
 #define EBT_ARP_DST_IP 0x10
 #define EBT_ARP_SRC_MAC 0x20
 #define EBT_ARP_DST_MAC 0x40
+#define EBT_ARP_GRAT 0x80
 #define EBT_ARP_MASK (EBT_ARP_OPCODE | EBT_ARP_HTYPE | EBT_ARP_PTYPE | \
-   EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC)
+   EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC | \
+   EBT_ARP_GRAT)
 #define EBT_ARP_MATCH "arp"
 
 struct ebt_arp_info
index 1803378..7185792 100644 (file)
@@ -1,9 +1,3 @@
-header-y += ip_conntrack_helper.h
-header-y += ip_conntrack_protocol.h
-header-y += ip_conntrack_sctp.h
-header-y += ip_conntrack_tcp.h
-header-y += ip_conntrack_tftp.h
-header-y += ip_nat_pptp.h
 header-y += ipt_addrtype.h
 header-y += ipt_ah.h
 header-y += ipt_CLASSIFY.h
@@ -49,13 +43,5 @@ header-y += ipt_ttl.h
 header-y += ipt_TTL.h
 header-y += ipt_ULOG.h
 
-unifdef-y += ip_conntrack.h
-unifdef-y += ip_conntrack_h323.h
-unifdef-y += ip_conntrack_irc.h
-unifdef-y += ip_conntrack_pptp.h
-unifdef-y += ip_conntrack_proto_gre.h
-unifdef-y += ip_conntrack_tuple.h
-unifdef-y += ip_nat.h
-unifdef-y += ip_nat_rule.h
 unifdef-y += ip_queue.h
 unifdef-y += ip_tables.h
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
deleted file mode 100644 (file)
index da9274e..0000000
+++ /dev/null
@@ -1,402 +0,0 @@
-#ifndef _IP_CONNTRACK_H
-#define _IP_CONNTRACK_H
-
-#include <linux/netfilter/nf_conntrack_common.h>
-
-#ifdef __KERNEL__
-#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
-#include <linux/bitops.h>
-#include <linux/compiler.h>
-#include <asm/atomic.h>
-
-#include <linux/timer.h>
-#include <linux/netfilter_ipv4/ip_conntrack_tcp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_icmp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-#include <linux/netfilter_ipv4/ip_conntrack_sctp.h>
-
-/* per conntrack: protocol private data */
-union ip_conntrack_proto {
-       /* insert conntrack proto private data here */
-       struct ip_ct_gre gre;
-       struct ip_ct_sctp sctp;
-       struct ip_ct_tcp tcp;
-       struct ip_ct_icmp icmp;
-};
-
-union ip_conntrack_expect_proto {
-       /* insert expect proto private data here */
-};
-
-/* Add protocol helper include file here */
-#include <linux/netfilter_ipv4/ip_conntrack_h323.h>
-#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_amanda.h>
-#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_irc.h>
-
-/* per conntrack: application helper private data */
-union ip_conntrack_help {
-       /* insert conntrack helper private data (master) here */
-       struct ip_ct_h323_master ct_h323_info;
-       struct ip_ct_pptp_master ct_pptp_info;
-       struct ip_ct_ftp_master ct_ftp_info;
-       struct ip_ct_irc_master ct_irc_info;
-};
-
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_pptp.h>
-
-/* per conntrack: nat application helper private data */
-union ip_conntrack_nat_help {
-       /* insert nat helper private data here */
-       struct ip_nat_pptp nat_pptp_info;
-};
-#endif
-
-#include <linux/types.h>
-#include <linux/skbuff.h>
-
-#ifdef CONFIG_NETFILTER_DEBUG
-#define IP_NF_ASSERT(x)                                                        \
-do {                                                                   \
-       if (!(x))                                                       \
-               /* Wooah!  I'm tripping my conntrack in a frenzy of     \
-                  netplay... */                                        \
-               printk("NF_IP_ASSERT: %s:%i(%s)\n",                     \
-                      __FILE__, __LINE__, __FUNCTION__);               \
-} while(0)
-#else
-#define IP_NF_ASSERT(x)
-#endif
-
-struct ip_conntrack_helper;
-
-struct ip_conntrack
-{
-       /* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
-           plus 1 for any connection(s) we are `master' for */
-       struct nf_conntrack ct_general;
-
-       /* Have we seen traffic both ways yet? (bitset) */
-       unsigned long status;
-
-       /* Timer function; drops refcnt when it goes off. */
-       struct timer_list timeout;
-
-#ifdef CONFIG_IP_NF_CT_ACCT
-       /* Accounting Information (same cache line as other written members) */
-       struct ip_conntrack_counter counters[IP_CT_DIR_MAX];
-#endif
-       /* If we were expected by an expectation, this will be it */
-       struct ip_conntrack *master;
-
-       /* Current number of expected connections */
-       unsigned int expecting;
-
-       /* Unique ID that identifies this conntrack*/
-       unsigned int id;
-
-       /* Helper, if any. */
-       struct ip_conntrack_helper *helper;
-
-       /* Storage reserved for other modules: */
-       union ip_conntrack_proto proto;
-
-       union ip_conntrack_help help;
-
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-       struct {
-               struct ip_nat_info info;
-               union ip_conntrack_nat_help help;
-#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
-       defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
-               int masq_index;
-#endif
-       } nat;
-#endif /* CONFIG_IP_NF_NAT_NEEDED */
-
-#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
-       u_int32_t mark;
-#endif
-
-#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK
-       u_int32_t secmark;
-#endif
-
-       /* Traversed often, so hopefully in different cacheline to top */
-       /* These are my tuples; original and reply */
-       struct ip_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
-};
-
-struct ip_conntrack_expect
-{
-       /* Internal linked list (global expectation list) */
-       struct list_head list;
-
-       /* We expect this tuple, with the following mask */
-       struct ip_conntrack_tuple tuple, mask;
-       /* Function to call after setup and insertion */
-       void (*expectfn)(struct ip_conntrack *new,
-                        struct ip_conntrack_expect *this);
-
-       /* The conntrack of the master connection */
-       struct ip_conntrack *master;
-
-       /* Timer function; deletes the expectation. */
-       struct timer_list timeout;
-
-       /* Usage count. */
-       atomic_t use;
-
-       /* Unique ID */
-       unsigned int id;
-
-       /* Flags */
-       unsigned int flags;
-
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-       __be32 saved_ip;
-       /* This is the original per-proto part, used to map the
-        * expected connection the way the recipient expects. */
-       union ip_conntrack_manip_proto saved_proto;
-       /* Direction relative to the master connection. */
-       enum ip_conntrack_dir dir;
-#endif
-};
-
-#define IP_CT_EXPECT_PERMANENT 0x1
-
-static inline struct ip_conntrack *
-tuplehash_to_ctrack(const struct ip_conntrack_tuple_hash *hash)
-{
-       return container_of(hash, struct ip_conntrack,
-                           tuplehash[hash->tuple.dst.dir]);
-}
-
-/* get master conntrack via master expectation */
-#define master_ct(conntr) (conntr->master)
-
-/* Alter reply tuple (maybe alter helper). */
-extern void
-ip_conntrack_alter_reply(struct ip_conntrack *conntrack,
-                        const struct ip_conntrack_tuple *newreply);
-
-/* Is this tuple taken? (ignoring any belonging to the given
-   conntrack). */
-extern int
-ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple,
-                        const struct ip_conntrack *ignored_conntrack);
-
-/* Return conntrack_info and tuple hash for given skb. */
-static inline struct ip_conntrack *
-ip_conntrack_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
-{
-       *ctinfo = skb->nfctinfo;
-       return (struct ip_conntrack *)skb->nfct;
-}
-
-/* decrement reference count on a conntrack */
-static inline void
-ip_conntrack_put(struct ip_conntrack *ct)
-{
-       IP_NF_ASSERT(ct);
-       nf_conntrack_put(&ct->ct_general);
-}
-
-extern int invert_tuplepr(struct ip_conntrack_tuple *inverse,
-                         const struct ip_conntrack_tuple *orig);
-
-extern void __ip_ct_refresh_acct(struct ip_conntrack *ct,
-                                enum ip_conntrack_info ctinfo,
-                                const struct sk_buff *skb,
-                                unsigned long extra_jiffies,
-                                int do_acct);
-
-/* Refresh conntrack for this many jiffies and do accounting */
-static inline void ip_ct_refresh_acct(struct ip_conntrack *ct, 
-                                     enum ip_conntrack_info ctinfo,
-                                     const struct sk_buff *skb,
-                                     unsigned long extra_jiffies)
-{
-       __ip_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1);
-}
-
-/* Refresh conntrack for this many jiffies */
-static inline void ip_ct_refresh(struct ip_conntrack *ct,
-                                const struct sk_buff *skb,
-                                unsigned long extra_jiffies)
-{
-       __ip_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
-}
-
-/* These are for NAT.  Icky. */
-/* Update TCP window tracking data when NAT mangles the packet */
-extern void ip_conntrack_tcp_update(struct sk_buff *skb,
-                                   struct ip_conntrack *conntrack,
-                                   enum ip_conntrack_dir dir);
-
-/* Call me when a conntrack is destroyed. */
-extern void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack);
-
-/* Fake conntrack entry for untracked connections */
-extern struct ip_conntrack ip_conntrack_untracked;
-
-/* Returns new sk_buff, or NULL */
-struct sk_buff *
-ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user);
-
-/* Iterate over all conntracks: if iter returns true, it's deleted. */
-extern void
-ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *data),
-                     void *data);
-
-extern struct ip_conntrack_helper *
-__ip_conntrack_helper_find_byname(const char *);
-extern struct ip_conntrack_helper *
-ip_conntrack_helper_find_get(const struct ip_conntrack_tuple *tuple);
-extern void ip_conntrack_helper_put(struct ip_conntrack_helper *helper);
-
-extern struct ip_conntrack_protocol *
-__ip_conntrack_proto_find(u_int8_t protocol);
-extern struct ip_conntrack_protocol *
-ip_conntrack_proto_find_get(u_int8_t protocol);
-extern void ip_conntrack_proto_put(struct ip_conntrack_protocol *proto);
-
-extern void ip_ct_remove_expectations(struct ip_conntrack *ct);
-
-extern struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *,
-                                              struct ip_conntrack_tuple *);
-
-extern void ip_conntrack_free(struct ip_conntrack *ct);
-
-extern void ip_conntrack_hash_insert(struct ip_conntrack *ct);
-
-extern struct ip_conntrack_expect *
-__ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple);
-
-extern struct ip_conntrack_expect *
-ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple);
-
-extern struct ip_conntrack_tuple_hash *
-__ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
-                    const struct ip_conntrack *ignored_conntrack);
-
-extern void ip_conntrack_flush(void);
-
-/* It's confirmed if it is, or has been in the hash table. */
-static inline int is_confirmed(struct ip_conntrack *ct)
-{
-       return test_bit(IPS_CONFIRMED_BIT, &ct->status);
-}
-
-static inline int is_dying(struct ip_conntrack *ct)
-{
-       return test_bit(IPS_DYING_BIT, &ct->status);
-}
-
-extern unsigned int ip_conntrack_htable_size;
-extern int ip_conntrack_checksum;
-#define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++)
-#define CONNTRACK_STAT_INC_ATOMIC(count)               \
-do {                                                   \
-       local_bh_disable();                             \
-       __get_cpu_var(ip_conntrack_stat).count++;       \
-       local_bh_enable();                              \
-} while (0)
-
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-#include <linux/notifier.h>
-#include <linux/interrupt.h>
-
-struct ip_conntrack_ecache {
-       struct ip_conntrack *ct;
-       unsigned int events;
-};
-DECLARE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
-
-#define CONNTRACK_ECACHE(x)    (__get_cpu_var(ip_conntrack_ecache).x)
-extern struct atomic_notifier_head ip_conntrack_chain;
-extern struct atomic_notifier_head ip_conntrack_expect_chain;
-
-static inline int ip_conntrack_register_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_register(&ip_conntrack_chain, nb);
-}
-
-static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_unregister(&ip_conntrack_chain, nb);
-}
-
-static inline int 
-ip_conntrack_expect_register_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_register(&ip_conntrack_expect_chain, nb);
-}
-
-static inline int
-ip_conntrack_expect_unregister_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_unregister(&ip_conntrack_expect_chain,
-                       nb);
-}
-
-extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct);
-extern void __ip_ct_event_cache_init(struct ip_conntrack *ct);
-
-static inline void 
-ip_conntrack_event_cache(enum ip_conntrack_events event,
-                        const struct sk_buff *skb)
-{
-       struct ip_conntrack *ct = (struct ip_conntrack *)skb->nfct;
-       struct ip_conntrack_ecache *ecache;
-       
-       local_bh_disable();
-       ecache = &__get_cpu_var(ip_conntrack_ecache);
-       if (ct != ecache->ct)
-               __ip_ct_event_cache_init(ct);
-       ecache->events |= event;
-       local_bh_enable();
-}
-
-static inline void ip_conntrack_event(enum ip_conntrack_events event,
-                                     struct ip_conntrack *ct)
-{
-       if (is_confirmed(ct) && !is_dying(ct))
-               atomic_notifier_call_chain(&ip_conntrack_chain, event, ct);
-}
-
-static inline void 
-ip_conntrack_expect_event(enum ip_conntrack_expect_events event,
-                         struct ip_conntrack_expect *exp)
-{
-       atomic_notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
-}
-#else /* CONFIG_IP_NF_CONNTRACK_EVENTS */
-static inline void ip_conntrack_event_cache(enum ip_conntrack_events event, 
-                                           const struct sk_buff *skb) {}
-static inline void ip_conntrack_event(enum ip_conntrack_events event, 
-                                     struct ip_conntrack *ct) {}
-static inline void ip_ct_deliver_cached_events(const struct ip_conntrack *ct) {}
-static inline void 
-ip_conntrack_expect_event(enum ip_conntrack_expect_events event, 
-                         struct ip_conntrack_expect *exp) {}
-#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
-
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-static inline int ip_nat_initialized(struct ip_conntrack *conntrack,
-                                    enum ip_nat_manip_type manip)
-{
-       if (manip == IP_NAT_MANIP_SRC)
-               return test_bit(IPS_SRC_NAT_DONE_BIT, &conntrack->status);
-       return test_bit(IPS_DST_NAT_DONE_BIT, &conntrack->status);
-}
-#endif /* CONFIG_IP_NF_NAT_NEEDED */
-
-#endif /* __KERNEL__ */
-#endif /* _IP_CONNTRACK_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_amanda.h b/include/linux/netfilter_ipv4/ip_conntrack_amanda.h
deleted file mode 100644 (file)
index de3e41f..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _IP_CONNTRACK_AMANDA_H
-#define _IP_CONNTRACK_AMANDA_H
-/* AMANDA tracking. */
-
-struct ip_conntrack_expect;
-extern unsigned int (*ip_nat_amanda_hook)(struct sk_buff **pskb,
-                                         enum ip_conntrack_info ctinfo,
-                                         unsigned int matchoff,
-                                         unsigned int matchlen,
-                                         struct ip_conntrack_expect *exp);
-#endif /* _IP_CONNTRACK_AMANDA_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_core.h b/include/linux/netfilter_ipv4/ip_conntrack_core.h
deleted file mode 100644 (file)
index e3a6df0..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _IP_CONNTRACK_CORE_H
-#define _IP_CONNTRACK_CORE_H
-#include <linux/netfilter.h>
-
-#define MAX_IP_CT_PROTO 256
-extern struct ip_conntrack_protocol *ip_ct_protos[MAX_IP_CT_PROTO];
-
-/* This header is used to share core functionality between the
-   standalone connection tracking module, and the compatibility layer's use
-   of connection tracking. */
-extern unsigned int ip_conntrack_in(unsigned int hooknum,
-                                   struct sk_buff **pskb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   int (*okfn)(struct sk_buff *));
-
-extern int ip_conntrack_init(void);
-extern void ip_conntrack_cleanup(void);
-
-struct ip_conntrack_protocol;
-
-extern int
-ip_ct_get_tuple(const struct iphdr *iph,
-               const struct sk_buff *skb,
-               unsigned int dataoff,
-               struct ip_conntrack_tuple *tuple,
-               const struct ip_conntrack_protocol *protocol);
-
-extern int
-ip_ct_invert_tuple(struct ip_conntrack_tuple *inverse,
-                  const struct ip_conntrack_tuple *orig,
-                  const struct ip_conntrack_protocol *protocol);
-
-/* Find a connection corresponding to a tuple. */
-struct ip_conntrack_tuple_hash *
-ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
-                     const struct ip_conntrack *ignored_conntrack);
-
-extern int __ip_conntrack_confirm(struct sk_buff **pskb);
-
-/* Confirm a connection: returns NF_DROP if packet must be dropped. */
-static inline int ip_conntrack_confirm(struct sk_buff **pskb)
-{
-       struct ip_conntrack *ct = (struct ip_conntrack *)(*pskb)->nfct;
-       int ret = NF_ACCEPT;
-
-       if (ct) {
-               if (!is_confirmed(ct) && !is_dying(ct))
-                       ret = __ip_conntrack_confirm(pskb);
-               ip_ct_deliver_cached_events(ct);
-       }
-       return ret;
-}
-
-extern void ip_ct_unlink_expect(struct ip_conntrack_expect *exp);
-
-extern struct list_head *ip_conntrack_hash;
-extern struct list_head ip_conntrack_expect_list;
-extern rwlock_t ip_conntrack_lock;
-#endif /* _IP_CONNTRACK_CORE_H */
-
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_ftp.h b/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
deleted file mode 100644 (file)
index 2129fc3..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _IP_CONNTRACK_FTP_H
-#define _IP_CONNTRACK_FTP_H
-/* FTP tracking. */
-
-/* This enum is exposed to userspace */
-enum ip_ct_ftp_type
-{
-       /* PORT command from client */
-       IP_CT_FTP_PORT,
-       /* PASV response from server */
-       IP_CT_FTP_PASV,
-       /* EPRT command from client */
-       IP_CT_FTP_EPRT,
-       /* EPSV response from server */
-       IP_CT_FTP_EPSV,
-};
-
-#ifdef __KERNEL__
-
-#define FTP_PORT       21
-
-#define NUM_SEQ_TO_REMEMBER 2
-/* This structure exists only once per master */
-struct ip_ct_ftp_master {
-       /* Valid seq positions for cmd matching after newline */
-       u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
-       /* 0 means seq_match_aft_nl not set */
-       int seq_aft_nl_num[IP_CT_DIR_MAX];
-};
-
-struct ip_conntrack_expect;
-
-/* For NAT to hook in when we find a packet which describes what other
- * connection we should expect. */
-extern unsigned int (*ip_nat_ftp_hook)(struct sk_buff **pskb,
-                                      enum ip_conntrack_info ctinfo,
-                                      enum ip_ct_ftp_type type,
-                                      unsigned int matchoff,
-                                      unsigned int matchlen,
-                                      struct ip_conntrack_expect *exp,
-                                      u32 *seq);
-#endif /* __KERNEL__ */
-
-#endif /* _IP_CONNTRACK_FTP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_h323.h b/include/linux/netfilter_ipv4/ip_conntrack_h323.h
deleted file mode 100644 (file)
index 18f7698..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef _IP_CONNTRACK_H323_H
-#define _IP_CONNTRACK_H323_H
-
-#ifdef __KERNEL__
-
-#include <linux/netfilter/nf_conntrack_h323_asn1.h>
-
-#define RAS_PORT 1719
-#define Q931_PORT 1720
-#define H323_RTP_CHANNEL_MAX 4 /* Audio, video, FAX and other */
-
-/* This structure exists only once per master */
-struct ip_ct_h323_master {
-
-       /* Original and NATed Q.931 or H.245 signal ports */
-       u_int16_t sig_port[IP_CT_DIR_MAX];
-
-       /* Original and NATed RTP ports */
-       u_int16_t rtp_port[H323_RTP_CHANNEL_MAX][IP_CT_DIR_MAX];
-
-       union {
-               /* RAS connection timeout */
-               u_int32_t timeout;
-
-               /* Next TPKT length (for separate TPKT header and data) */
-               u_int16_t tpkt_len[IP_CT_DIR_MAX];
-       };
-};
-
-struct ip_conntrack_expect;
-
-extern int get_h225_addr(unsigned char *data, TransportAddress * addr,
-                        __be32 * ip, u_int16_t * port);
-extern void ip_conntrack_h245_expect(struct ip_conntrack *new,
-                                    struct ip_conntrack_expect *this);
-extern void ip_conntrack_q931_expect(struct ip_conntrack *new,
-                                    struct ip_conntrack_expect *this);
-extern int (*set_h245_addr_hook) (struct sk_buff ** pskb,
-                                 unsigned char **data, int dataoff,
-                                 H245_TransportAddress * addr,
-                                 __be32 ip, u_int16_t port);
-extern int (*set_h225_addr_hook) (struct sk_buff ** pskb,
-                                 unsigned char **data, int dataoff,
-                                 TransportAddress * addr,
-                                 __be32 ip, u_int16_t port);
-extern int (*set_sig_addr_hook) (struct sk_buff ** pskb,
-                                struct ip_conntrack * ct,
-                                enum ip_conntrack_info ctinfo,
-                                unsigned char **data,
-                                TransportAddress * addr, int count);
-extern int (*set_ras_addr_hook) (struct sk_buff ** pskb,
-                                struct ip_conntrack * ct,
-                                enum ip_conntrack_info ctinfo,
-                                unsigned char **data,
-                                TransportAddress * addr, int count);
-extern int (*nat_rtp_rtcp_hook) (struct sk_buff ** pskb,
-                                struct ip_conntrack * ct,
-                                enum ip_conntrack_info ctinfo,
-                                unsigned char **data, int dataoff,
-                                H245_TransportAddress * addr,
-                                u_int16_t port, u_int16_t rtp_port,
-                                struct ip_conntrack_expect * rtp_exp,
-                                struct ip_conntrack_expect * rtcp_exp);
-extern int (*nat_t120_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct,
-                            enum ip_conntrack_info ctinfo,
-                            unsigned char **data, int dataoff,
-                            H245_TransportAddress * addr, u_int16_t port,
-                            struct ip_conntrack_expect * exp);
-extern int (*nat_h245_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct,
-                            enum ip_conntrack_info ctinfo,
-                            unsigned char **data, int dataoff,
-                            TransportAddress * addr, u_int16_t port,
-                            struct ip_conntrack_expect * exp);
-extern int (*nat_callforwarding_hook) (struct sk_buff ** pskb,
-                                      struct ip_conntrack * ct,
-                                      enum ip_conntrack_info ctinfo,
-                                      unsigned char **data, int dataoff,
-                                      TransportAddress * addr,
-                                      u_int16_t port,
-                                      struct ip_conntrack_expect * exp);
-extern int (*nat_q931_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct,
-                            enum ip_conntrack_info ctinfo,
-                            unsigned char **data, TransportAddress * addr,
-                            int idx, u_int16_t port,
-                            struct ip_conntrack_expect * exp);
-
-#endif
-
-#endif
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_helper.h b/include/linux/netfilter_ipv4/ip_conntrack_helper.h
deleted file mode 100644 (file)
index 77fe868..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/* IP connection tracking helpers. */
-#ifndef _IP_CONNTRACK_HELPER_H
-#define _IP_CONNTRACK_HELPER_H
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-
-struct module;
-
-struct ip_conntrack_helper
-{      
-       struct list_head list;          /* Internal use. */
-
-       const char *name;               /* name of the module */
-       struct module *me;              /* pointer to self */
-       unsigned int max_expected;      /* Maximum number of concurrent 
-                                        * expected connections */
-       unsigned int timeout;           /* timeout for expecteds */
-
-       /* Mask of things we will help (compared against server response) */
-       struct ip_conntrack_tuple tuple;
-       struct ip_conntrack_tuple mask;
-       
-       /* Function to call when data passes; return verdict, or -1 to
-           invalidate. */
-       int (*help)(struct sk_buff **pskb,
-                   struct ip_conntrack *ct,
-                   enum ip_conntrack_info conntrackinfo);
-
-       void (*destroy)(struct ip_conntrack *ct);
-
-       int (*to_nfattr)(struct sk_buff *skb, const struct ip_conntrack *ct);
-};
-
-extern int ip_conntrack_helper_register(struct ip_conntrack_helper *);
-extern void ip_conntrack_helper_unregister(struct ip_conntrack_helper *);
-
-/* Allocate space for an expectation: this is mandatory before calling 
-   ip_conntrack_expect_related.  You will have to call put afterwards. */
-extern struct ip_conntrack_expect *
-ip_conntrack_expect_alloc(struct ip_conntrack *master);
-extern void ip_conntrack_expect_put(struct ip_conntrack_expect *exp);
-
-/* Add an expected connection: can have more than one per connection */
-extern int ip_conntrack_expect_related(struct ip_conntrack_expect *exp);
-extern void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp);
-
-#endif /*_IP_CONNTRACK_HELPER_H*/
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_icmp.h b/include/linux/netfilter_ipv4/ip_conntrack_icmp.h
deleted file mode 100644 (file)
index eed5ee3..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _IP_CONNTRACK_ICMP_H
-#define _IP_CONNTRACK_ICMP_H
-
-#include <net/netfilter/ipv4/nf_conntrack_icmp.h>
-
-#endif /* _IP_CONNTRACK_ICMP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_irc.h b/include/linux/netfilter_ipv4/ip_conntrack_irc.h
deleted file mode 100644 (file)
index 16601e0..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/* IRC extension for IP connection tracking.
- * (C) 2000 by Harald Welte <laforge@gnumonks.org>
- * based on RR's ip_conntrack_ftp.h
- *
- * ip_conntrack_irc.h,v 1.6 2000/11/07 18:26:42 laforge Exp
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- *
- *
- */
-#ifndef _IP_CONNTRACK_IRC_H
-#define _IP_CONNTRACK_IRC_H
-
-/* This structure exists only once per master */
-struct ip_ct_irc_master {
-};
-
-#ifdef __KERNEL__
-extern unsigned int (*ip_nat_irc_hook)(struct sk_buff **pskb,
-                                      enum ip_conntrack_info ctinfo,
-                                      unsigned int matchoff,
-                                      unsigned int matchlen,
-                                      struct ip_conntrack_expect *exp);
-
-#define IRC_PORT       6667
-
-#endif /* __KERNEL__ */
-
-#endif /* _IP_CONNTRACK_IRC_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h
deleted file mode 100644 (file)
index 2644b1f..0000000
+++ /dev/null
@@ -1,326 +0,0 @@
-/* PPTP constants and structs */
-#ifndef _CONNTRACK_PPTP_H
-#define _CONNTRACK_PPTP_H
-
-/* state of the control session */
-enum pptp_ctrlsess_state {
-       PPTP_SESSION_NONE,                      /* no session present */
-       PPTP_SESSION_ERROR,                     /* some session error */
-       PPTP_SESSION_STOPREQ,                   /* stop_sess request seen */
-       PPTP_SESSION_REQUESTED,                 /* start_sess request seen */
-       PPTP_SESSION_CONFIRMED,                 /* session established */
-};
-
-/* state of the call inside the control session */
-enum pptp_ctrlcall_state {
-       PPTP_CALL_NONE,
-       PPTP_CALL_ERROR,
-       PPTP_CALL_OUT_REQ,
-       PPTP_CALL_OUT_CONF,
-       PPTP_CALL_IN_REQ,
-       PPTP_CALL_IN_REP,
-       PPTP_CALL_IN_CONF,
-       PPTP_CALL_CLEAR_REQ,
-};
-
-
-/* conntrack private data */
-struct ip_ct_pptp_master {
-       enum pptp_ctrlsess_state sstate;        /* session state */
-
-       /* everything below is going to be per-expectation in newnat,
-        * since there could be more than one call within one session */
-       enum pptp_ctrlcall_state cstate;        /* call state */
-       __be16 pac_call_id;                     /* call id of PAC, host byte order */
-       __be16 pns_call_id;                     /* call id of PNS, host byte order */
-
-       /* in pre-2.6.11 this used to be per-expect. Now it is per-conntrack
-        * and therefore imposes a fixed limit on the number of maps */
-       struct ip_ct_gre_keymap *keymap_orig, *keymap_reply;
-};
-
-/* conntrack_expect private member */
-struct ip_ct_pptp_expect {
-       enum pptp_ctrlcall_state cstate;        /* call state */
-       __be16 pac_call_id;                     /* call id of PAC */
-       __be16 pns_call_id;                     /* call id of PNS */
-};
-
-
-#ifdef __KERNEL__
-
-#define IP_CONNTR_PPTP         PPTP_CONTROL_PORT
-
-#define PPTP_CONTROL_PORT      1723
-
-#define PPTP_PACKET_CONTROL    1
-#define PPTP_PACKET_MGMT       2
-
-#define PPTP_MAGIC_COOKIE      0x1a2b3c4d
-
-struct pptp_pkt_hdr {
-       __u16   packetLength;
-       __be16  packetType;
-       __be32  magicCookie;
-};
-
-/* PptpControlMessageType values */
-#define PPTP_START_SESSION_REQUEST     1
-#define PPTP_START_SESSION_REPLY       2
-#define PPTP_STOP_SESSION_REQUEST      3
-#define PPTP_STOP_SESSION_REPLY                4
-#define PPTP_ECHO_REQUEST              5
-#define PPTP_ECHO_REPLY                        6
-#define PPTP_OUT_CALL_REQUEST          7
-#define PPTP_OUT_CALL_REPLY            8
-#define PPTP_IN_CALL_REQUEST           9
-#define PPTP_IN_CALL_REPLY             10
-#define PPTP_IN_CALL_CONNECT           11
-#define PPTP_CALL_CLEAR_REQUEST                12
-#define PPTP_CALL_DISCONNECT_NOTIFY    13
-#define PPTP_WAN_ERROR_NOTIFY          14
-#define PPTP_SET_LINK_INFO             15
-
-#define PPTP_MSG_MAX                   15
-
-/* PptpGeneralError values */
-#define PPTP_ERROR_CODE_NONE           0
-#define PPTP_NOT_CONNECTED             1
-#define PPTP_BAD_FORMAT                        2
-#define PPTP_BAD_VALUE                 3
-#define PPTP_NO_RESOURCE               4
-#define PPTP_BAD_CALLID                        5
-#define PPTP_REMOVE_DEVICE_ERROR       6
-
-struct PptpControlHeader {
-       __be16  messageType;
-       __u16   reserved;
-};
-
-/* FramingCapability Bitmap Values */
-#define PPTP_FRAME_CAP_ASYNC           0x1
-#define PPTP_FRAME_CAP_SYNC            0x2
-
-/* BearerCapability Bitmap Values */
-#define PPTP_BEARER_CAP_ANALOG         0x1
-#define PPTP_BEARER_CAP_DIGITAL                0x2
-
-struct PptpStartSessionRequest {
-       __be16  protocolVersion;
-       __u16   reserved1;
-       __be32  framingCapability;
-       __be32  bearerCapability;
-       __be16  maxChannels;
-       __be16  firmwareRevision;
-       __u8    hostName[64];
-       __u8    vendorString[64];
-};
-
-/* PptpStartSessionResultCode Values */
-#define PPTP_START_OK                  1
-#define PPTP_START_GENERAL_ERROR       2
-#define PPTP_START_ALREADY_CONNECTED   3
-#define PPTP_START_NOT_AUTHORIZED      4
-#define PPTP_START_UNKNOWN_PROTOCOL    5
-
-struct PptpStartSessionReply {
-       __be16  protocolVersion;
-       __u8    resultCode;
-       __u8    generalErrorCode;
-       __be32  framingCapability;
-       __be32  bearerCapability;
-       __be16  maxChannels;
-       __be16  firmwareRevision;
-       __u8    hostName[64];
-       __u8    vendorString[64];
-};
-
-/* PptpStopReasons */
-#define PPTP_STOP_NONE                 1
-#define PPTP_STOP_PROTOCOL             2
-#define PPTP_STOP_LOCAL_SHUTDOWN       3
-
-struct PptpStopSessionRequest {
-       __u8    reason;
-       __u8    reserved1;
-       __u16   reserved2;
-};
-
-/* PptpStopSessionResultCode */
-#define PPTP_STOP_OK                   1
-#define PPTP_STOP_GENERAL_ERROR                2
-
-struct PptpStopSessionReply {
-       __u8    resultCode;
-       __u8    generalErrorCode;
-       __u16   reserved1;
-};
-
-struct PptpEchoRequest {
-       __be32 identNumber;
-};
-
-/* PptpEchoReplyResultCode */
-#define PPTP_ECHO_OK                   1
-#define PPTP_ECHO_GENERAL_ERROR                2
-
-struct PptpEchoReply {
-       __be32  identNumber;
-       __u8    resultCode;
-       __u8    generalErrorCode;
-       __u16   reserved;
-};
-
-/* PptpFramingType */
-#define PPTP_ASYNC_FRAMING             1
-#define PPTP_SYNC_FRAMING              2
-#define PPTP_DONT_CARE_FRAMING         3
-
-/* PptpCallBearerType */
-#define PPTP_ANALOG_TYPE               1
-#define PPTP_DIGITAL_TYPE              2
-#define PPTP_DONT_CARE_BEARER_TYPE     3
-
-struct PptpOutCallRequest {
-       __be16  callID;
-       __be16  callSerialNumber;
-       __be32  minBPS;
-       __be32  maxBPS;
-       __be32  bearerType;
-       __be32  framingType;
-       __be16  packetWindow;
-       __be16  packetProcDelay;
-       __be16  phoneNumberLength;
-       __u16   reserved1;
-       __u8    phoneNumber[64];
-       __u8    subAddress[64];
-};
-
-/* PptpCallResultCode */
-#define PPTP_OUTCALL_CONNECT           1
-#define PPTP_OUTCALL_GENERAL_ERROR     2
-#define PPTP_OUTCALL_NO_CARRIER                3
-#define PPTP_OUTCALL_BUSY              4
-#define PPTP_OUTCALL_NO_DIAL_TONE      5
-#define PPTP_OUTCALL_TIMEOUT           6
-#define PPTP_OUTCALL_DONT_ACCEPT       7
-
-struct PptpOutCallReply {
-       __be16  callID;
-       __be16  peersCallID;
-       __u8    resultCode;
-       __u8    generalErrorCode;
-       __be16  causeCode;
-       __be32  connectSpeed;
-       __be16  packetWindow;
-       __be16  packetProcDelay;
-       __be32  physChannelID;
-};
-
-struct PptpInCallRequest {
-       __be16  callID;
-       __be16  callSerialNumber;
-       __be32  callBearerType;
-       __be32  physChannelID;
-       __be16  dialedNumberLength;
-       __be16  dialingNumberLength;
-       __u8    dialedNumber[64];
-       __u8    dialingNumber[64];
-       __u8    subAddress[64];
-};
-
-/* PptpInCallResultCode */
-#define PPTP_INCALL_ACCEPT             1
-#define PPTP_INCALL_GENERAL_ERROR      2
-#define PPTP_INCALL_DONT_ACCEPT                3
-
-struct PptpInCallReply {
-       __be16  callID;
-       __be16  peersCallID;
-       __u8    resultCode;
-       __u8    generalErrorCode;
-       __be16  packetWindow;
-       __be16  packetProcDelay;
-       __u16   reserved;
-};
-
-struct PptpInCallConnected {
-       __be16  peersCallID;
-       __u16   reserved;
-       __be32  connectSpeed;
-       __be16  packetWindow;
-       __be16  packetProcDelay;
-       __be32  callFramingType;
-};
-
-struct PptpClearCallRequest {
-       __be16  callID;
-       __u16   reserved;
-};
-
-struct PptpCallDisconnectNotify {
-       __be16  callID;
-       __u8    resultCode;
-       __u8    generalErrorCode;
-       __be16  causeCode;
-       __u16   reserved;
-       __u8    callStatistics[128];
-};
-
-struct PptpWanErrorNotify {
-       __be16  peersCallID;
-       __u16   reserved;
-       __be32  crcErrors;
-       __be32  framingErrors;
-       __be32  hardwareOverRuns;
-       __be32  bufferOverRuns;
-       __be32  timeoutErrors;
-       __be32  alignmentErrors;
-};
-
-struct PptpSetLinkInfo {
-       __be16  peersCallID;
-       __u16   reserved;
-       __be32  sendAccm;
-       __be32  recvAccm;
-};
-
-union pptp_ctrl_union {
-       struct PptpStartSessionRequest  sreq;
-       struct PptpStartSessionReply    srep;
-       struct PptpStopSessionRequest   streq;
-       struct PptpStopSessionReply     strep;
-       struct PptpOutCallRequest       ocreq;
-       struct PptpOutCallReply         ocack;
-       struct PptpInCallRequest        icreq;
-       struct PptpInCallReply          icack;
-       struct PptpInCallConnected      iccon;
-       struct PptpClearCallRequest     clrreq;
-       struct PptpCallDisconnectNotify disc;
-       struct PptpWanErrorNotify       wanerr;
-       struct PptpSetLinkInfo          setlink;
-};
-
-extern int
-(*ip_nat_pptp_hook_outbound)(struct sk_buff **pskb,
-                         struct ip_conntrack *ct,
-                         enum ip_conntrack_info ctinfo,
-                         struct PptpControlHeader *ctlh,
-                         union pptp_ctrl_union *pptpReq);
-
-extern int
-(*ip_nat_pptp_hook_inbound)(struct sk_buff **pskb,
-                         struct ip_conntrack *ct,
-                         enum ip_conntrack_info ctinfo,
-                         struct PptpControlHeader *ctlh,
-                         union pptp_ctrl_union *pptpReq);
-
-extern void
-(*ip_nat_pptp_hook_exp_gre)(struct ip_conntrack_expect *exp_orig,
-                           struct ip_conntrack_expect *exp_reply);
-
-extern void
-(*ip_nat_pptp_hook_expectfn)(struct ip_conntrack *ct,
-                            struct ip_conntrack_expect *exp);
-#endif /* __KERNEL__ */
-#endif /* _CONNTRACK_PPTP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h b/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h
deleted file mode 100644 (file)
index e371e0f..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef _CONNTRACK_PROTO_GRE_H
-#define _CONNTRACK_PROTO_GRE_H
-#include <asm/byteorder.h>
-
-/* GRE PROTOCOL HEADER */
-
-/* GRE Version field */
-#define GRE_VERSION_1701       0x0
-#define GRE_VERSION_PPTP       0x1
-
-/* GRE Protocol field */
-#define GRE_PROTOCOL_PPTP      0x880B
-
-/* GRE Flags */
-#define GRE_FLAG_C             0x80
-#define GRE_FLAG_R             0x40
-#define GRE_FLAG_K             0x20
-#define GRE_FLAG_S             0x10
-#define GRE_FLAG_A             0x80
-
-#define GRE_IS_C(f)    ((f)&GRE_FLAG_C)
-#define GRE_IS_R(f)    ((f)&GRE_FLAG_R)
-#define GRE_IS_K(f)    ((f)&GRE_FLAG_K)
-#define GRE_IS_S(f)    ((f)&GRE_FLAG_S)
-#define GRE_IS_A(f)    ((f)&GRE_FLAG_A)
-
-/* GRE is a mess: Four different standards */
-struct gre_hdr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       __u16   rec:3,
-               srr:1,
-               seq:1,
-               key:1,
-               routing:1,
-               csum:1,
-               version:3,
-               reserved:4,
-               ack:1;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-       __u16   csum:1,
-               routing:1,
-               key:1,
-               seq:1,
-               srr:1,
-               rec:3,
-               ack:1,
-               reserved:4,
-               version:3;
-#else
-#error "Adjust your <asm/byteorder.h> defines"
-#endif
-       __be16  protocol;
-};
-
-/* modified GRE header for PPTP */
-struct gre_hdr_pptp {
-       __u8   flags;           /* bitfield */
-       __u8   version;         /* should be GRE_VERSION_PPTP */
-       __be16 protocol;        /* should be GRE_PROTOCOL_PPTP */
-       __be16 payload_len;     /* size of ppp payload, not inc. gre header */
-       __be16 call_id;         /* peer's call_id for this session */
-       __be32 seq;             /* sequence number.  Present if S==1 */
-       __be32 ack;             /* seq number of highest packet received by */
-                               /*  sender in this session */
-};
-
-
-/* this is part of ip_conntrack */
-struct ip_ct_gre {
-       unsigned int stream_timeout;
-       unsigned int timeout;
-};
-
-#ifdef __KERNEL__
-struct ip_conntrack_expect;
-struct ip_conntrack;
-
-/* structure for original <-> reply keymap */
-struct ip_ct_gre_keymap {
-       struct list_head list;
-
-       struct ip_conntrack_tuple tuple;
-};
-
-/* add new tuple->key_reply pair to keymap */
-int ip_ct_gre_keymap_add(struct ip_conntrack *ct,
-                        struct ip_conntrack_tuple *t,
-                        int reply);
-
-/* delete keymap entries */
-void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct);
-
-
-/* get pointer to gre key, if present */
-static inline __be32 *gre_key(struct gre_hdr *greh)
-{
-       if (!greh->key)
-               return NULL;
-       if (greh->csum || greh->routing)
-               return (__be32 *) (greh+sizeof(*greh)+4);
-       return (__be32 *) (greh+sizeof(*greh));
-}
-
-/* get pointer to gre csum, if present */
-static inline __sum16 *gre_csum(struct gre_hdr *greh)
-{
-       if (!greh->csum)
-               return NULL;
-       return (__sum16 *) (greh+sizeof(*greh));
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_protocol.h b/include/linux/netfilter_ipv4/ip_conntrack_protocol.h
deleted file mode 100644 (file)
index 2c76b87..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/* Header for use in defining a given protocol for connection tracking. */
-#ifndef _IP_CONNTRACK_PROTOCOL_H
-#define _IP_CONNTRACK_PROTOCOL_H
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-struct seq_file;
-
-struct ip_conntrack_protocol
-{
-       /* Protocol number. */
-       u_int8_t proto;
-
-       /* Protocol name */
-       const char *name;
-
-       /* Try to fill in the third arg: dataoff is offset past IP
-           hdr.  Return true if possible. */
-       int (*pkt_to_tuple)(const struct sk_buff *skb,
-                          unsigned int dataoff,
-                          struct ip_conntrack_tuple *tuple);
-
-       /* Invert the per-proto part of the tuple: ie. turn xmit into reply.
-        * Some packets can't be inverted: return 0 in that case.
-        */
-       int (*invert_tuple)(struct ip_conntrack_tuple *inverse,
-                           const struct ip_conntrack_tuple *orig);
-
-       /* Print out the per-protocol part of the tuple. Return like seq_* */
-       int (*print_tuple)(struct seq_file *,
-                          const struct ip_conntrack_tuple *);
-
-       /* Print out the private part of the conntrack. */
-       int (*print_conntrack)(struct seq_file *, const struct ip_conntrack *);
-
-       /* Returns verdict for packet, or -1 for invalid. */
-       int (*packet)(struct ip_conntrack *conntrack,
-                     const struct sk_buff *skb,
-                     enum ip_conntrack_info ctinfo);
-
-       /* Called when a new connection for this protocol found;
-        * returns TRUE if it's OK.  If so, packet() called next. */
-       int (*new)(struct ip_conntrack *conntrack, const struct sk_buff *skb);
-
-       /* Called when a conntrack entry is destroyed */
-       void (*destroy)(struct ip_conntrack *conntrack);
-
-       int (*error)(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
-                    unsigned int hooknum);
-
-       /* convert protoinfo to nfnetlink attributes */
-       int (*to_nfattr)(struct sk_buff *skb, struct nfattr *nfa,
-                        const struct ip_conntrack *ct);
-
-       /* convert nfnetlink attributes to protoinfo */
-       int (*from_nfattr)(struct nfattr *tb[], struct ip_conntrack *ct);
-
-       int (*tuple_to_nfattr)(struct sk_buff *skb,
-                              const struct ip_conntrack_tuple *t);
-       int (*nfattr_to_tuple)(struct nfattr *tb[],
-                              struct ip_conntrack_tuple *t);
-
-       /* Module (if any) which this is connected to. */
-       struct module *me;
-};
-
-/* Protocol registration. */
-extern int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto);
-extern void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto);
-/* Existing built-in protocols */
-extern struct ip_conntrack_protocol ip_conntrack_protocol_tcp;
-extern struct ip_conntrack_protocol ip_conntrack_protocol_udp;
-extern struct ip_conntrack_protocol ip_conntrack_protocol_icmp;
-extern struct ip_conntrack_protocol ip_conntrack_generic_protocol;
-extern int ip_conntrack_protocol_tcp_init(void);
-
-/* Log invalid packets */
-extern unsigned int ip_ct_log_invalid;
-
-extern int ip_ct_port_tuple_to_nfattr(struct sk_buff *,
-                                     const struct ip_conntrack_tuple *);
-extern int ip_ct_port_nfattr_to_tuple(struct nfattr *tb[],
-                                     struct ip_conntrack_tuple *);
-
-#ifdef CONFIG_SYSCTL
-#ifdef DEBUG_INVALID_PACKETS
-#define LOG_INVALID(proto) \
-       (ip_ct_log_invalid == (proto) || ip_ct_log_invalid == IPPROTO_RAW)
-#else
-#define LOG_INVALID(proto) \
-       ((ip_ct_log_invalid == (proto) || ip_ct_log_invalid == IPPROTO_RAW) \
-        && net_ratelimit())
-#endif
-#else
-#define LOG_INVALID(proto) 0
-#endif /* CONFIG_SYSCTL */
-
-#endif /*_IP_CONNTRACK_PROTOCOL_H*/
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_sctp.h b/include/linux/netfilter_ipv4/ip_conntrack_sctp.h
deleted file mode 100644 (file)
index 4099a04..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _IP_CONNTRACK_SCTP_H
-#define _IP_CONNTRACK_SCTP_H
-
-#include <linux/netfilter/nf_conntrack_sctp.h>
-
-#endif /* _IP_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_sip.h b/include/linux/netfilter_ipv4/ip_conntrack_sip.h
deleted file mode 100644 (file)
index bef6c64..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __IP_CONNTRACK_SIP_H__
-#define __IP_CONNTRACK_SIP_H__
-#ifdef __KERNEL__
-
-#define SIP_PORT       5060
-#define SIP_TIMEOUT    3600
-
-enum sip_header_pos {
-       POS_REG_REQ_URI,
-       POS_REQ_URI,
-       POS_FROM,
-       POS_TO,
-       POS_VIA,
-       POS_CONTACT,
-       POS_CONTENT,
-       POS_MEDIA,
-       POS_OWNER,
-       POS_CONNECTION,
-       POS_SDP_HEADER,
-};
-
-extern unsigned int (*ip_nat_sip_hook)(struct sk_buff **pskb,
-                                      enum ip_conntrack_info ctinfo,
-                                      struct ip_conntrack *ct,
-                                      const char **dptr);
-extern unsigned int (*ip_nat_sdp_hook)(struct sk_buff **pskb,
-                                      enum ip_conntrack_info ctinfo,
-                                      struct ip_conntrack_expect *exp,
-                                      const char *dptr);
-
-extern int ct_sip_get_info(const char *dptr, size_t dlen,
-                          unsigned int *matchoff,
-                          unsigned int *matchlen,
-                          enum sip_header_pos pos);
-extern int ct_sip_lnlen(const char *line, const char *limit);
-extern const char *ct_sip_search(const char *needle, const char *haystack,
-                                size_t needle_len, size_t haystack_len,
-                                int case_sensitive);
-#endif /* __KERNEL__ */
-#endif /* __IP_CONNTRACK_SIP_H__ */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tcp.h b/include/linux/netfilter_ipv4/ip_conntrack_tcp.h
deleted file mode 100644 (file)
index 876b8fb..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _IP_CONNTRACK_TCP_H
-#define _IP_CONNTRACK_TCP_H
-
-#include <linux/netfilter/nf_conntrack_tcp.h>
-
-#endif /* _IP_CONNTRACK_TCP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tftp.h b/include/linux/netfilter_ipv4/ip_conntrack_tftp.h
deleted file mode 100644 (file)
index a404fc0..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _IP_CT_TFTP
-#define _IP_CT_TFTP
-
-#define TFTP_PORT 69
-
-struct tftphdr {
-       __be16 opcode;
-};
-
-#define TFTP_OPCODE_READ       1
-#define TFTP_OPCODE_WRITE      2
-#define TFTP_OPCODE_DATA       3
-#define TFTP_OPCODE_ACK                4
-#define TFTP_OPCODE_ERROR      5
-
-extern unsigned int (*ip_nat_tftp_hook)(struct sk_buff **pskb,
-                                enum ip_conntrack_info ctinfo,
-                                struct ip_conntrack_expect *exp);
-
-#endif /* _IP_CT_TFTP */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
deleted file mode 100644 (file)
index c228bde..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-#ifndef _IP_CONNTRACK_TUPLE_H
-#define _IP_CONNTRACK_TUPLE_H
-
-#include <linux/types.h>
-#include <linux/netfilter/nf_conntrack_tuple_common.h>
-
-/* A `tuple' is a structure containing the information to uniquely
-  identify a connection.  ie. if two packets have the same tuple, they
-  are in the same connection; if not, they are not.
-
-  We divide the structure along "manipulatable" and
-  "non-manipulatable" lines, for the benefit of the NAT code.
-*/
-
-/* The protocol-specific manipulable parts of the tuple: always in
-   network order! */
-union ip_conntrack_manip_proto
-{
-       /* Add other protocols here. */
-       u_int16_t all;
-
-       struct {
-               __be16 port;
-       } tcp;
-       struct {
-               __be16 port;
-       } udp;
-       struct {
-               __be16 id;
-       } icmp;
-       struct {
-               __be16 port;
-       } sctp;
-       struct {
-               __be16 key;     /* key is 32bit, pptp only uses 16 */
-       } gre;
-};
-
-/* The manipulable part of the tuple. */
-struct ip_conntrack_manip
-{
-       __be32 ip;
-       union ip_conntrack_manip_proto u;
-};
-
-/* This contains the information to distinguish a connection. */
-struct ip_conntrack_tuple
-{
-       struct ip_conntrack_manip src;
-
-       /* These are the parts of the tuple which are fixed. */
-       struct {
-               __be32 ip;
-               union {
-                       /* Add other protocols here. */
-                       u_int16_t all;
-
-                       struct {
-                               __be16 port;
-                       } tcp;
-                       struct {
-                               __be16 port;
-                       } udp;
-                       struct {
-                               u_int8_t type, code;
-                       } icmp;
-                       struct {
-                               __be16 port;
-                       } sctp;
-                       struct {
-                               __be16 key;     /* key is 32bit, 
-                                                * pptp only uses 16 */
-                       } gre;
-               } u;
-
-               /* The protocol. */
-               u_int8_t protonum;
-
-               /* The direction (for tuplehash) */
-               u_int8_t dir;
-       } dst;
-};
-
-/* This is optimized as opposed to a memset of the whole structure.  Everything we
- * really care about is the source/destination unions */
-#define IP_CT_TUPLE_U_BLANK(tuple)                             \
-       do {                                                    \
-               (tuple)->src.u.all = 0;                         \
-               (tuple)->dst.u.all = 0;                         \
-       } while (0)
-
-#ifdef __KERNEL__
-
-#define DUMP_TUPLE(tp)                                         \
-DEBUGP("tuple %p: %u %u.%u.%u.%u:%hu -> %u.%u.%u.%u:%hu\n",    \
-       (tp), (tp)->dst.protonum,                               \
-       NIPQUAD((tp)->src.ip), ntohs((tp)->src.u.all),          \
-       NIPQUAD((tp)->dst.ip), ntohs((tp)->dst.u.all))
-
-/* If we're the first tuple, it's the original dir. */
-#define DIRECTION(h) ((enum ip_conntrack_dir)(h)->tuple.dst.dir)
-
-/* Connections have two entries in the hash table: one for each way */
-struct ip_conntrack_tuple_hash
-{
-       struct list_head list;
-
-       struct ip_conntrack_tuple tuple;
-};
-
-#endif /* __KERNEL__ */
-
-static inline int ip_ct_tuple_src_equal(const struct ip_conntrack_tuple *t1,
-                                       const struct ip_conntrack_tuple *t2)
-{
-       return t1->src.ip == t2->src.ip
-               && t1->src.u.all == t2->src.u.all;
-}
-
-static inline int ip_ct_tuple_dst_equal(const struct ip_conntrack_tuple *t1,
-                                       const struct ip_conntrack_tuple *t2)
-{
-       return t1->dst.ip == t2->dst.ip
-               && t1->dst.u.all == t2->dst.u.all
-               && t1->dst.protonum == t2->dst.protonum;
-}
-
-static inline int ip_ct_tuple_equal(const struct ip_conntrack_tuple *t1,
-                                   const struct ip_conntrack_tuple *t2)
-{
-       return ip_ct_tuple_src_equal(t1, t2) && ip_ct_tuple_dst_equal(t1, t2);
-}
-
-static inline int ip_ct_tuple_mask_cmp(const struct ip_conntrack_tuple *t,
-                                      const struct ip_conntrack_tuple *tuple,
-                                      const struct ip_conntrack_tuple *mask)
-{
-       return !(((t->src.ip ^ tuple->src.ip) & mask->src.ip)
-                || ((t->dst.ip ^ tuple->dst.ip) & mask->dst.ip)
-                || ((t->src.u.all ^ tuple->src.u.all) & mask->src.u.all)
-                || ((t->dst.u.all ^ tuple->dst.u.all) & mask->dst.u.all)
-                || ((t->dst.protonum ^ tuple->dst.protonum)
-                    & mask->dst.protonum));
-}
-
-#endif /* _IP_CONNTRACK_TUPLE_H */
diff --git a/include/linux/netfilter_ipv4/ip_nat.h b/include/linux/netfilter_ipv4/ip_nat.h
deleted file mode 100644 (file)
index bbca89a..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef _IP_NAT_H
-#define _IP_NAT_H
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
-
-#define IP_NAT_MAPPING_TYPE_MAX_NAMELEN 16
-
-enum ip_nat_manip_type
-{
-       IP_NAT_MANIP_SRC,
-       IP_NAT_MANIP_DST
-};
-
-/* SRC manip occurs POST_ROUTING or LOCAL_IN */
-#define HOOK2MANIP(hooknum) ((hooknum) != NF_IP_POST_ROUTING && (hooknum) != NF_IP_LOCAL_IN)
-
-#define IP_NAT_RANGE_MAP_IPS 1
-#define IP_NAT_RANGE_PROTO_SPECIFIED 2
-#define IP_NAT_RANGE_PROTO_RANDOM 4 /* add randomness to "port" selection */
-
-/* NAT sequence number modifications */
-struct ip_nat_seq {
-       /* position of the last TCP sequence number 
-        * modification (if any) */
-       u_int32_t correction_pos;
-       /* sequence number offset before and after last modification */
-       int16_t offset_before, offset_after;
-};
-
-/* Single range specification. */
-struct ip_nat_range
-{
-       /* Set to OR of flags above. */
-       unsigned int flags;
-
-       /* Inclusive: network order. */
-       __be32 min_ip, max_ip;
-
-       /* Inclusive: network order */
-       union ip_conntrack_manip_proto min, max;
-};
-
-/* For backwards compat: don't use in modern code. */
-struct ip_nat_multi_range_compat
-{
-       unsigned int rangesize; /* Must be 1. */
-
-       /* hangs off end. */
-       struct ip_nat_range range[1];
-};
-
-#ifdef __KERNEL__
-#include <linux/list.h>
-
-/* Protects NAT hash tables, and NAT-private part of conntracks. */
-extern rwlock_t ip_nat_lock;
-
-/* The structure embedded in the conntrack structure. */
-struct ip_nat_info
-{
-       struct list_head bysource;
-       struct ip_nat_seq seq[IP_CT_DIR_MAX];
-};
-
-struct ip_conntrack;
-
-/* Set up the info structure to map into this range. */
-extern unsigned int ip_nat_setup_info(struct ip_conntrack *conntrack,
-                                     const struct ip_nat_range *range,
-                                     unsigned int hooknum);
-
-/* Is this tuple already taken? (not by us)*/
-extern int ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple,
-                            const struct ip_conntrack *ignored_conntrack);
-
-#else  /* !__KERNEL__: iptables wants this to compile. */
-#define ip_nat_multi_range ip_nat_multi_range_compat
-#endif /*__KERNEL__*/
-#endif
diff --git a/include/linux/netfilter_ipv4/ip_nat_core.h b/include/linux/netfilter_ipv4/ip_nat_core.h
deleted file mode 100644 (file)
index 60566f9..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _IP_NAT_CORE_H
-#define _IP_NAT_CORE_H
-#include <linux/list.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-
-/* This header used to share core functionality between the standalone
-   NAT module, and the compatibility layer's use of NAT for masquerading. */
-
-extern unsigned int ip_nat_packet(struct ip_conntrack *ct,
-                              enum ip_conntrack_info conntrackinfo,
-                              unsigned int hooknum,
-                              struct sk_buff **pskb);
-
-extern int ip_nat_icmp_reply_translation(struct ip_conntrack *ct,
-                                        enum ip_conntrack_info ctinfo,
-                                        unsigned int hooknum,
-                                        struct sk_buff **pskb);
-#endif /* _IP_NAT_CORE_H */
diff --git a/include/linux/netfilter_ipv4/ip_nat_helper.h b/include/linux/netfilter_ipv4/ip_nat_helper.h
deleted file mode 100644 (file)
index bf9cb10..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef _IP_NAT_HELPER_H
-#define _IP_NAT_HELPER_H
-/* NAT protocol helper routines. */
-
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/module.h>
-
-struct sk_buff;
-
-/* These return true or false. */
-extern int ip_nat_mangle_tcp_packet(struct sk_buff **skb,
-                               struct ip_conntrack *ct,
-                               enum ip_conntrack_info ctinfo,
-                               unsigned int match_offset,
-                               unsigned int match_len,
-                               const char *rep_buffer,
-                               unsigned int rep_len);
-extern int ip_nat_mangle_udp_packet(struct sk_buff **skb,
-                               struct ip_conntrack *ct,
-                               enum ip_conntrack_info ctinfo,
-                               unsigned int match_offset,
-                               unsigned int match_len,
-                               const char *rep_buffer,
-                               unsigned int rep_len);
-extern int ip_nat_seq_adjust(struct sk_buff **pskb, 
-                            struct ip_conntrack *ct, 
-                            enum ip_conntrack_info ctinfo);
-
-/* Setup NAT on this expected conntrack so it follows master, but goes
- * to port ct->master->saved_proto. */
-extern void ip_nat_follow_master(struct ip_conntrack *ct,
-                                struct ip_conntrack_expect *this);
-#endif
diff --git a/include/linux/netfilter_ipv4/ip_nat_pptp.h b/include/linux/netfilter_ipv4/ip_nat_pptp.h
deleted file mode 100644 (file)
index 36668bf..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-/* PPTP constants and structs */
-#ifndef _NAT_PPTP_H
-#define _NAT_PPTP_H
-
-/* conntrack private data */
-struct ip_nat_pptp {
-       __be16 pns_call_id;             /* NAT'ed PNS call id */
-       __be16 pac_call_id;             /* NAT'ed PAC call id */
-};
-
-#endif /* _NAT_PPTP_H */
diff --git a/include/linux/netfilter_ipv4/ip_nat_protocol.h b/include/linux/netfilter_ipv4/ip_nat_protocol.h
deleted file mode 100644 (file)
index 612a436..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/* Header for use in defining a given protocol. */
-#ifndef _IP_NAT_PROTOCOL_H
-#define _IP_NAT_PROTOCOL_H
-#include <linux/init.h>
-#include <linux/list.h>
-
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-struct iphdr;
-struct ip_nat_range;
-
-struct ip_nat_protocol
-{
-       /* Protocol name */
-       const char *name;
-
-       /* Protocol number. */
-       unsigned int protonum;
-
-       struct module *me;
-
-       /* Translate a packet to the target according to manip type.
-          Return true if succeeded. */
-       int (*manip_pkt)(struct sk_buff **pskb,
-                        unsigned int iphdroff,
-                        const struct ip_conntrack_tuple *tuple,
-                        enum ip_nat_manip_type maniptype);
-
-       /* Is the manipulable part of the tuple between min and max incl? */
-       int (*in_range)(const struct ip_conntrack_tuple *tuple,
-                       enum ip_nat_manip_type maniptype,
-                       const union ip_conntrack_manip_proto *min,
-                       const union ip_conntrack_manip_proto *max);
-
-       /* Alter the per-proto part of the tuple (depending on
-          maniptype), to give a unique tuple in the given range if
-          possible; return false if not.  Per-protocol part of tuple
-          is initialized to the incoming packet. */
-       int (*unique_tuple)(struct ip_conntrack_tuple *tuple,
-                           const struct ip_nat_range *range,
-                           enum ip_nat_manip_type maniptype,
-                           const struct ip_conntrack *conntrack);
-
-       int (*range_to_nfattr)(struct sk_buff *skb,
-                              const struct ip_nat_range *range);
-
-       int (*nfattr_to_range)(struct nfattr *tb[],
-                              struct ip_nat_range *range);
-};
-
-/* Protocol registration. */
-extern int ip_nat_protocol_register(struct ip_nat_protocol *proto);
-extern void ip_nat_protocol_unregister(struct ip_nat_protocol *proto);
-
-extern struct ip_nat_protocol *ip_nat_proto_find_get(u_int8_t protocol);
-extern void ip_nat_proto_put(struct ip_nat_protocol *proto);
-
-/* Built-in protocols. */
-extern struct ip_nat_protocol ip_nat_protocol_tcp;
-extern struct ip_nat_protocol ip_nat_protocol_udp;
-extern struct ip_nat_protocol ip_nat_protocol_icmp;
-extern struct ip_nat_protocol ip_nat_unknown_protocol;
-
-extern int init_protocols(void) __init;
-extern void cleanup_protocols(void);
-extern struct ip_nat_protocol *find_nat_proto(u_int16_t protonum);
-
-extern int ip_nat_port_range_to_nfattr(struct sk_buff *skb,
-                                      const struct ip_nat_range *range);
-extern int ip_nat_port_nfattr_to_range(struct nfattr *tb[],
-                                      struct ip_nat_range *range);
-
-#endif /*_IP_NAT_PROTO_H*/
diff --git a/include/linux/netfilter_ipv4/ip_nat_rule.h b/include/linux/netfilter_ipv4/ip_nat_rule.h
deleted file mode 100644 (file)
index 73b9552..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef _IP_NAT_RULE_H
-#define _IP_NAT_RULE_H
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-
-#ifdef __KERNEL__
-
-extern int ip_nat_rule_init(void) __init;
-extern void ip_nat_rule_cleanup(void);
-extern int ip_nat_rule_find(struct sk_buff **pskb,
-                           unsigned int hooknum,
-                           const struct net_device *in,
-                           const struct net_device *out,
-                           struct ip_conntrack *ct,
-                           struct ip_nat_info *info);
-
-extern unsigned int
-alloc_null_binding(struct ip_conntrack *conntrack,
-                  struct ip_nat_info *info,
-                  unsigned int hooknum);
-
-extern unsigned int
-alloc_null_binding_confirmed(struct ip_conntrack *conntrack,
-                            struct ip_nat_info *info,
-                            unsigned int hooknum);
-#endif
-#endif /* _IP_NAT_RULE_H */
index cc4c0b2..be6e682 100644 (file)
@@ -13,7 +13,7 @@ struct ipt_same_info
        u_int32_t *iparray;
 
        /* hangs off end. */
-       struct ip_nat_range range[IPT_SAME_MAX_RANGE];
+       struct nf_nat_range range[IPT_SAME_MAX_RANGE];
 };
 
 #endif /*_IPT_SAME_H*/
index 2a20f48..f41688f 100644 (file)
@@ -138,6 +138,11 @@ struct nlattr
 #include <linux/capability.h>
 #include <linux/skbuff.h>
 
+static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
+{
+       return (struct nlmsghdr *)skb->data;
+}
+
 struct netlink_skb_parms
 {
        struct ucred            creds;          /* Skb credentials      */
@@ -152,7 +157,10 @@ struct netlink_skb_parms
 #define NETLINK_CREDS(skb)     (&NETLINK_CB((skb)).creds)
 
 
-extern struct sock *netlink_kernel_create(int unit, unsigned int groups, void (*input)(struct sock *sk, int len), struct module *module);
+extern struct sock *netlink_kernel_create(int unit, unsigned int groups,
+                                         void (*input)(struct sock *sk, int len),
+                                         struct mutex *cb_mutex,
+                                         struct module *module);
 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
 extern int netlink_has_listeners(struct sock *sk, unsigned int group);
 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
@@ -171,9 +179,16 @@ int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol);
 
 /*
  *     skb should fit one page. This choice is good for headerless malloc.
+ *     But we should limit to 8K so that userspace does not have to
+ *     use enormous buffer sizes on recvmsg() calls just to avoid
+ *     MSG_TRUNC when PAGE_SIZE is very large.
  */
-#define NLMSG_GOODORDER 0
-#define NLMSG_GOODSIZE (SKB_MAX_ORDER(0, NLMSG_GOODORDER))
+#if PAGE_SIZE < 8192UL
+#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE)
+#else
+#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(8192UL)
+#endif
+
 #define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
 
 
@@ -217,18 +232,6 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
 #define NLMSG_PUT(skb, pid, seq, type, len) \
        NLMSG_NEW(skb, pid, seq, type, len, 0)
 
-#define NLMSG_NEW_ANSWER(skb, cb, type, len, flags) \
-       NLMSG_NEW(skb, NETLINK_CB((cb)->skb).pid, \
-                 (cb)->nlh->nlmsg_seq, type, len, flags)
-
-#define NLMSG_END(skb, nlh) \
-({     (nlh)->nlmsg_len = (skb)->tail - (unsigned char *) (nlh); \
-       (skb)->len; })
-
-#define NLMSG_CANCEL(skb, nlh) \
-({     skb_trim(skb, (unsigned char *) (nlh) - (skb)->data); \
-       -1; })
-
 extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                              struct nlmsghdr *nlh,
                              int (*dump)(struct sk_buff *skb, struct netlink_callback*),
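
A minimal sketch (hypothetical caller, not part of this patch) of how code adapts to the two netlink.h changes above: the new nlmsg_hdr() accessor and the extra cb_mutex argument to netlink_kernel_create(). The protocol number, callback, and module boilerplate are illustrative only; passing NULL for cb_mutex is assumed to select the netlink core's own per-socket mutex, which this hunk does not spell out.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static struct sock *sample_nl_sk;

static void sample_nl_input(struct sock *sk, int len)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		/* new accessor instead of casting skb->data by hand */
		struct nlmsghdr *nlh = nlmsg_hdr(skb);

		printk(KERN_DEBUG "nl msg type %u, len %u\n",
		       nlh->nlmsg_type, nlh->nlmsg_len);
		kfree_skb(skb);
	}
}

static int __init sample_nl_init(void)
{
	/* NULL cb_mutex: assumed fallback to the core's internal mutex */
	sample_nl_sk = netlink_kernel_create(NETLINK_USERSOCK, 0,
					     sample_nl_input, NULL, THIS_MODULE);
	return sample_nl_sk ? 0 : -ENOMEM;
}
module_init(sample_nl_init);
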
index d111be6..16b0266 100644 (file)
@@ -49,8 +49,6 @@ struct nfs_page {
 };
 
 #define NFS_WBACK_BUSY(req)    (test_bit(PG_BUSY,&(req)->wb_flags))
-#define NFS_NEED_COMMIT(req)   (test_bit(PG_NEED_COMMIT,&(req)->wb_flags))
-#define NFS_NEED_RESCHED(req)  (test_bit(PG_NEED_RESCHED,&(req)->wb_flags))
 
 extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
                                            struct inode *inode,
@@ -121,34 +119,6 @@ nfs_list_remove_request(struct nfs_page *req)
        req->wb_list_head = NULL;
 }
 
-static inline int
-nfs_defer_commit(struct nfs_page *req)
-{
-       return !test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags);
-}
-
-static inline void
-nfs_clear_commit(struct nfs_page *req)
-{
-       smp_mb__before_clear_bit();
-       clear_bit(PG_NEED_COMMIT, &req->wb_flags);
-       smp_mb__after_clear_bit();
-}
-
-static inline int
-nfs_defer_reschedule(struct nfs_page *req)
-{
-       return !test_and_set_bit(PG_NEED_RESCHED, &req->wb_flags);
-}
-
-static inline void
-nfs_clear_reschedule(struct nfs_page *req)
-{
-       smp_mb__before_clear_bit();
-       clear_bit(PG_NEED_RESCHED, &req->wb_flags);
-       smp_mb__after_clear_bit();
-}
-
 static inline struct nfs_page *
 nfs_list_entry(struct list_head *head)
 {
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
new file mode 100644 (file)
index 0000000..9a30ba2
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef __LINUX_NL80211_H
+#define __LINUX_NL80211_H
+/*
+ * 802.11 netlink interface public header
+ *
+ * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net>
+ */
+
+/**
+ * enum nl80211_iftype - (virtual) interface types
+ * @NL80211_IFTYPE_UNSPECIFIED: unspecified type, driver decides
+ * @NL80211_IFTYPE_ADHOC: independent BSS member
+ * @NL80211_IFTYPE_STATION: managed BSS member
+ * @NL80211_IFTYPE_AP: access point
+ * @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points
+ * @NL80211_IFTYPE_WDS: wireless distribution interface
+ * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames
+ * @__NL80211_IFTYPE_AFTER_LAST: internal use
+ *
+ * These values are used with the NL80211_ATTR_IFTYPE
+ * to set the type of an interface.
+ *
+ */
+enum nl80211_iftype {
+       NL80211_IFTYPE_UNSPECIFIED,
+       NL80211_IFTYPE_ADHOC,
+       NL80211_IFTYPE_STATION,
+       NL80211_IFTYPE_AP,
+       NL80211_IFTYPE_AP_VLAN,
+       NL80211_IFTYPE_WDS,
+       NL80211_IFTYPE_MONITOR,
+
+       /* keep last */
+       __NL80211_IFTYPE_AFTER_LAST
+};
+#define NL80211_IFTYPE_MAX (__NL80211_IFTYPE_AFTER_LAST - 1)
+
+#endif /* __LINUX_NL80211_H */
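
A tiny hedged example, not from this patch, of how a consumer of the new header might range-check an interface-type value against NL80211_IFTYPE_MAX; the function name is hypothetical.

#include <linux/errno.h>
#include <linux/nl80211.h>

static int sample_check_iftype(unsigned int iftype)
{
	/* anything past the last defined type is rejected */
	if (iftype > NL80211_IFTYPE_MAX)
		return -EINVAL;
	return 0;
}
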
index 9cd0d0e..9632659 100644 (file)
 static inline void SetPageUptodate(struct page *page)
 {
        if (!test_and_set_bit(PG_uptodate, &page->flags))
-               page_test_and_clear_dirty(page);
+               page_clear_dirty(page);
 }
 #else
 #define SetPageUptodate(page)  set_bit(PG_uptodate, &(page)->flags)
index 4a629ea..1fae30a 100644 (file)
@@ -574,13 +574,6 @@ extern int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, in
 #define rtattr_parse_nested(tb, max, rta) \
        rtattr_parse((tb), (max), RTA_DATA((rta)), RTA_PAYLOAD((rta)))
 
-struct rtnetlink_link
-{
-       int (*doit)(struct sk_buff *, struct nlmsghdr*, void *attr);
-       int (*dumpit)(struct sk_buff *, struct netlink_callback *cb);
-};
-
-extern struct rtnetlink_link * rtnetlink_links[NPROTO];
 extern int rtnetlink_send(struct sk_buff *skb, u32 pid, u32 group, int echo);
 extern int rtnl_unicast(struct sk_buff *skb, u32 pid);
 extern int rtnl_notify(struct sk_buff *skb, u32 pid, u32 group,
@@ -605,7 +598,7 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
 
 #define RTA_PUT_NOHDR(skb, attrlen, data) \
 ({     RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \
-       memset(skb->tail - (RTA_ALIGN(attrlen) - attrlen), 0, \
+       memset(skb_tail_pointer(skb) - (RTA_ALIGN(attrlen) - attrlen), 0, \
               RTA_ALIGN(attrlen) - attrlen); })
 
 #define RTA_PUT_U8(skb, attrtype, value) \
@@ -637,12 +630,12 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
        RTA_PUT(skb, attrtype, 0, NULL);
 
 #define RTA_NEST(skb, type) \
-({     struct rtattr *__start = (struct rtattr *) (skb)->tail; \
+({     struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \
        RTA_PUT(skb, type, 0, NULL); \
        __start;  })
 
 #define RTA_NEST_END(skb, start) \
-({     (start)->rta_len = ((skb)->tail - (unsigned char *) (start)); \
+({     (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
        (skb)->len; })
 
 #define RTA_NEST_CANCEL(skb, start) \
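
A hedged sketch, not part of the patch, of the RTA_NEST/RTA_NEST_END pattern after the switch to skb_tail_pointer(); the attribute type and payload are placeholders, and the rtattr_failure label follows the usual RTA_PUT convention.

#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

static int sample_fill_nested(struct sk_buff *skb)
{
	struct rtattr *nest = NULL;
	u32 value = 42;				/* placeholder payload */

	nest = RTA_NEST(skb, RTA_UNSPEC);	/* opens at skb_tail_pointer(skb) */
	RTA_PUT_U32(skb, RTA_UNSPEC, value);
	RTA_NEST_END(skb, nest);		/* rta_len measured from skb_tail_pointer(skb) */
	return 0;

rtattr_failure:
	RTA_NEST_CANCEL(skb, nest);
	return -EMSGSIZE;
}
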
diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h
new file mode 100644 (file)
index 0000000..f7b826b
--- /dev/null
@@ -0,0 +1,62 @@
+/* AF_RXRPC parameters
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_H
+#define _LINUX_RXRPC_H
+
+#include <linux/in.h>
+#include <linux/in6.h>
+
+/*
+ * RxRPC socket address
+ */
+struct sockaddr_rxrpc {
+       sa_family_t     srx_family;     /* address family */
+       u16             srx_service;    /* service desired */
+       u16             transport_type; /* type of transport socket (SOCK_DGRAM) */
+       u16             transport_len;  /* length of transport address */
+       union {
+               sa_family_t family;             /* transport address family */
+               struct sockaddr_in sin;         /* IPv4 transport address */
+               struct sockaddr_in6 sin6;       /* IPv6 transport address */
+       } transport;
+};
+
+/*
+ * RxRPC socket options
+ */
+#define RXRPC_SECURITY_KEY             1       /* [clnt] set client security key */
+#define RXRPC_SECURITY_KEYRING         2       /* [srvr] set ring of server security keys */
+#define RXRPC_EXCLUSIVE_CONNECTION     3       /* [clnt] use exclusive RxRPC connection */
+#define RXRPC_MIN_SECURITY_LEVEL       4       /* minimum security level */
+
+/*
+ * RxRPC control messages
+ * - terminal messages mean that a user call ID tag can be recycled
+ */
+#define RXRPC_USER_CALL_ID     1       /* user call ID specifier */
+#define RXRPC_ABORT            2       /* abort request / notification [terminal] */
+#define RXRPC_ACK              3       /* [Server] RPC op final ACK received [terminal] */
+#define RXRPC_NET_ERROR                5       /* network error received [terminal] */
+#define RXRPC_BUSY             6       /* server busy received [terminal] */
+#define RXRPC_LOCAL_ERROR      7       /* local error generated [terminal] */
+#define RXRPC_NEW_CALL         8       /* [Server] new incoming call notification */
+#define RXRPC_ACCEPT           9       /* [Server] accept request */
+
+/*
+ * RxRPC security levels
+ */
+#define RXRPC_SECURITY_PLAIN   0       /* plain secure-checksummed packets only */
+#define RXRPC_SECURITY_AUTH    1       /* authenticated packets */
+#define RXRPC_SECURITY_ENCRYPT 2       /* encrypted packets */
+
+
+#endif /* _LINUX_RXRPC_H */
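
A minimal sketch, not from this patch, of filling in the new sockaddr_rxrpc for an IPv4 transport. The service ID and port are made up, and AF_RXRPC itself is defined elsewhere in the AF_RXRPC series rather than in this header.

#include <asm/byteorder.h>
#include <linux/in.h>
#include <linux/net.h>		/* SOCK_DGRAM */
#include <linux/rxrpc.h>
#include <linux/socket.h>	/* AF_INET; AF_RXRPC comes from the wider series */
#include <linux/string.h>

static void sample_fill_rxrpc_addr(struct sockaddr_rxrpc *srx)
{
	memset(srx, 0, sizeof(*srx));
	srx->srx_family = AF_RXRPC;
	srx->srx_service = 52;			/* hypothetical service ID */
	srx->transport_type = SOCK_DGRAM;	/* per the header comment */
	srx->transport_len = sizeof(srx->transport.sin);
	srx->transport.sin.sin_family = AF_INET;
	srx->transport.sin.sin_port = htons(7000);	/* hypothetical port */
}
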
index d4f8656..d70df61 100644 (file)
@@ -63,6 +63,15 @@ typedef struct sctphdr {
        __be32 checksum;
 } __attribute__((packed)) sctp_sctphdr_t;
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb)
+{
+       return (struct sctphdr *)skb_transport_header(skb);
+}
+#endif
+
 /* Section 3.2.  Chunk Field Descriptions. */
 typedef struct sctp_chunkhdr {
        __u8 type;
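
A one-line hedged illustration of the new sctp_hdr() accessor added above, assuming this hunk is include/linux/sctp.h and that "skb" has its transport header set; the wrapper function is hypothetical.

#include <linux/sctp.h>
#include <linux/skbuff.h>

static __be32 sample_sctp_checksum(const struct sk_buff *skb)
{
	/* replaces open-coded casts of the transport header */
	return sctp_hdr(skb)->checksum;
}
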
diff --git a/include/linux/sdla_fr.h b/include/linux/sdla_fr.h
deleted file mode 100644 (file)
index cdfa77f..0000000
+++ /dev/null
@@ -1,638 +0,0 @@
-/*****************************************************************************
-* sdla_fr.h    Sangoma frame relay firmware API definitions.
-*
-* Author:       Gideon Hack    
-*              Nenad Corbic <ncorbic@sangoma.com>      
-*
-* Copyright:   (c) 1995-2000 Sangoma Technologies Inc.
-*
-*              This program is free software; you can redistribute it and/or
-*              modify it under the terms of the GNU General Public License
-*              as published by the Free Software Foundation; either version
-*              2 of the License, or (at your option) any later version.
-* ============================================================================
-* Oct 04, 1999  Gideon Hack     Updated API structures
-* Jun 02, 1999  Gideon Hack    Modifications for S514 support
-* Oct 12, 1997 Jaspreet Singh  Added FR_READ_DLCI_IB_MAPPING
-* Jul 21, 1997         Jaspreet Singh  Changed FRRES_TOO_LONG and FRRES_TOO_MANY to 
-*                              0x05 and 0x06 respectively.
-* Dec 23, 1996 Gene Kozin      v2.0
-* Apr 29, 1996 Gene Kozin      v1.0 (merged version S502 & S508 definitions).
-* Sep 26, 1995 Gene Kozin      Initial version.
-*****************************************************************************/
-#ifndef        _SDLA_FR_H
-#define        _SDLA_FR_H
-
-/*----------------------------------------------------------------------------
- * Notes:
- * ------
- * 1. All structures defined in this file are byte-aligned.
- *
- *     Compiler        Platform
- *     --------        --------
- *     GNU C           Linux
- */
-
-#ifndef        PACKED
-#    define    PACKED  __attribute__((packed))
-#endif /* PACKED */
-
-/* Adapter memory layout */
-#define        FR_MB_VECTOR    0xE000  /* mailbox window vector */
-#define        FR502_RX_VECTOR 0xA000  /* S502 direct receive window vector */
-#define        FR502_MBOX_OFFS 0xF60   /* S502 mailbox offset */
-#define        FR508_MBOX_OFFS 0       /* S508 mailbox offset */
-#define        FR502_FLAG_OFFS 0x1FF0  /* S502 status flags offset */
-#define        FR508_FLAG_OFFS 0x1000  /* S508 status flags offset */
-#define        FR502_RXMB_OFFS 0x900   /* S502 direct receive mailbox offset */
-#define        FR508_TXBC_OFFS 0x1100  /* S508 Tx buffer info offset */
-#define        FR508_RXBC_OFFS 0x1120  /* S508 Rx buffer info offset */
-
-/* Important constants */
-#define FR502_MAX_DATA 4096    /* maximum data buffer length */
-#define FR508_MAX_DATA 4080    /* maximum data buffer length */
-#define MIN_LGTH_FR_DATA_CFG         300     /* min Information frame length
-(for configuration purposes) */
-#define FR_MAX_NO_DATA_BYTES_IN_FRAME  15354   /* max Information frame length */
-#define HIGHEST_VALID_DLCI     991
-
-/****** Data Structures *****************************************************/
-
-/*----------------------------------------------------------------------------
- * Frame relay command block.
- */
-typedef struct fr_cmd
-{
-       unsigned char  command  PACKED; /* command code */
-       unsigned short length   PACKED; /* length of data buffer */
-       unsigned char  result   PACKED; /* return code */
-       unsigned short dlci     PACKED; /* DLCI number */
-       unsigned char  attr     PACKED; /* FECN, BECN, DE and C/R bits */
-       unsigned short rxlost1  PACKED; /* frames discarded at int. level */
-       unsigned long  rxlost2  PACKED; /* frames discarded at app. level */
-       unsigned char  rsrv[2]  PACKED; /* reserved for future use */
-} fr_cmd_t;
-
-/* 'command' field defines */
-#define        FR_WRITE                0x01
-#define        FR_READ                 0x02
-#define        FR_ISSUE_IS_FRAME       0x03
-#define FR_SET_CONFIG          0x10
-#define FR_READ_CONFIG         0x11
-#define FR_COMM_DISABLE                0x12
-#define FR_COMM_ENABLE         0x13
-#define FR_READ_STATUS         0x14
-#define FR_READ_STATISTICS     0x15
-#define FR_FLUSH_STATISTICS    0x16
-#define        FR_LIST_ACTIVE_DLCI     0x17
-#define FR_FLUSH_DATA_BUFFERS  0x18
-#define FR_READ_ADD_DLC_STATS  0x19
-#define        FR_ADD_DLCI             0x20
-#define        FR_DELETE_DLCI          0x21
-#define        FR_ACTIVATE_DLCI        0x22
-#define        FR_DEACTIVATE_DLCI      0x22
-#define FR_READ_MODEM_STATUS   0x30
-#define FR_SET_MODEM_STATUS    0x31
-#define FR_READ_ERROR_STATS    0x32
-#define FR_FLUSH_ERROR_STATS   0x33
-#define FR_READ_DLCI_IB_MAPPING 0x34
-#define FR_READ_CODE_VERSION   0x40
-#define        FR_SET_INTR_MODE        0x50
-#define        FR_READ_INTR_MODE       0x51
-#define FR_SET_TRACE_CONFIG    0x60
-#define FR_FT1_STATUS_CTRL     0x80
-#define FR_SET_FT1_MODE                0x81
-
-/* Special UDP drivers management commands */
-#define FPIPE_ENABLE_TRACING           0x41
-#define FPIPE_DISABLE_TRACING          0x42
-#define FPIPE_GET_TRACE_INFO            0x43
-#define FPIPE_FT1_READ_STATUS           0x44
-#define FPIPE_DRIVER_STAT_IFSEND        0x45
-#define FPIPE_DRIVER_STAT_INTR          0x46
-#define FPIPE_DRIVER_STAT_GEN           0x47
-#define FPIPE_FLUSH_DRIVER_STATS        0x48
-#define FPIPE_ROUTER_UP_TIME            0x49
-
-/* 'result' field defines */
-#define FRRES_OK               0x00    /* command executed successfully */
-#define        FRRES_DISABLED          0x01    /* communications not enabled */
-#define        FRRES_INOPERATIVE       0x02    /* channel inoperative */
-#define        FRRES_DLCI_INACTIVE     0x03    /* DLCI is inactive */
-#define        FRRES_DLCI_INVALID      0x04    /* DLCI is not configured */
-#define        FRRES_TOO_LONG          0x05
-#define        FRRES_TOO_MANY          0x06
-#define        FRRES_CIR_OVERFLOW      0x07    /* Tx throughput has exceeded CIR */
-#define        FRRES_BUFFER_OVERFLOW   0x08
-#define        FRRES_MODEM_FAILURE     0x10    /* DCD and/or CTS dropped */
-#define        FRRES_CHANNEL_DOWN      0x11    /* channel became inoperative */
-#define        FRRES_CHANNEL_UP        0x12    /* channel became operative */
-#define        FRRES_DLCI_CHANGE       0x13    /* DLCI status (or number) changed */
-#define        FRRES_DLCI_MISMATCH     0x14
-#define        FRRES_INVALID_CMD       0x1F    /* invalid command */
-
-/* 'attr' field defines */
-#define        FRATTR_
-
-/*----------------------------------------------------------------------------
- * Frame relay mailbox.
- *     This structure is located at offset FR50?_MBOX_OFFS into FR_MB_VECTOR.
- *     For S502 it is also located at offset FR502_RXMB_OFFS into
- *     FR502_RX_VECTOR.
- */
-typedef struct fr_mbox
-{
-       unsigned char opflag    PACKED; /* 00h: execution flag */
-       fr_cmd_t cmd            PACKED; /* 01h: command block */
-       unsigned char data[1]   PACKED; /* 10h: variable length data buffer */
-} fr_mbox_t;
-
-/*----------------------------------------------------------------------------
- * S502 frame relay status flags.
- *     This structure is located at offset FR502_FLAG_OFFS into FR_MB_VECTOR.
- */
-typedef struct fr502_flags
-{      
-       unsigned char rsrv1[1]  PACKED; /* 00h: */
-       unsigned char tx_ready  PACKED; /* 01h: Tx buffer available */
-       unsigned char rx_ready  PACKED; /* 02h: Rx frame available */
-       unsigned char event     PACKED; /* 03h: asynchronous event */
-       unsigned char mstatus   PACKED; /* 04h: modem status */
-       unsigned char rsrv2[8]  PACKED; /* 05h: */
-       unsigned char iflag     PACKED; /* 0Dh: interrupt flag */
-       unsigned char imask     PACKED; /* 0Eh: interrupt mask */
-} fr502_flags_t;
-
-/*----------------------------------------------------------------------------
- * S508 frame relay status flags.
- *     This structure is located at offset FR508_FLAG_OFFS into FR_MB_VECTOR.
- */
-typedef struct fr508_flags
-{
-       unsigned char rsrv1[3]  PACKED; /* 00h: reserved */
-       unsigned char event     PACKED; /* 03h: asynchronous event */
-       unsigned char mstatus   PACKED; /* 04h: modem status */
-       unsigned char rsrv2[11] PACKED; /* 05h: reserved */
-       unsigned char iflag     PACKED; /* 10h: interrupt flag */
-       unsigned char imask     PACKED; /* 11h: interrupt mask */
-       unsigned long tse_offs  PACKED; /* 12h: Tx status element */
-       unsigned short dlci     PACKED; /* 16h: DLCI NUMBER */
-} fr508_flags_t;
-
-/* 'event' field defines */
-#define        FR_EVENT_STATUS         0x01    /* channel status change */
-#define        FR_EVENT_DLC_STATUS     0x02    /* DLC status change */
-#define        FR_EVENT_BAD_DLCI       0x04    /* FSR included wrong DLCI */
-#define        FR_EVENT_LINK_DOWN      0x40    /* DCD or CTS low */
-
-/* 'mstatus' field defines */
-#define        FR_MDM_DCD              0x08    /* mdm_status: DCD */
-#define        FR_MDM_CTS              0x20    /* mdm_status: CTS */
-
-/* 'iflag' & 'imask' fields defines */
-#define        FR_INTR_RXRDY           0x01    /* Rx ready */
-#define        FR_INTR_TXRDY           0x02    /* Tx ready */
-#define        FR_INTR_MODEM           0x04    /* modem status change (DCD, CTS) */
-#define        FR_INTR_READY           0x08    /* interface command completed */
-#define        FR_INTR_DLC             0x10    /* DLC status change */
-#define        FR_INTR_TIMER           0x20    /* millisecond timer */
-#define FR_INTR_TX_MULT_DLCIs  0x80    /* Tx interrupt on multiple DLCIs */
-
-
-/*----------------------------------------------------------------------------
- * Receive Buffer Configuration Info. S508 only!
- *     This structure is located at offset FR508_RXBC_OFFS into FR_MB_VECTOR.
- */
-typedef struct fr_buf_info
-{
-       unsigned short rse_num  PACKED; /* 00h: number of status elements */
-       unsigned long rse_base  PACKED; /* 02h: receive status array base */
-       unsigned long rse_next  PACKED; /* 06h: next status element */
-       unsigned long buf_base  PACKED; /* 0Ah: rotational buffer base */
-       unsigned short reserved PACKED; /* 0Eh:  */
-       unsigned long buf_top   PACKED; /* 10h: rotational buffer top */
-} fr_buf_info_t;
-
-/*----------------------------------------------------------------------------
- * Buffer Status Element. S508 only!
- *     Array of structures of this type is located at offset defined by the
- *     'rse_base' field of the frBufInfo_t structure into absolute adapter
- *     memory address space.
- */
-typedef struct fr_rx_buf_ctl
-{
-       unsigned char flag      PACKED; /* 00h: ready flag */
-       unsigned short length   PACKED; /* 01h: frame length */
-       unsigned short dlci     PACKED; /* 03h: DLCI */
-       unsigned char attr      PACKED; /* 05h: FECN/BECN/DE/CR */
-       unsigned short tmstamp  PACKED; /* 06h: time stamp */
-       unsigned short rsrv[2]  PACKED; /* 08h:  */
-       unsigned long offset    PACKED; /* 0Ch: buffer absolute address */
-} fr_rx_buf_ctl_t;
-
-typedef struct  fr_tx_buf_ctl
-{
-        unsigned char flag      PACKED; /* 00h: ready flag */
-       unsigned short rsrv0[2] PACKED; /* 01h: */
-        unsigned short length   PACKED; /* 05h: frame length */
-        unsigned short dlci     PACKED; /* 07h: DLCI */
-        unsigned char attr      PACKED; /* 09h: FECN/BECN/DE/CR */
-        unsigned short rsrv1   PACKED; /* 0Ah:  */
-        unsigned long offset    PACKED; /* 0Ch: buffer absolute address */
-} fr_tx_buf_ctl_t;
-
-/*----------------------------------------------------------------------------
- * Global Configuration Block. Passed to FR_SET_CONFIG command when dlci == 0.
- */
-typedef struct fr_conf
-{
-       unsigned short station  PACKED; /* 00h: CPE/Node */
-       unsigned short options  PACKED; /* 02h: configuration options */
-       unsigned short kbps     PACKED; /* 04h: baud rate in kbps */
-       unsigned short port     PACKED; /* 06h: RS-232/V.35 */
-       unsigned short mtu      PACKED; /* 08h: max. transmit length */
-       unsigned short t391     PACKED; /* 0Ah:  */
-       unsigned short t392     PACKED; /* 0Ch:  */
-       unsigned short n391     PACKED; /* 0Eh:  */
-       unsigned short n392     PACKED; /* 10h:  */
-       unsigned short n393     PACKED; /* 12h:  */
-       unsigned short cir_fwd  PACKED; /* 14h:  */
-       unsigned short bc_fwd   PACKED; /* 16h:  */
-       unsigned short be_fwd   PACKED; /* 18h:  */
-       unsigned short cir_bwd  PACKED; /* 1Ah:  */
-       unsigned short bc_bwd   PACKED; /* 1Ch:  */
-       unsigned short be_bwd   PACKED; /* 1Eh:  */
-       unsigned short dlci[0]  PACKED; /* 20h:  */
-} fr_conf_t;
-
-/* 'station_type' defines */
-#define        FRCFG_STATION_CPE       0
-#define        FRCFG_STATION_NODE      1
-
-/* 'conf_flags' defines */
-#define        FRCFG_IGNORE_TX_CIR     0x0001
-#define        FRCFG_IGNORE_RX_CIR     0x0002
-#define        FRCFG_DONT_RETRANSMIT   0x0004
-#define        FRCFG_IGNORE_CBS        0x0008
-#define        FRCFG_THROUGHPUT        0x0010  /* enable throughput calculation */
-#define        FRCFG_DIRECT_RX         0x0080  /* enable direct receive buffer */
-#define        FRCFG_AUTO_CONFIG       0x8000  /* enable  auto DLCI configuration */
-
-/* 'baud_rate' defines */
-#define        FRCFG_BAUD_1200         12
-#define        FRCFG_BAUD_2400         24
-#define        FRCFG_BAUD_4800         48
-#define        FRCFG_BAUD_9600         96
-#define        FRCFG_BAUD_19200        19
-#define        FRCFG_BAUD_38400        38
-#define        FRCFG_BAUD_56000        56
-#define        FRCFG_BAUD_64000        64
-#define        FRCFG_BAUD_128000       128
-
-/* 'port_mode' defines */
-#define        FRCFG_MODE_EXT_CLK      0x0000
-#define        FRCFG_MODE_INT_CLK      0x0001
-#define        FRCFG_MODE_V35          0x0000  /* S508 only */
-#define        FRCFG_MODE_RS232        0x0002  /* S508 only */
-
-/* defines for line tracing */
-
-/* the line trace status element presented by the frame relay code */
-typedef struct {
-        unsigned char flag      PACKED; /* ready flag */
-        unsigned short length   PACKED; /* trace length */
-        unsigned char rsrv0[2]  PACKED; /* reserved */
-        unsigned char attr      PACKED; /* trace attributes */
-        unsigned short tmstamp  PACKED; /* time stamp */
-        unsigned char rsrv1[4]  PACKED; /* reserved */
-        unsigned long offset    PACKED; /* buffer absolute address */
-} fr_trc_el_t;
-
-typedef struct {
-        unsigned char status           PACKED; /* status flag */
-       unsigned char data_passed       PACKED; /* 0 if no data passed, 1 if */
-                                               /* data passed */
-        unsigned short length          PACKED; /* frame length */
-        unsigned short tmstamp         PACKED; /* time stamp */
-} fpipemon_trc_hdr_t;
-
-typedef struct {
-       fpipemon_trc_hdr_t fpipemon_trc_hdr                     PACKED;
-        unsigned char data[FR_MAX_NO_DATA_BYTES_IN_FRAME]      PACKED;
-} fpipemon_trc_t;
-
-/* bit settings for the 'status' byte  - note that bits 1, 2 and 3 are used */
-/* for returning the number of frames being passed to fpipemon */
-#define TRC_OUTGOING_FRM       0x01
-#define TRC_ABORT_ERROR         0x10
-#define TRC_CRC_ERROR           0x20
-#define TRC_OVERRUN_ERROR       0x40
-#define MORE_TRC_DATA          0x80
-
-#define MAX_FRMS_TRACED                0x07
-
-#define NO_TRC_ELEMENTS_OFF            0x9000
-#define BASE_TRC_ELEMENTS_OFF          0x9002
-#define TRC_ACTIVE                     0x01
-#define FLUSH_TRC_BUFFERS              0x02
-#define FLUSH_TRC_STATISTICS           0x04
-#define TRC_SIGNALLING_FRMS            0x10
-#define TRC_INFO_FRMS                  0x20
-#define ACTIVATE_TRC   (TRC_ACTIVE | TRC_SIGNALLING_FRMS | TRC_INFO_FRMS)
-#define RESET_TRC      (FLUSH_TRC_BUFFERS | FLUSH_TRC_STATISTICS)
-
-/*----------------------------------------------------------------------------
- * Channel configuration.
- *     This structure is passed to the FR_SET_CONFIG command when dlci != 0.
- */
-typedef struct fr_dlc_conf
-{
-       unsigned short conf_flags       PACKED; /* 00h: configuration bits */
-       unsigned short cir_fwd          PACKED; /* 02h:  */
-       unsigned short bc_fwd           PACKED; /* 04h:  */
-       unsigned short be_fwd           PACKED; /* 06h:  */
-       unsigned short cir_bwd          PACKED; /* 08h:  */
-       unsigned short bc_bwd           PACKED; /* 0Ah:  */
-       unsigned short be_bwd           PACKED; /* 0Ch:  */
-} fr_dlc_conf_t;
-
-/*----------------------------------------------------------------------------
- * S502 interrupt mode control block.
- *     This structure is passed to the FR_SET_INTR_FLAGS and returned by the
- *     FR_READ_INTR_FLAGS commands.
- */
-typedef struct fr502_intr_ctl
-{
-       unsigned char mode      PACKED; /* 00h: interrupt enable flags */
-       unsigned short tx_len   PACKED; /* 01h: required Tx buffer size */
-} fr502_intr_ctl_t;
-
-/*----------------------------------------------------------------------------
- * S508 interrupt mode control block.
- *     This structure is passed to the FR_SET_INTR_FLAGS and returned by the
- *     FR_READ_INTR_FLAGS commands.
- */
-typedef struct fr508_intr_ctl
-{
-       unsigned char mode      PACKED; /* 00h: interrupt enable flags */
-       unsigned short tx_len   PACKED; /* 01h: required Tx buffer size */
-       unsigned char irq       PACKED; /* 03h: IRQ level to activate */
-       unsigned char flags     PACKED; /* 04h: ?? */
-       unsigned short timeout  PACKED; /* 05h: ms, for timer interrupt */
-} fr508_intr_ctl_t;
-
-/*----------------------------------------------------------------------------
- * Channel status.
- *     This structure is returned by the FR_READ_STATUS command.
- */
-typedef struct fr_dlc_Status
-{
-       unsigned char status            PACKED; /* 00h: link/DLCI status */
-       struct
-       {
-               unsigned short dlci     PACKED; /* 01h: DLCI number */
-               unsigned char status    PACKED; /* 03h: DLCI status */
-       } circuit[1]                    PACKED;
-} fr_dlc_status_t;
-
-/* 'status' defines */
-#define        FR_LINK_INOPER  0x00            /* for global status (DLCI == 0) */
-#define        FR_LINK_OPER    0x01
-#define        FR_DLCI_DELETED 0x01            /* for circuit status (DLCI != 0) */
-#define        FR_DLCI_ACTIVE  0x02
-#define        FR_DLCI_WAITING 0x04
-#define        FR_DLCI_NEW     0x08
-#define        FR_DLCI_REPORT  0x40
-
-/*----------------------------------------------------------------------------
- * Global Statistics Block.
- *     This structure is returned by the FR_READ_STATISTICS command when
- *	dlci == 0.
- */
-typedef struct fr_link_stat
-{
-       unsigned short rx_too_long      PACKED; /* 00h:  */
-       unsigned short rx_dropped       PACKED; /* 02h:  */
-       unsigned short rx_dropped2      PACKED; /* 04h:  */
-       unsigned short rx_bad_dlci      PACKED; /* 06h:  */
-       unsigned short rx_bad_format    PACKED; /* 08h:  */
-       unsigned short retransmitted    PACKED; /* 0Ah:  */
-       unsigned short cpe_tx_FSE       PACKED; /* 0Ch:  */
-       unsigned short cpe_tx_LIV       PACKED; /* 0Eh:  */
-       unsigned short cpe_rx_FSR       PACKED; /* 10h:  */
-       unsigned short cpe_rx_LIV       PACKED; /* 12h:  */
-       unsigned short node_rx_FSE      PACKED; /* 14h:  */
-       unsigned short node_rx_LIV      PACKED; /* 16h:  */
-       unsigned short node_tx_FSR      PACKED; /* 18h:  */
-       unsigned short node_tx_LIV      PACKED; /* 1Ah:  */
-       unsigned short rx_ISF_err       PACKED; /* 1Ch:  */
-       unsigned short rx_unsolicited   PACKED; /* 1Eh:  */
-       unsigned short rx_SSN_err       PACKED; /* 20h:  */
-       unsigned short rx_RSN_err       PACKED; /* 22h:  */
-       unsigned short T391_timeouts    PACKED; /* 24h:  */
-       unsigned short T392_timeouts    PACKED; /* 26h:  */
-       unsigned short N392_reached     PACKED; /* 28h:  */
-       unsigned short cpe_SSN_RSN      PACKED; /* 2Ah:  */
-       unsigned short current_SSN      PACKED; /* 2Ch:  */
-       unsigned short current_RSN      PACKED; /* 2Eh:  */
-       unsigned short curreny_T391     PACKED; /* 30h:  */
-       unsigned short current_T392     PACKED; /* 32h:  */
-       unsigned short current_N392     PACKED; /* 34h:  */
-       unsigned short current_N393     PACKED; /* 36h:  */
-} fr_link_stat_t;
-
-/*----------------------------------------------------------------------------
- * DLCI statistics.
- *     This structure is returned by the FR_READ_STATISTICS command when
- *     dlci != 0.
- */
-typedef struct fr_dlci_stat
-{
-       unsigned long tx_frames         PACKED; /* 00h:  */
-       unsigned long tx_bytes          PACKED; /* 04h:  */
-       unsigned long rx_frames         PACKED; /* 08h:  */
-       unsigned long rx_bytes          PACKED; /* 0Ch:  */
-       unsigned long rx_dropped        PACKED; /* 10h:  */
-       unsigned long rx_inactive       PACKED; /* 14h:  */
-       unsigned long rx_exceed_CIR     PACKED; /* 18h:  */
-       unsigned long rx_DE_set         PACKED; /* 1Ch:  */
-       unsigned long tx_throughput     PACKED; /* 20h:  */
-       unsigned long tx_calc_timer     PACKED; /* 24h:  */
-       unsigned long rx_throughput     PACKED; /* 28h:  */
-       unsigned long rx_calc_timer     PACKED; /* 2Ch:  */
-} fr_dlci_stat_t;
-
-/*----------------------------------------------------------------------------
- * Communications error statistics.
- *     This structure is returned by the FR_READ_ERROR_STATS command.
- */
-typedef struct fr_comm_stat
-{
-       unsigned char rx_overruns       PACKED; /* 00h:  */
-       unsigned char rx_bad_crc        PACKED; /* 01h:  */
-       unsigned char rx_aborts         PACKED; /* 02h:  */
-       unsigned char rx_too_long       PACKED; /* 03h:  */
-       unsigned char tx_aborts         PACKED; /* 04h:  */
-       unsigned char tx_underruns      PACKED; /* 05h:  */
-       unsigned char tx_missed_undr    PACKED; /* 06h:  */
-       unsigned char dcd_dropped       PACKED; /* 07h:  */
-       unsigned char cts_dropped       PACKED; /* 08h:  */
-} fr_comm_stat_t;
-
-/*----------------------------------------------------------------------------
- * Defines for the FR_ISSUE_IS_FRAME command.
- */
-#define        FR_ISF_LVE      2               /* issue Link Verification Enquiry */
-#define        FR_ISF_FSE      3               /* issue Full Status Enquiry */
-
-/*----------------------------------------------------------------------------
- * Frame Relay ARP Header -- Used for Dynamic route creation with InvARP 
- */
-
-typedef struct arphdr_fr
-       {
-       unsigned short ar_hrd PACKED;           /* format of hardware addr */
-       unsigned short ar_pro PACKED;           /* format of protocol addr */
-       unsigned char  ar_hln PACKED;           /* length of hardware addr */   
-       unsigned char  ar_pln PACKED;           /* length of protocol addr */
-       unsigned short ar_op  PACKED;           /* ARP opcode              */
-       unsigned short ar_sha PACKED;           /* Sender DLCI addr 2 bytes */
-       unsigned long  ar_sip PACKED;           /* Sender IP   addr 4 bytes */
-       unsigned short ar_tha PACKED;           /* Target DLCI addr 2 bytes */
-       unsigned long  ar_tip PACKED;           /* Target IP   addr 4 bytes */
-       } arphdr_fr_t;
-
-/*----------------------------------------------------------------------------
- * Frame Relay RFC 1490 SNAP Header -- Used to check for ARP packets
- */
-typedef struct arphdr_1490
-       {
-       unsigned char control PACKED;           /* UI, etc...  */
-       unsigned char pad     PACKED;           /* Pad */
-       unsigned char NLPID   PACKED;           /* SNAP */
-       unsigned char OUI[3]  PACKED;           /* Ethertype, etc... */
-       unsigned short PID    PACKED;           /* ARP, IP, etc... */
-       }  arphdr_1490_t;
-
-/* UDP/IP packet (for UDP management) layout */
-
-/* The embedded control block for UDP mgmt
-   This is essentially a mailbox structure, without the large data field */
-
-typedef struct {
-        unsigned char  opp_flag PACKED; /* the opp flag */
-        unsigned char  command  PACKED; /* command code */
-        unsigned short length   PACKED; /* length of data buffer */
-        unsigned char  result   PACKED; /* return code */
-        unsigned short dlci     PACKED; /* DLCI number */
-        unsigned char  attr     PACKED; /* FECN, BECN, DE and C/R bits */
-        unsigned short rxlost1  PACKED; /* frames discarded at int. level */
-        unsigned long  rxlost2  PACKED; /* frames discarded at app. level */
-        unsigned char  rsrv[2]  PACKED; /* reserved for future use */
-} cblock_t;
-
-
-/* UDP management packet layout (data area of ip packet) */
-
-typedef struct {
-        unsigned char   control                 PACKED;
-        unsigned char   NLPID                   PACKED;
-} fr_encap_hdr_t;
-
-typedef struct {
-//     fr_encap_hdr_t          fr_encap_hdr    PACKED;
-       ip_pkt_t                ip_pkt          PACKED;
-       udp_pkt_t               udp_pkt         PACKED;
-       wp_mgmt_t               wp_mgmt         PACKED;
-        cblock_t                cblock          PACKED;
-        unsigned char           data[4080]      PACKED;
-} fr_udp_pkt_t;
-
-
-/* valid ip_protocol for UDP management */
-#define UDPMGMT_UDP_PROTOCOL 0x11
-
-#define UDPMGMT_FPIPE_SIGNATURE         "FPIPE8ND"
-#define UDPMGMT_DRVRSTATS_SIGNATURE     "DRVSTATS"
-
-/* values for request/reply byte */
-#define UDPMGMT_REQUEST        0x01
-#define UDPMGMT_REPLY  0x02
-#define UDP_OFFSET     12
-
-typedef struct {
-        unsigned long if_send_entry;
-        unsigned long if_send_skb_null;
-        unsigned long if_send_broadcast;
-        unsigned long if_send_multicast;
-        unsigned long if_send_critical_ISR;
-        unsigned long if_send_critical_non_ISR;
-        unsigned long if_send_busy;
-        unsigned long if_send_busy_timeout;
-       unsigned long if_send_DRVSTATS_request;
-        unsigned long if_send_FPIPE_request;
-        unsigned long if_send_wan_disconnected;
-        unsigned long if_send_dlci_disconnected;
-        unsigned long if_send_no_bfrs;
-        unsigned long if_send_adptr_bfrs_full;
-        unsigned long if_send_bfrs_passed_to_adptr;
-       unsigned long if_send_consec_send_fail;
-} drvstats_if_send_t; 
-
-typedef struct {
-        unsigned long rx_intr_no_socket;
-        unsigned long rx_intr_dev_not_started;
-        unsigned long rx_intr_DRVSTATS_request;
-        unsigned long rx_intr_FPIPE_request;
-        unsigned long rx_intr_bfr_not_passed_to_stack;
-        unsigned long rx_intr_bfr_passed_to_stack;
- } drvstats_rx_intr_t;
-
-typedef struct {
-        unsigned long UDP_FPIPE_mgmt_kmalloc_err;
-        unsigned long UDP_FPIPE_mgmt_direction_err;
-        unsigned long UDP_FPIPE_mgmt_adptr_type_err;
-        unsigned long UDP_FPIPE_mgmt_adptr_cmnd_OK;
-        unsigned long UDP_FPIPE_mgmt_adptr_cmnd_timeout;
-        unsigned long UDP_FPIPE_mgmt_adptr_send_passed;
-        unsigned long UDP_FPIPE_mgmt_adptr_send_failed;
-        unsigned long UDP_FPIPE_mgmt_not_passed_to_stack;
-        unsigned long UDP_FPIPE_mgmt_passed_to_stack;
-        unsigned long UDP_FPIPE_mgmt_no_socket;
-        unsigned long UDP_DRVSTATS_mgmt_kmalloc_err;
-        unsigned long UDP_DRVSTATS_mgmt_adptr_cmnd_OK;
-        unsigned long UDP_DRVSTATS_mgmt_adptr_cmnd_timeout;
-        unsigned long UDP_DRVSTATS_mgmt_adptr_send_passed;
-        unsigned long UDP_DRVSTATS_mgmt_adptr_send_failed;
-        unsigned long UDP_DRVSTATS_mgmt_not_passed_to_stack;
-        unsigned long UDP_DRVSTATS_mgmt_passed_to_stack;
-        unsigned long UDP_DRVSTATS_mgmt_no_socket;
-} drvstats_gen_t;
-
-typedef struct {
-        unsigned char   attr           PACKED;
-        unsigned short  time_stamp      PACKED;
-        unsigned char   reserved[13]    PACKED;
-} api_rx_hdr_t;
-
-typedef struct {
-        api_rx_hdr_t    api_rx_hdr      PACKED;
-        void *          data            PACKED;
-} api_rx_element_t;
-
-typedef struct {
-        unsigned char   attr            PACKED;
-        unsigned char   reserved[15]    PACKED;
-} api_tx_hdr_t;
-
-typedef struct {
-        api_tx_hdr_t    api_tx_hdr      PACKED;
-        void *          data            PACKED;
-} api_tx_element_t;
-
-#ifdef         _MSC_
-#  pragma      pack()
-#endif
-#endif /* _SDLA_FR_H */
-
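
For context on the header removed above: the global configuration block was passed to the firmware via FR_SET_CONFIG with dlci == 0. Purely as an illustration of the interface being deleted here (the helper, the values and any surrounding driver context are invented; only the field and FRCFG_* names come from the header above), a caller would have filled it roughly like this:

/* illustration only -- this interface is deleted by the patch above */
static void fr_example_global_config(fr_conf_t *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->station = FRCFG_STATION_CPE;               /* we are the CPE side */
	cfg->options = FRCFG_AUTO_CONFIG;               /* let the firmware learn DLCIs */
	cfg->kbps    = FRCFG_BAUD_64000;                /* line rate, in kbps */
	cfg->port    = FRCFG_MODE_INT_CLK | FRCFG_MODE_V35;
	cfg->mtu     = 1500;                            /* max. transmit length */
	/* t391/t392/n391..n393 take the usual LMI timer/counter values */
}
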
index 5992f65..2694cb3 100644 (file)
 #include <net/checksum.h>
 #include <linux/rcupdate.h>
 #include <linux/dmaengine.h>
+#include <linux/hrtimer.h>
 
 #define HAVE_ALLOC_SKB         /* For the drivers to know */
 #define HAVE_ALIGNABLE_SKB     /* Ditto 8)                */
 
+/* Don't change this without changing skb_csum_unnecessary! */
 #define CHECKSUM_NONE 0
-#define CHECKSUM_PARTIAL 1
-#define CHECKSUM_UNNECESSARY 2
-#define CHECKSUM_COMPLETE 3
+#define CHECKSUM_UNNECESSARY 1
+#define CHECKSUM_COMPLETE 2
+#define CHECKSUM_PARTIAL 3
 
 #define SKB_DATA_ALIGN(X)      (((X) + (SMP_CACHE_BYTES - 1)) & \
                                 ~(SMP_CACHE_BYTES - 1))
-#define SKB_MAX_ORDER(X, ORDER)        (((PAGE_SIZE << (ORDER)) - (X) - \
-                                 sizeof(struct skb_shared_info)) & \
-                                 ~(SMP_CACHE_BYTES - 1))
+#define SKB_WITH_OVERHEAD(X)   \
+       (((X) - sizeof(struct skb_shared_info)) & \
+        ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_ORDER(X, ORDER) \
+       SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
 #define SKB_MAX_HEAD(X)                (SKB_MAX_ORDER((X), 0))
 #define SKB_MAX_ALLOC          (SKB_MAX_ORDER(0, 2))
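
The re-numbering above (CHECKSUM_UNNECESSARY becomes 1, CHECKSUM_PARTIAL becomes 3) is what lets the skb_csum_unnecessary() helper added further down in this patch be a plain bit test instead of a comparison -- hence the "Don't change this" warning. A receive path then looks roughly like the sketch below (only the helpers are real; the function itself is invented):

/* sketch: checksum validation on receive with the new helpers */
static int example_rx_checksum_ok(struct sk_buff *skb)
{
	if (skb_csum_unnecessary(skb))             /* ip_summed & CHECKSUM_UNNECESSARY */
		return 1;                          /* already verified by hardware or loopback */
	return __skb_checksum_complete(skb) == 0;  /* software checksum as a fallback */
}

This is essentially what the reworked skb_checksum_complete() later in this patch does.
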
 
@@ -66,8 +70,8 @@
  *     NONE: skb is checksummed by protocol or csum is not required.
  *
  *     PARTIAL: device is required to csum packet as seen by hard_start_xmit
- *     from skb->h.raw to the end and to record the checksum
- *     at skb->h.raw+skb->csum.
+ *     from skb->transport_header to the end and to record the checksum
+ *     at skb->transport_header + skb->csum.
  *
  *     Device must show its capabilities in dev->features, set
  *     at device setup time.
  */
 
 struct net_device;
+struct scatterlist;
 
-#ifdef CONFIG_NETFILTER
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 struct nf_conntrack {
        atomic_t use;
-       void (*destroy)(struct nf_conntrack *);
 };
+#endif
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 struct nf_bridge_info {
@@ -103,8 +108,6 @@ struct nf_bridge_info {
 };
 #endif
 
-#endif
-
 struct sk_buff_head {
        /* These two members must be first. */
        struct sk_buff  *next;
@@ -156,11 +159,6 @@ struct skb_shared_info {
 #define SKB_DATAREF_SHIFT 16
 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
 
-struct skb_timeval {
-       u32     off_sec;
-       u32     off_usec;
-};
-
 
 enum {
        SKB_FCLONE_UNAVAILABLE,
@@ -181,6 +179,16 @@ enum {
        SKB_GSO_TCPV6 = 1 << 4,
 };
 
+#if BITS_PER_LONG > 32
+#define NET_SKBUFF_DATA_USES_OFFSET 1
+#endif
+
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+typedef unsigned int sk_buff_data_t;
+#else
+typedef unsigned char *sk_buff_data_t;
+#endif
+
 /** 
  *     struct sk_buff - socket buffer
  *     @next: Next buffer in list
@@ -190,15 +198,17 @@ enum {
  *     @dev: Device we arrived on/are leaving by
  *     @iif: ifindex of device we arrived on
  *     @h: Transport layer header
- *     @nh: Network layer header
- *     @mac: Link layer header
+ *     @network_header: Network layer header
+ *     @mac_header: Link layer header
  *     @dst: destination entry
  *     @sp: the security path, used for xfrm
  *     @cb: Control buffer. Free for use by every layer. Put private vars here
  *     @len: Length of actual data
  *     @data_len: Data length
  *     @mac_len: Length of link layer header
- *     @csum: Checksum
+ *     @csum: Checksum (must include start/offset pair)
+ *     @csum_start: Offset from skb->head where checksumming should start
+ *     @csum_offset: Offset from csum_start where checksum should be stored
  *     @local_df: allow local fragmentation
  *     @cloned: Head may be cloned (check refcnt to be sure)
  *     @nohdr: Payload reference only, must not modify header
@@ -233,32 +243,11 @@ struct sk_buff {
        struct sk_buff          *prev;
 
        struct sock             *sk;
-       struct skb_timeval      tstamp;
+       ktime_t                 tstamp;
        struct net_device       *dev;
        int                     iif;
        /* 4 byte hole on 64 bit*/
 
-       union {
-               struct tcphdr   *th;
-               struct udphdr   *uh;
-               struct icmphdr  *icmph;
-               struct igmphdr  *igmph;
-               struct iphdr    *ipiph;
-               struct ipv6hdr  *ipv6h;
-               unsigned char   *raw;
-       } h;
-
-       union {
-               struct iphdr    *iph;
-               struct ipv6hdr  *ipv6h;
-               struct arphdr   *arph;
-               unsigned char   *raw;
-       } nh;
-
-       union {
-               unsigned char   *raw;
-       } mac;
-
        struct  dst_entry       *dst;
        struct  sec_path        *sp;
 
@@ -275,7 +264,10 @@ struct sk_buff {
                                mac_len;
        union {
                __wsum          csum;
-               __u32           csum_offset;
+               struct {
+                       __u16   csum_start;
+                       __u16   csum_offset;
+               };
        };
        __u32                   priority;
        __u8                    local_df:1,
@@ -289,15 +281,13 @@ struct sk_buff {
        __be16                  protocol;
 
        void                    (*destructor)(struct sk_buff *skb);
-#ifdef CONFIG_NETFILTER
-       struct nf_conntrack     *nfct;
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+       struct nf_conntrack     *nfct;
        struct sk_buff          *nfct_reasm;
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
        struct nf_bridge_info   *nf_bridge;
 #endif
-#endif /* CONFIG_NETFILTER */
 #ifdef CONFIG_NET_SCHED
        __u16                   tc_index;       /* traffic control index */
 #ifdef CONFIG_NET_CLS_ACT
@@ -313,13 +303,16 @@ struct sk_buff {
 
        __u32                   mark;
 
+       sk_buff_data_t          transport_header;
+       sk_buff_data_t          network_header;
+       sk_buff_data_t          mac_header;
        /* These elements must be at the end, see alloc_skb() for details.  */
+       sk_buff_data_t          tail;
+       sk_buff_data_t          end;
+       unsigned char           *head,
+                               *data;
        unsigned int            truesize;
        atomic_t                users;
-       unsigned char           *head,
-                               *data,
-                               *tail,
-                               *end;
 };
 
 #ifdef __KERNEL__
@@ -361,6 +354,11 @@ extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                       int newheadroom, int newtailroom,
                                       gfp_t priority);
+extern int            skb_to_sgvec(struct sk_buff *skb,
+                                   struct scatterlist *sg, int offset,
+                                   int len);
+extern int            skb_cow_data(struct sk_buff *skb, int tailbits,
+                                   struct sk_buff **trailer);
 extern int            skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a)       kfree_skb(a)
 extern void          skb_over_panic(struct sk_buff *skb, int len,
@@ -402,8 +400,20 @@ extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
                                    unsigned int to, struct ts_config *config,
                                    struct ts_state *state);
 
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+{
+       return skb->head + skb->end;
+}
+#else
+static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+{
+       return skb->end;
+}
+#endif
+
 /* Internal */
-#define skb_shinfo(SKB)                ((struct skb_shared_info *)((SKB)->end))
+#define skb_shinfo(SKB)        ((struct skb_shared_info *)(skb_end_pointer(SKB)))
 
 /**
  *     skb_queue_empty - check if a queue is empty
@@ -822,12 +832,46 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
 #define SKB_FRAG_ASSERT(skb)   BUG_ON(skb_shinfo(skb)->frag_list)
 #define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
 
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
+{
+       return skb->head + skb->tail;
+}
+
+static inline void skb_reset_tail_pointer(struct sk_buff *skb)
+{
+       skb->tail = skb->data - skb->head;
+}
+
+static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
+{
+       skb_reset_tail_pointer(skb);
+       skb->tail += offset;
+}
+#else /* NET_SKBUFF_DATA_USES_OFFSET */
+static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
+{
+       return skb->tail;
+}
+
+static inline void skb_reset_tail_pointer(struct sk_buff *skb)
+{
+       skb->tail = skb->data;
+}
+
+static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
+{
+       skb->tail = skb->data + offset;
+}
+
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+
 /*
  *     Add data to an sk_buff
  */
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
-       unsigned char *tmp = skb->tail;
+       unsigned char *tmp = skb_tail_pointer(skb);
        SKB_LINEAR_ASSERT(skb);
        skb->tail += len;
        skb->len  += len;
@@ -845,11 +889,11 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
  */
 static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
 {
-       unsigned char *tmp = skb->tail;
+       unsigned char *tmp = skb_tail_pointer(skb);
        SKB_LINEAR_ASSERT(skb);
        skb->tail += len;
        skb->len  += len;
-       if (unlikely(skb->tail>skb->end))
+       if (unlikely(skb->tail > skb->end))
                skb_over_panic(skb, len, current_text_addr());
        return tmp;
 }
@@ -962,6 +1006,130 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
        skb->tail += len;
 }
 
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
+{
+       return skb->head + skb->transport_header;
+}
+
+static inline void skb_reset_transport_header(struct sk_buff *skb)
+{
+       skb->transport_header = skb->data - skb->head;
+}
+
+static inline void skb_set_transport_header(struct sk_buff *skb,
+                                           const int offset)
+{
+       skb_reset_transport_header(skb);
+       skb->transport_header += offset;
+}
+
+static inline unsigned char *skb_network_header(const struct sk_buff *skb)
+{
+       return skb->head + skb->network_header;
+}
+
+static inline void skb_reset_network_header(struct sk_buff *skb)
+{
+       skb->network_header = skb->data - skb->head;
+}
+
+static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
+{
+       skb_reset_network_header(skb);
+       skb->network_header += offset;
+}
+
+static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
+{
+       return skb->head + skb->mac_header;
+}
+
+static inline int skb_mac_header_was_set(const struct sk_buff *skb)
+{
+       return skb->mac_header != ~0U;
+}
+
+static inline void skb_reset_mac_header(struct sk_buff *skb)
+{
+       skb->mac_header = skb->data - skb->head;
+}
+
+static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
+{
+       skb_reset_mac_header(skb);
+       skb->mac_header += offset;
+}
+
+#else /* NET_SKBUFF_DATA_USES_OFFSET */
+
+static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
+{
+       return skb->transport_header;
+}
+
+static inline void skb_reset_transport_header(struct sk_buff *skb)
+{
+       skb->transport_header = skb->data;
+}
+
+static inline void skb_set_transport_header(struct sk_buff *skb,
+                                           const int offset)
+{
+       skb->transport_header = skb->data + offset;
+}
+
+static inline unsigned char *skb_network_header(const struct sk_buff *skb)
+{
+       return skb->network_header;
+}
+
+static inline void skb_reset_network_header(struct sk_buff *skb)
+{
+       skb->network_header = skb->data;
+}
+
+static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
+{
+       skb->network_header = skb->data + offset;
+}
+
+static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
+{
+       return skb->mac_header;
+}
+
+static inline int skb_mac_header_was_set(const struct sk_buff *skb)
+{
+       return skb->mac_header != NULL;
+}
+
+static inline void skb_reset_mac_header(struct sk_buff *skb)
+{
+       skb->mac_header = skb->data;
+}
+
+static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
+{
+       skb->mac_header = skb->data + offset;
+}
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+
+static inline int skb_transport_offset(const struct sk_buff *skb)
+{
+       return skb_transport_header(skb) - skb->data;
+}
+
+static inline u32 skb_network_header_len(const struct sk_buff *skb)
+{
+       return skb->transport_header - skb->network_header;
+}
+
+static inline int skb_network_offset(const struct sk_buff *skb)
+{
+       return skb_network_header(skb) - skb->data;
+}
+
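
These accessors replace the old skb->h / skb->nh / skb->mac raw pointer unions that the earlier hunks remove from struct sk_buff. A typical conversion in a protocol receive path looks roughly like this (the function and its context are invented; the helpers and struct iphdr are real):

/* old style: skb->nh.raw = skb->data; skb->h.raw = skb->nh.raw + iph->ihl * 4; */
static void example_ip_rx(struct sk_buff *skb)
{
	struct iphdr *iph;

	skb_reset_network_header(skb);                  /* network header = skb->data */
	iph = (struct iphdr *)skb_network_header(skb);
	skb_set_transport_header(skb, iph->ihl * 4);    /* offset taken from skb->data */
}
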
 /*
  * CPUs often take a performance hit when accessing unaligned memory
  * locations. The actual performance hit varies, it can be small if the
@@ -1013,8 +1181,8 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
                WARN_ON(1);
                return;
        }
-       skb->len  = len;
-       skb->tail = skb->data + len;
+       skb->len = len;
+       skb_set_tail_pointer(skb, len);
 }
 
 /**
@@ -1326,8 +1494,8 @@ extern __wsum            skb_checksum(const struct sk_buff *skb, int offset,
                                    int len, __wsum csum);
 extern int            skb_copy_bits(const struct sk_buff *skb, int offset,
                                     void *to, int len);
-extern int            skb_store_bits(const struct sk_buff *skb, int offset,
-                                     void *from, int len);
+extern int            skb_store_bits(struct sk_buff *skb, int offset,
+                                     const void *from, int len);
 extern __wsum         skb_copy_and_csum_bits(const struct sk_buff *skb,
                                              int offset, u8 *to, int len,
                                              __wsum csum);
@@ -1351,8 +1519,36 @@ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
        return buffer;
 }
 
+static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
+                                            void *to,
+                                            const unsigned int len)
+{
+       memcpy(to, skb->data, len);
+}
+
+static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
+                                                   const int offset, void *to,
+                                                   const unsigned int len)
+{
+       memcpy(to, skb->data + offset, len);
+}
+
+static inline void skb_copy_to_linear_data(struct sk_buff *skb,
+                                          const void *from,
+                                          const unsigned int len)
+{
+       memcpy(skb->data, from, len);
+}
+
+static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
+                                                 const int offset,
+                                                 const void *from,
+                                                 const unsigned int len)
+{
+       memcpy(skb->data + offset, from, len);
+}
+
 extern void skb_init(void);
-extern void skb_add_mtu(int mtu);
 
 /**
  *     skb_get_timestamp - get timestamp from a skb
@@ -1365,29 +1561,28 @@ extern void skb_add_mtu(int mtu);
  */
 static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
 {
-       stamp->tv_sec  = skb->tstamp.off_sec;
-       stamp->tv_usec = skb->tstamp.off_usec;
+       *stamp = ktime_to_timeval(skb->tstamp);
 }
 
-/**
- *     skb_set_timestamp - set timestamp of a skb
- *     @skb: skb to set stamp of
- *     @stamp: pointer to struct timeval to get stamp from
- *
- *     Timestamps are stored in the skb as offsets to a base timestamp.
- *     This function converts a struct timeval to an offset and stores
- *     it in the skb.
- */
-static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
+static inline void __net_timestamp(struct sk_buff *skb)
+{
+       skb->tstamp = ktime_get_real();
+}
+
+static inline ktime_t net_timedelta(ktime_t t)
 {
-       skb->tstamp.off_sec  = stamp->tv_sec;
-       skb->tstamp.off_usec = stamp->tv_usec;
+       return ktime_sub(ktime_get_real(), t);
 }
 
-extern void __net_timestamp(struct sk_buff *skb);
 
+extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
 extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
 
+static inline int skb_csum_unnecessary(const struct sk_buff *skb)
+{
+       return skb->ip_summed & CHECKSUM_UNNECESSARY;
+}
+
 /**
  *     skb_checksum_complete - Calculate checksum of an entire packet
  *     @skb: packet to process
@@ -1406,22 +1601,22 @@ extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
  */
 static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
 {
-       return skb->ip_summed != CHECKSUM_UNNECESSARY &&
-               __skb_checksum_complete(skb);
+       return skb_csum_unnecessary(skb) ?
+              0 : __skb_checksum_complete(skb);
 }
 
-#ifdef CONFIG_NETFILTER
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 {
        if (nfct && atomic_dec_and_test(&nfct->use))
-               nfct->destroy(nfct);
+               nf_conntrack_destroy(nfct);
 }
 static inline void nf_conntrack_get(struct nf_conntrack *nfct)
 {
        if (nfct)
                atomic_inc(&nfct->use);
 }
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
 {
        if (skb)
@@ -1447,9 +1642,9 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
 #endif /* CONFIG_BRIDGE_NETFILTER */
 static inline void nf_reset(struct sk_buff *skb)
 {
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put(skb->nfct);
        skb->nfct = NULL;
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put_reasm(skb->nfct_reasm);
        skb->nfct_reasm = NULL;
 #endif
@@ -1459,9 +1654,33 @@ static inline void nf_reset(struct sk_buff *skb)
 #endif
 }
 
-#else /* CONFIG_NETFILTER */
-static inline void nf_reset(struct sk_buff *skb) {}
-#endif /* CONFIG_NETFILTER */
+/* Note: This doesn't put any conntrack and bridge info in dst. */
+static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+{
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+       dst->nfct = src->nfct;
+       nf_conntrack_get(src->nfct);
+       dst->nfctinfo = src->nfctinfo;
+       dst->nfct_reasm = src->nfct_reasm;
+       nf_conntrack_get_reasm(src->nfct_reasm);
+#endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+       dst->nf_bridge  = src->nf_bridge;
+       nf_bridge_get(src->nf_bridge);
+#endif
+}
+
+static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+{
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+       nf_conntrack_put(dst->nfct);
+       nf_conntrack_put_reasm(dst->nfct_reasm);
+#endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+       nf_bridge_put(dst->nf_bridge);
+#endif
+       __nf_copy(dst, src);
+}
 
 #ifdef CONFIG_NETWORK_SECMARK
 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
@@ -1486,5 +1705,12 @@ static inline int skb_is_gso(const struct sk_buff *skb)
        return skb_shinfo(skb)->gso_size;
 }
 
+static inline void skb_forward_csum(struct sk_buff *skb)
+{
+       /* Unfortunately we don't support this one.  Any brave souls? */
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               skb->ip_summed = CHECKSUM_NONE;
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
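
One more note on the sk_buff changes above: the csum_start/csum_offset pair that replaces the old lone csum_offset tells a CHECKSUM_PARTIAL-capable device where to start checksumming (relative to skb->head) and where to store the result. A transmit path fills them in roughly as below; the TCP case is shown only as an example of the pattern:

/* sketch: preparing a segment for hardware checksum offload */
skb->ip_summed   = CHECKSUM_PARTIAL;
skb->csum_start  = skb_transport_header(skb) - skb->head;   /* where checksumming starts */
skb->csum_offset = offsetof(struct tcphdr, check);          /* where the result is stored */
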
index fcd35a2..6e7c948 100644 (file)
@@ -188,7 +188,8 @@ struct ucred {
 #define AF_TIPC                30      /* TIPC sockets                 */
 #define AF_BLUETOOTH   31      /* Bluetooth sockets            */
 #define AF_IUCV                32      /* IUCV sockets                 */
-#define AF_MAX         33      /* For now.. */
+#define AF_RXRPC       33      /* RxRPC sockets                */
+#define AF_MAX         34      /* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC      AF_UNSPEC
@@ -222,6 +223,7 @@ struct ucred {
 #define PF_TIPC                AF_TIPC
 #define PF_BLUETOOTH   AF_BLUETOOTH
 #define PF_IUCV                AF_IUCV
+#define PF_RXRPC       AF_RXRPC
 #define PF_MAX         AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
@@ -284,6 +286,7 @@ struct ucred {
 #define SOL_DCCP       269
 #define SOL_NETLINK    270
 #define SOL_TIPC       271
+#define SOL_RXRPC      272
 
 /* IPX options */
 #define IPX_TYPE       1
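
The AF_RXRPC/PF_RXRPC and SOL_RXRPC values added above are used by the new net/rxrpc code elsewhere in this merge. Going by the accompanying Documentation/networking/rxrpc.txt, a userspace client obtains a socket along these lines (a sketch; the transport family passed as the protocol argument selects UDP/IPv4 underneath):

/* userspace sketch of creating an rxrpc client socket */
int client = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
if (client < 0)
	perror("socket");
/* per-socket options (security keys and the like) then use level SOL_RXRPC */
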
index 4f69ef9..7f2eb6a 100644 (file)
@@ -47,6 +47,12 @@ extern int strncmp(const char *,const char *,__kernel_size_t);
 #ifndef __HAVE_ARCH_STRNICMP
 extern int strnicmp(const char *, const char *, __kernel_size_t);
 #endif
+#ifndef __HAVE_ARCH_STRCASECMP
+extern int strcasecmp(const char *s1, const char *s2);
+#endif
+#ifndef __HAVE_ARCH_STRNCASECMP
+extern int strncasecmp(const char *s1, const char *s2, size_t n);
+#endif
 #ifndef __HAVE_ARCH_STRCHR
 extern char * strchr(const char *,int);
 #endif
index 2c5fb38..47f1c53 100644 (file)
@@ -290,6 +290,7 @@ enum
        NET_CORE_BUDGET=19,
        NET_CORE_AEVENT_ETIME=20,
        NET_CORE_AEVENT_RSEQTH=21,
+       NET_CORE_WARNINGS=22,
 };
 
 /* /proc/sys/net/ethernet */
@@ -438,6 +439,8 @@ enum
        NET_CIPSOV4_RBM_STRICTVALID=121,
        NET_TCP_AVAIL_CONG_CONTROL=122,
        NET_TCP_ALLOWED_CONG_CONTROL=123,
+       NET_TCP_MAX_SSTHRESH=124,
+       NET_TCP_FRTO_RESPONSE=125,
 };
 
 enum {
@@ -580,6 +583,7 @@ enum {
        NET_IPV6_RTR_PROBE_INTERVAL=21,
        NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
        NET_IPV6_PROXY_NDP=23,
+       NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
        __NET_IPV6_MAX
 };
 
@@ -788,6 +792,7 @@ enum {
        NET_BRIDGE_NF_CALL_IPTABLES = 2,
        NET_BRIDGE_NF_CALL_IP6TABLES = 3,
        NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
+       NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
 };
 
 /* CTL_FS names: */
index 3fced47..a46104a 100644 (file)
@@ -31,7 +31,7 @@
  */
 
 
-#define TASKSTATS_VERSION      3
+#define TASKSTATS_VERSION      4
 #define TS_COMM_LEN            32      /* should be >= TASK_COMM_LEN
                                         * in linux/sched.h */
 
@@ -66,7 +66,7 @@ struct taskstats {
        /* Delay waiting for cpu, while runnable
         * count, delay_total NOT updated atomically
         */
-       __u64   cpu_count;
+       __u64   cpu_count __attribute__((aligned(8)));
        __u64   cpu_delay_total;
 
        /* Following four fields atomically updated using task->delays->lock */
@@ -101,14 +101,17 @@ struct taskstats {
 
        /* Basic Accounting Fields start */
        char    ac_comm[TS_COMM_LEN];   /* Command name */
-       __u8    ac_sched;               /* Scheduling discipline */
+       __u8    ac_sched __attribute__((aligned(8)));
+                                       /* Scheduling discipline */
        __u8    ac_pad[3];
-       __u32   ac_uid;                 /* User ID */
+       __u32   ac_uid __attribute__((aligned(8)));
+                                       /* User ID */
        __u32   ac_gid;                 /* Group ID */
        __u32   ac_pid;                 /* Process ID */
        __u32   ac_ppid;                /* Parent process ID */
        __u32   ac_btime;               /* Begin time [sec since 1970] */
-       __u64   ac_etime;               /* Elapsed time [usec] */
+       __u64   ac_etime __attribute__((aligned(8)));
+                                       /* Elapsed time [usec] */
        __u64   ac_utime;               /* User CPU time [usec] */
        __u64   ac_stime;               /* System CPU time [usec] */
        __u64   ac_minflt;              /* Minor Page Fault Count */
index 29d3089..c6b9f92 100644 (file)
@@ -178,6 +178,21 @@ struct tcp_md5sig {
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
 
+static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+{
+       return (struct tcphdr *)skb_transport_header(skb);
+}
+
+static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
+{
+       return tcp_hdr(skb)->doff * 4;
+}
+
+static inline unsigned int tcp_optlen(const struct sk_buff *skb)
+{
+       return (tcp_hdr(skb)->doff - 5) * 4;
+}
+
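
These helpers replace direct skb->h.th dereferences. Once the transport header has been set (see the skb_set_transport_header() family added in this merge), a caller can do roughly the following (sketch; the option-parsing comment is only indicative):

/* sketch: inspecting a TCP segment with the new accessors */
const struct tcphdr *th = tcp_hdr(skb);        /* was skb->h.th */
unsigned int hdr_len    = tcp_hdrlen(skb);     /* doff * 4: header incl. options */
unsigned int opt_len    = tcp_optlen(skb);     /* options only: hdr_len - 20 */

if (th->syn && opt_len > 0)
	/* options sit between (th + 1) and ((u8 *)th + hdr_len) */ ;
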
 /* This defines a selective acknowledgement block. */
 struct tcp_sack_block_wire {
        __be32  start_seq;
@@ -242,6 +257,8 @@ struct tcp_sock {
  *     See RFC793 and RFC1122. The RFC writes these in capitals.
  */
        u32     rcv_nxt;        /* What we want to receive next         */
+       u32     copied_seq;     /* Head of yet unread data              */
+       u32     rcv_wup;        /* rcv_nxt on last window update sent   */
        u32     snd_nxt;        /* Next sequence we send                */
 
        u32     snd_una;        /* First byte we want an ack for        */
@@ -300,17 +317,15 @@ struct tcp_sock {
        u32     snd_ssthresh;   /* Slow start size threshold            */
        u32     snd_cwnd;       /* Sending congestion window            */
        u16     snd_cwnd_cnt;   /* Linear increase counter              */
-       u16     snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
+       u32     snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
        u32     snd_cwnd_used;
        u32     snd_cwnd_stamp;
 
        struct sk_buff_head     out_of_order_queue; /* Out of order segments go here */
 
        u32     rcv_wnd;        /* Current receiver window              */
-       u32     rcv_wup;        /* rcv_nxt on last window update sent   */
        u32     write_seq;      /* Tail(+1) of data held in tcp send buffer */
        u32     pushed_seq;     /* Last pushed seq, required to talk to windows */
-       u32     copied_seq;     /* Head of yet unread data              */
 
 /*     SACKs data      */
        struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
index 7e08c07..6de445c 100644 (file)
@@ -26,6 +26,15 @@ struct udphdr {
        __sum16 check;
 };
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
+{
+       return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
 /* UDP socket options */
 #define UDP_CORK       1       /* Never send partially complete segments */
 #define UDP_ENCAP      100     /* Set the socket to accept encapsulated packets */
index 2a7b38d..b8abfc7 100644 (file)
@@ -191,14 +191,15 @@ int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 /*
  * Kill off a pending schedule_delayed_work().  Note that the work callback
- * function may still be running on return from cancel_delayed_work().  Run
- * flush_scheduled_work() to wait on it.
+ * function may still be running on return from cancel_delayed_work(), unless
+ * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
+ * cancel_work_sync() to wait on it.
  */
 static inline int cancel_delayed_work(struct delayed_work *work)
 {
        int ret;
 
-       ret = del_timer_sync(&work->timer);
+       ret = del_timer(&work->timer);
        if (ret)
                work_release(&work->work);
        return ret;
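
With del_timer_sync() replaced by del_timer() above, cancel_delayed_work() no longer waits for a handler that has already started running. The updated comment translates into a teardown pattern roughly like this (the work item and its owner are invented; cancel_work_sync() and flush_workqueue() are the alternatives the comment names):

/* sketch: tearing down a delayed work item that does not re-arm itself */
if (!cancel_delayed_work(&priv->poll_work))        /* 0: the timer had already fired */
	cancel_work_sync(&priv->poll_work.work);   /* wait for the handler to finish */
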
index 15ca89e..9c656a5 100644 (file)
@@ -181,6 +181,10 @@ enum {
        XFRM_MSG_MIGRATE,
 #define XFRM_MSG_MIGRATE XFRM_MSG_MIGRATE
 
+       XFRM_MSG_NEWSADINFO,
+#define XFRM_MSG_NEWSADINFO XFRM_MSG_NEWSADINFO
+       XFRM_MSG_GETSADINFO,
+#define XFRM_MSG_GETSADINFO XFRM_MSG_GETSADINFO
        __XFRM_MSG_MAX
 };
 #define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -234,6 +238,17 @@ enum xfrm_ae_ftype_t {
 #define XFRM_AE_MAX (__XFRM_AE_MAX - 1)
 };
 
+/* SAD Table filter flags  */
+enum xfrm_sad_ftype_t {
+       XFRM_SAD_UNSPEC,
+       XFRM_SAD_HMASK=1,
+       XFRM_SAD_HMAX=2,
+       XFRM_SAD_CNT=4,
+       __XFRM_SAD_MAX
+
+#define XFRM_SAD_MAX (__XFRM_SAD_MAX - 1)
+};
+
 struct xfrm_userpolicy_type {
        __u8            type;
        __u16           reserved1;
@@ -265,6 +280,16 @@ enum xfrm_attr_type_t {
 #define XFRMA_MAX (__XFRMA_MAX - 1)
 };
 
+enum xfrm_sadattr_type_t {
+       XFRMA_SAD_UNSPEC,
+       XFRMA_SADHMASK,
+       XFRMA_SADHMAX,
+       XFRMA_SADCNT,
+       __XFRMA_SAD_MAX
+
+#define XFRMA_SAD_MAX (__XFRMA_SAD_MAX - 1)
+};
+
 struct xfrm_usersa_info {
        struct xfrm_selector            sel;
        struct xfrm_id                  id;
index 88df8fc..f3531d0 100644 (file)
@@ -73,7 +73,9 @@ extern int                    ipv6_get_saddr(struct dst_entry *dst,
 extern int                     ipv6_dev_get_saddr(struct net_device *dev, 
                                               struct in6_addr *daddr,
                                               struct in6_addr *saddr);
-extern int                     ipv6_get_lladdr(struct net_device *dev, struct in6_addr *);
+extern int                     ipv6_get_lladdr(struct net_device *dev,
+                                               struct in6_addr *addr,
+                                               unsigned char banned_flags);
 extern int                     ipv6_rcv_saddr_equal(const struct sock *sk, 
                                                      const struct sock *sk2);
 extern void                    addrconf_join_solict(struct net_device *dev,
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
new file mode 100644 (file)
index 0000000..00c2eaa
--- /dev/null
@@ -0,0 +1,57 @@
+/* RxRPC kernel service interface definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _NET_RXRPC_H
+#define _NET_RXRPC_H
+
+#ifdef __KERNEL__
+
+#include <linux/rxrpc.h>
+
+struct rxrpc_call;
+
+/*
+ * the mark applied to socket buffers that may be intercepted
+ */
+enum {
+       RXRPC_SKB_MARK_DATA,            /* data message */
+       RXRPC_SKB_MARK_FINAL_ACK,       /* final ACK received message */
+       RXRPC_SKB_MARK_BUSY,            /* server busy message */
+       RXRPC_SKB_MARK_REMOTE_ABORT,    /* remote abort message */
+       RXRPC_SKB_MARK_NET_ERROR,       /* network error message */
+       RXRPC_SKB_MARK_LOCAL_ERROR,     /* local error message */
+       RXRPC_SKB_MARK_NEW_CALL,        /* new incoming call notification */
+};
+
+typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long,
+                                   struct sk_buff *);
+extern void rxrpc_kernel_intercept_rx_messages(struct socket *,
+                                              rxrpc_interceptor_t);
+extern struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
+                                                 struct sockaddr_rxrpc *,
+                                                 struct key *,
+                                                 unsigned long,
+                                                 gfp_t);
+extern int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *,
+                                 size_t);
+extern void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
+extern void rxrpc_kernel_end_call(struct rxrpc_call *);
+extern bool rxrpc_kernel_is_data_last(struct sk_buff *);
+extern u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
+extern int rxrpc_kernel_get_error_number(struct sk_buff *);
+extern void rxrpc_kernel_data_delivered(struct sk_buff *);
+extern void rxrpc_kernel_free_skb(struct sk_buff *);
+extern struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *,
+                                                  unsigned long);
+extern int rxrpc_kernel_reject_call(struct socket *);
+
+#endif /* __KERNEL__ */
+#endif /* _NET_RXRPC_H */
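
The kernel-side interface above exists for in-kernel users (the AFS filesystem reworked in this same merge window being the obvious one). Heavily abridged, and with all surrounding setup (socket, key, target address, msghdr, abort code) assumed to exist already, a client call would look something like this:

/* sketch: one in-kernel rxrpc client call using the functions declared above */
struct rxrpc_call *call;
int ret;

call = rxrpc_kernel_begin_call(rx_sock, &srx, key,
			       (unsigned long)my_ctx, GFP_NOFS);
if (IS_ERR(call))
	return PTR_ERR(call);

ret = rxrpc_kernel_send_data(call, &msg, request_len);
if (ret < 0)
	rxrpc_kernel_abort_call(call, abort_code);   /* abort_code is caller-chosen */

/* replies arrive through the interceptor registered with
 * rxrpc_kernel_intercept_rx_messages(), tagged RXRPC_SKB_MARK_DATA etc. */
rxrpc_kernel_end_call(call);
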
index 47ff2f4..99a4e36 100644 (file)
@@ -263,8 +263,8 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25)
 static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
 {
        skb->dev      = dev;
+       skb_reset_mac_header(skb);
        skb->pkt_type = PACKET_HOST;
-       skb->mac.raw  = skb->data;
        return htons(ETH_P_AX25);
 }
 
index 41456c1..93ce272 100644 (file)
@@ -709,6 +709,24 @@ struct hci_sco_hdr {
        __u8    dlen;
 } __attribute__ ((packed));
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
+{
+       return (struct hci_event_hdr *)skb->data;
+}
+
+static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb)
+{
+       return (struct hci_acl_hdr *)skb->data;
+}
+
+static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
+{
+       return (struct hci_sco_hdr *)skb->data;
+}
+#endif
+
 /* Command opcode pack/unpack */
 #define hci_opcode_pack(ogf, ocf)      (__u16) ((ocf & 0x03ff)|(ogf << 10))
 #define hci_opcode_ogf(op)             (op >> 10)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
new file mode 100644 (file)
index 0000000..88171f8
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef __NET_CFG80211_H
+#define __NET_CFG80211_H
+
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <net/genetlink.h>
+
+/*
+ * 802.11 configuration in-kernel interface
+ *
+ * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
+ */
+
+/* from net/wireless.h */
+struct wiphy;
+
+/**
+ * struct cfg80211_ops - backend description for wireless configuration
+ *
+ * This struct is registered by fullmac card drivers and/or wireless stacks
+ * in order to handle configuration requests on their interfaces.
+ *
+ * All callbacks except where otherwise noted should return 0
+ * on success or a negative error code.
+ *
+ * All operations are currently invoked under rtnl for consistency with the
+ * wireless extensions but this is subject to reevaluation as soon as this
+ * code is used more widely and we have a first user without wext.
+ *
+ * @add_virtual_intf: create a new virtual interface with the given name
+ *
+ * @del_virtual_intf: remove the virtual interface determined by ifindex.
+ */
+struct cfg80211_ops {
+       int     (*add_virtual_intf)(struct wiphy *wiphy, char *name,
+                                   unsigned int type);
+       int     (*del_virtual_intf)(struct wiphy *wiphy, int ifindex);
+};
+
+#endif /* __NET_CFG80211_H */
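
A fullmac driver or wireless stack hooks into this new interface by filling in a cfg80211_ops table with the two operations defined so far; a minimal, entirely hypothetical example (the wiphy registration call itself is not part of this header and is omitted):

/* sketch: implementing the currently defined callbacks; driver names are invented */
static int mydrv_add_virtual_intf(struct wiphy *wiphy, char *name,
				  unsigned int type)
{
	/* create a netdev of the requested type; 0 on success or -errno */
	return 0;
}

static int mydrv_del_virtual_intf(struct wiphy *wiphy, int ifindex)
{
	/* tear down the interface identified by ifindex */
	return 0;
}

static struct cfg80211_ops mydrv_cfg_ops = {
	.add_virtual_intf = mydrv_add_virtual_intf,
	.del_virtual_intf = mydrv_del_virtual_intf,
};
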
index 4c9522c..4f90f55 100644 (file)
@@ -120,7 +120,7 @@ extern int cipso_v4_rbm_strictvalid;
  */
 
 #define CIPSO_V4_OPTEXIST(x) (IPCB(x)->opt.cipso != 0)
-#define CIPSO_V4_OPTPTR(x) ((x)->nh.raw + IPCB(x)->opt.cipso)
+#define CIPSO_V4_OPTPTR(x) (skb_network_header(x) + IPCB(x)->opt.cipso)
 
 /*
  * DOI List Functions
index 9859b60..406db24 100644 (file)
@@ -25,6 +25,7 @@ struct compat_cmsghdr {
 };
 
 extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
+extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 
 #else /* defined(CONFIG_COMPAT) */
 #define compat_msghdr  msghdr          /* to avoid compiler warnings */
index f01626c..3012511 100644 (file)
@@ -148,17 +148,8 @@ extern void dn_fib_rules_cleanup(void);
 extern unsigned dnet_addr_type(__le16 addr);
 extern int dn_fib_lookup(struct flowi *fl, struct dn_fib_res *res);
 
-/*
- * rtnetlink interface
- */
-extern int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
-extern int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
 extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
-extern int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
-extern int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
-extern int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb);
-
 extern void dn_fib_free_info(struct dn_fib_info *fi);
 
 static inline void dn_fib_info_put(struct dn_fib_info *fi)
index a566944..c10e8e7 100644 (file)
@@ -18,7 +18,6 @@
 extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
 extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags);
 extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
 extern void dn_rt_cache_flush(int delay);
 
 /* Masks for flags field */
index 713d039..d05d8d2 100644 (file)
@@ -40,8 +40,6 @@ struct esp_data
        } auth;
 };
 
-extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len);
-extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
 extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
 
 static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb,
index d585ea9..ed3a887 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/netdevice.h>
 #include <linux/fib_rules.h>
 #include <net/flow.h>
-#include <net/netlink.h>
+#include <net/rtnetlink.h>
 
 struct fib_rule
 {
@@ -19,6 +19,8 @@ struct fib_rule
        u32                     flags;
        u32                     table;
        u8                      action;
+       u32                     target;
+       struct fib_rule *       ctarget;
        struct rcu_head         rcu;
 };
 
@@ -35,6 +37,8 @@ struct fib_rules_ops
        struct list_head        list;
        int                     rule_size;
        int                     addr_size;
+       int                     unresolved_rules;
+       int                     nr_goto_rules;
 
        int                     (*action)(struct fib_rule *,
                                          struct flowi *, int,
@@ -55,6 +59,10 @@ struct fib_rules_ops
        u32                     (*default_pref)(void);
        size_t                  (*nlmsg_payload)(struct fib_rule *);
 
+       /* Called after modifications to the rules set, must flush
+        * the route cache if one exists. */
+       void                    (*flush_cache)(void);
+
        int                     nlgroup;
        struct nla_policy       *policy;
        struct list_head        *rules_list;
@@ -66,7 +74,8 @@ struct fib_rules_ops
        [FRA_PRIORITY]  = { .type = NLA_U32 }, \
        [FRA_FWMARK]    = { .type = NLA_U32 }, \
        [FRA_FWMASK]    = { .type = NLA_U32 }, \
-       [FRA_TABLE]     = { .type = NLA_U32 }
+       [FRA_TABLE]     = { .type = NLA_U32 }, \
+       [FRA_GOTO]      = { .type = NLA_U32 }
 
 static inline void fib_rule_get(struct fib_rule *rule)
 {
@@ -98,11 +107,4 @@ extern int                  fib_rules_unregister(struct fib_rules_ops *);
 extern int                     fib_rules_lookup(struct fib_rules_ops *,
                                                 struct flowi *, int flags,
                                                 struct fib_lookup_arg *);
-
-extern int                     fib_nl_newrule(struct sk_buff *,
-                                              struct nlmsghdr *, void *);
-extern int                     fib_nl_delrule(struct sk_buff *,
-                                              struct nlmsghdr *, void *);
-extern int                     fib_rules_dump(struct sk_buff *,
-                                              struct netlink_callback *, int);
 #endif
index c28e424..668056b 100644 (file)
@@ -19,6 +19,9 @@
 #include <linux/in6.h>
 #include <linux/ipv6.h>
 #include <linux/types.h>
+#include <linux/jhash.h>
+
+#include <net/inet_sock.h>
 
 #include <net/ipv6.h>
 
@@ -28,12 +31,11 @@ struct inet_hashinfo;
 static inline unsigned int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport,
                                const struct in6_addr *faddr, const __be16 fport)
 {
-       unsigned int hashent = (lport ^ (__force u16)fport);
+       u32 ports = (lport ^ (__force u16)fport);
 
-       hashent ^= (__force u32)(laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
-       hashent ^= hashent >> 16;
-       hashent ^= hashent >> 8;
-       return hashent;
+       return jhash_3words((__force u32)laddr->s6_addr32[3],
+                           (__force u32)faddr->s6_addr32[3],
+                           ports, inet_ehash_secret);
 }
 
 static inline int inet6_sk_ehashfn(const struct sock *sk)
index 10117c8..de8399a 100644 (file)
@@ -114,13 +114,13 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
 {
        switch (skb->protocol) {
        case __constant_htons(ETH_P_IP):
-               if (skb->nh.raw + sizeof(struct iphdr) <= skb->tail)
-                       return IP_ECN_set_ce(skb->nh.iph);
+               if (skb->network_header + sizeof(struct iphdr) <= skb->tail)
+                       return IP_ECN_set_ce(ip_hdr(skb));
                break;
 
        case __constant_htons(ETH_P_IPV6):
-               if (skb->nh.raw + sizeof(struct ipv6hdr) <= skb->tail)
-                       return IP6_ECN_set_ce(skb->nh.ipv6h);
+               if (skb->network_header + sizeof(struct ipv6hdr) <= skb->tail)
+                       return IP6_ECN_set_ce(ipv6_hdr(skb));
                break;
        }
 
index ce6da97..62daf21 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/jhash.h>
 
 #include <net/flow.h>
 #include <net/sock.h>
@@ -167,13 +168,15 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 
 extern int inet_sk_rebuild_header(struct sock *sk);
 
+extern u32 inet_ehash_secret;
+extern void build_ehash_secret(void);
+
 static inline unsigned int inet_ehashfn(const __be32 laddr, const __u16 lport,
                                        const __be32 faddr, const __be16 fport)
 {
-       unsigned int h = ((__force __u32)laddr ^ lport) ^ ((__force __u32)faddr ^ (__force __u32)fport);
-       h ^= h >> 16;
-       h ^= h >> 8;
-       return h;
+       return jhash_2words((__force __u32) laddr ^ (__force __u32) faddr,
+                           ((__u32) lport) << 16 | (__force __u32)fport,
+                           inet_ehash_secret);
 }
 
 static inline int inet_sk_ehashfn(const struct sock *sk)
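
For context: both ehash hunks above replace a predictable xor-and-fold connection hash with a jhash keyed by the new inet_ehash_secret, so remote peers can no longer steer many flows into one hash bucket. Below is a minimal userspace sketch of that idea, not kernel code: build_secret() stands in for seeding the secret once (the kernel presumably seeds it from its RNG via build_ehash_secret()), and mix2() is a simplified stand-in for jhash_2words().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t ehash_secret;

/* stand-in for seeding the secret once per boot */
static void build_secret(void)
{
	srand((unsigned int)time(NULL));
	ehash_secret = (uint32_t)rand() ^ ((uint32_t)rand() << 16);
}

/* toy keyed two-word mixer, a simplified stand-in for jhash_2words() */
static uint32_t mix2(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = seed ^ a;

	h = (h ^ (h >> 16)) * 0x45d9f3bU;
	h ^= b;
	h = (h ^ (h >> 16)) * 0x45d9f3bU;
	return h ^ (h >> 16);
}

/* same shape as the new inet_ehashfn(): fold addresses and ports, then
 * mix with a per-run secret so bucket placement is unpredictable */
static uint32_t toy_ehashfn(uint32_t laddr, uint16_t lport,
			    uint32_t faddr, uint16_t fport)
{
	return mix2(laddr ^ faddr,
		    ((uint32_t)lport << 16) | fport, ehash_secret);
}

int main(void)
{
	build_secret();
	printf("bucket hash: %08x\n",
	       (unsigned int)toy_ehashfn(0x0a000001, 80, 0x0a000002, 40000));
	return 0;
}
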
index e79c3e3..bb207db 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/types.h>
 #include <linux/ip.h>
 #include <linux/in.h>
+#include <linux/skbuff.h>
 
 #include <net/inet_sock.h>
 #include <net/snmp.h>
@@ -43,6 +44,11 @@ struct inet_skb_parm
 #define IPSKB_REROUTED         16
 };
 
+static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
+{
+       return ip_hdr(skb)->ihl * 4;
+}
+
 struct ipcm_cookie
 {
        __be32                  addr;
@@ -74,7 +80,6 @@ struct msghdr;
 struct net_device;
 struct packet_type;
 struct rtable;
-struct sk_buff;
 struct sockaddr;
 
 extern void            ip_mc_dropsocket(struct sock *);
@@ -161,6 +166,10 @@ DECLARE_SNMP_STAT(struct linux_mib, net_statistics);
 #define NET_ADD_STATS_BH(field, adnd)  SNMP_ADD_STATS_BH(net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(field, adnd)        SNMP_ADD_STATS_USER(net_statistics, field, adnd)
 
+extern unsigned long snmp_fold_field(void *mib[], int offt);
+extern int snmp_mib_init(void *ptr[2], size_t mibsize, size_t mibalign);
+extern void snmp_mib_free(void *ptr[2]);
+
 extern int sysctl_local_port_range[2];
 extern int sysctl_ip_default_ttl;
 extern int sysctl_ip_nonlocal_bind;
index cf355a3..c48ea87 100644 (file)
@@ -219,8 +219,6 @@ extern void                 fib6_init(void);
 
 extern void                    fib6_rules_init(void);
 extern void                    fib6_rules_cleanup(void);
-extern int                     fib6_rules_dump(struct sk_buff *,
-                                               struct netlink_callback *);
 
 #endif
 #endif
index 4e927eb..5456fdd 100644 (file)
@@ -116,12 +116,7 @@ extern void                        rt6_pmtu_discovery(struct in6_addr *daddr,
                                                   struct net_device *dev,
                                                   u32 pmtu);
 
-struct nlmsghdr;
 struct netlink_callback;
-extern int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb);
-extern int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
-extern int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
-extern int inet6_rtm_getroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
 
 struct rt6_rtnl_dump_arg
 {
index 36c635c..5a4a036 100644 (file)
@@ -215,10 +215,6 @@ extern void fib_select_default(const struct flowi *flp, struct fib_result *res);
 /* Exported by fib_frontend.c */
 extern struct nla_policy rtm_ipv4_policy[];
 extern void            ip_fib_init(void);
-extern int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
-extern int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
-extern int inet_rtm_getroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
-extern int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb);
 extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                               struct net_device *dev, __be32 *spec_dst, u32 *itag);
 extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
@@ -235,8 +231,6 @@ extern __be32  __fib_res_prefsrc(struct fib_result *res);
 extern struct fib_table *fib_hash_init(u32 id);
 
 #ifdef CONFIG_IP_MULTIPLE_TABLES
-extern int fib4_rules_dump(struct sk_buff *skb, struct netlink_callback *cb);
-
 extern void __init fib4_rules_init(void);
 
 #ifdef CONFIG_NET_CLS_ROUTE
index 00328b7..f70afef 100644 (file)
@@ -166,13 +166,6 @@ DECLARE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
        if (is_udplite) SNMP_INC_STATS_USER(udplite_stats_in6, field);         \
        else            SNMP_INC_STATS_USER(udp_stats_in6, field);    } while(0)
 
-int snmp6_register_dev(struct inet6_dev *idev);
-int snmp6_unregister_dev(struct inet6_dev *idev);
-int snmp6_alloc_dev(struct inet6_dev *idev);
-int snmp6_free_dev(struct inet6_dev *idev);
-int snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign);
-void snmp6_mib_free(void *ptr[2]);
-
 struct ip6_ra_chain
 {
        struct ip6_ra_chain     *next;
@@ -605,8 +598,20 @@ extern int  udplite6_proc_init(void);
 extern void udplite6_proc_exit(void);
 extern int  ipv6_misc_proc_init(void);
 extern void ipv6_misc_proc_exit(void);
+extern int snmp6_register_dev(struct inet6_dev *idev);
+extern int snmp6_unregister_dev(struct inet6_dev *idev);
 
 extern struct rt6_statistics rt6_stats;
+#else
+static inline int snmp6_register_dev(struct inet6_dev *idev)
+{
+       return 0;
+}
+
+static inline int snmp6_unregister_dev(struct inet6_dev *idev)
+{
+       return 0;
+}
 #endif
 
 #ifdef CONFIG_SYSCTL
index c6b2ee6..4cc0b4e 100644 (file)
@@ -43,7 +43,7 @@ struct ipxhdr {
 
 static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
 {
-       return (struct ipxhdr *)skb->h.raw;
+       return (struct ipxhdr *)skb_transport_header(skb);
 }
 
 struct ipx_interface {
index 8a83018..f23d07c 100644 (file)
@@ -431,26 +431,7 @@ struct iw_public_data {
  * Those may be called only within the kernel.
  */
 
-/* First : function strictly used inside the kernel */
-
-/* Handle /proc/net/wireless, called in net/code/dev.c */
-extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
-                                int length);
-
-/* Handle IOCTLs, called in net/core/dev.c */
-extern int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd);
-
-/* Handle RtNetlink requests, called in net/core/rtnetlink.c */
-extern int wireless_rtnetlink_set(struct net_device *  dev,
-                                 char *                data,
-                                 int                   len);
-extern int wireless_rtnetlink_get(struct net_device *  dev,
-                                 char *                data,
-                                 int                   len,
-                                 char **               p_buf,
-                                 int *                 p_len);
-
-/* Second : functions that may be called by driver modules */
+/* functions that may be called by driver modules */
 
 /* Send a single event to user space */
 extern void wireless_send_event(struct net_device *    dev,
index aa33a47..4a8f58b 100644 (file)
@@ -203,7 +203,7 @@ struct llc_pdu_sn {
 
 static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
 {
-       return (struct llc_pdu_sn *)skb->nh.raw;
+       return (struct llc_pdu_sn *)skb_network_header(skb);
 }
 
 /* Un-numbered PDU format (3 bytes in length) */
@@ -215,12 +215,7 @@ struct llc_pdu_un {
 
 static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
 {
-       return (struct llc_pdu_un *)skb->nh.raw;
-}
-
-static inline void *llc_set_pdu_hdr(struct sk_buff *skb, void *ptr)
-{
-       return skb->nh.raw = ptr;
+       return (struct llc_pdu_un *)skb_network_header(skb);
 }
 
 /**
@@ -237,7 +232,11 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
                                       u8 ssap, u8 dsap, u8 cr)
 {
        const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
-       struct llc_pdu_un *pdu = llc_set_pdu_hdr(skb, skb_push(skb, hlen));
+       struct llc_pdu_un *pdu;
+
+       skb_push(skb, hlen);
+       skb_reset_network_header(skb);
+       pdu = llc_pdu_un_hdr(skb);
        pdu->dsap = dsap;
        pdu->ssap = ssap;
        pdu->ssap |= cr;
index ad7fe11..a4f2618 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <linux/err.h>
 #include <linux/sysctl.h>
+#include <net/rtnetlink.h>
 
 #define NUD_IN_TIMER   (NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
 #define NUD_VALID      (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
@@ -213,16 +214,7 @@ extern void                        pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
 extern struct pneigh_entry     *pneigh_lookup(struct neigh_table *tbl, const void *key, struct net_device *dev, int creat);
 extern int                     pneigh_delete(struct neigh_table *tbl, const void *key, struct net_device *dev);
 
-struct netlink_callback;
-struct nlmsghdr;
-extern int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb);
-extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
-extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
 extern void neigh_app_ns(struct neighbour *n);
-
-extern int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb);
-extern int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
-
 extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
 extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
 extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
index 0e690e3..1c6b8bd 100644 (file)
@@ -250,6 +250,11 @@ static inline int nf_ct_is_dying(struct nf_conn *ct)
        return test_bit(IPS_DYING_BIT, &ct->status);
 }
 
+static inline int nf_ct_is_untracked(const struct sk_buff *skb)
+{
+       return (skb->nfct == &nf_conntrack_untracked.ct_general);
+}
+
 extern unsigned int nf_conntrack_htable_size;
 extern int nf_conntrack_checksum;
 extern atomic_t nf_conntrack_count;
diff --git a/include/net/netfilter/nf_conntrack_compat.h b/include/net/netfilter/nf_conntrack_compat.h
deleted file mode 100644 (file)
index 6f84c1f..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-#ifndef _NF_CONNTRACK_COMPAT_H
-#define _NF_CONNTRACK_COMPAT_H
-
-#ifdef __KERNEL__
-
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/socket.h>
-
-#ifdef CONFIG_IP_NF_CONNTRACK_MARK
-static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb,
-                                       u_int32_t *ctinfo)
-{
-       struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo);
-
-       if (ct)
-               return &ct->mark;
-       else
-               return NULL;
-}
-#endif /* CONFIG_IP_NF_CONNTRACK_MARK */
-
-#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK
-static inline u_int32_t *nf_ct_get_secmark(const struct sk_buff *skb,
-                                          u_int32_t *ctinfo)
-{
-       struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo);
-
-       if (ct)
-               return &ct->secmark;
-       else
-               return NULL;
-}
-#endif /* CONFIG_IP_NF_CONNTRACK_SECMARK */
-
-#ifdef CONFIG_IP_NF_CT_ACCT
-static inline struct ip_conntrack_counter *
-nf_ct_get_counters(const struct sk_buff *skb)
-{
-       enum ip_conntrack_info ctinfo;
-       struct ip_conntrack *ct = ip_conntrack_get(skb, &ctinfo);
-
-       if (ct)
-               return ct->counters;
-       else
-               return NULL;
-}
-#endif /* CONFIG_IP_NF_CT_ACCT */
-
-static inline int nf_ct_is_untracked(const struct sk_buff *skb)
-{
-       return (skb->nfct == &ip_conntrack_untracked.ct_general);
-}
-
-static inline void nf_ct_untrack(struct sk_buff *skb)
-{
-       skb->nfct = &ip_conntrack_untracked.ct_general;
-}
-
-static inline int nf_ct_get_ctinfo(const struct sk_buff *skb,
-                                  enum ip_conntrack_info *ctinfo)
-{
-       struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo);
-       return (ct != NULL);
-}
-
-static inline int nf_ct_l3proto_try_module_get(unsigned short l3proto)
-{
-       need_conntrack();
-       return l3proto == PF_INET ? 0 : -1;
-}
-
-static inline void nf_ct_l3proto_module_put(unsigned short l3proto)
-{
-}
-
-#else /* CONFIG_IP_NF_CONNTRACK */
-
-#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
-#include <net/netfilter/nf_conntrack.h>
-
-#ifdef CONFIG_NF_CONNTRACK_MARK
-
-static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb,
-                                       u_int32_t *ctinfo)
-{
-       struct nf_conn *ct = nf_ct_get(skb, ctinfo);
-
-       if (ct)
-               return &ct->mark;
-       else
-               return NULL;
-}
-#endif /* CONFIG_NF_CONNTRACK_MARK */
-
-#ifdef CONFIG_NF_CONNTRACK_SECMARK
-static inline u_int32_t *nf_ct_get_secmark(const struct sk_buff *skb,
-                                          u_int32_t *ctinfo)
-{
-       struct nf_conn *ct = nf_ct_get(skb, ctinfo);
-
-       if (ct)
-               return &ct->secmark;
-       else
-               return NULL;
-}
-#endif /* CONFIG_NF_CONNTRACK_MARK */
-
-#ifdef CONFIG_NF_CT_ACCT
-static inline struct ip_conntrack_counter *
-nf_ct_get_counters(const struct sk_buff *skb)
-{
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-
-       if (ct)
-               return ct->counters;
-       else
-               return NULL;
-}
-#endif /* CONFIG_NF_CT_ACCT */
-
-static inline int nf_ct_is_untracked(const struct sk_buff *skb)
-{
-       return (skb->nfct == &nf_conntrack_untracked.ct_general);
-}
-
-static inline void nf_ct_untrack(struct sk_buff *skb)
-{
-       skb->nfct = &nf_conntrack_untracked.ct_general;
-}
-
-static inline int nf_ct_get_ctinfo(const struct sk_buff *skb,
-                                  enum ip_conntrack_info *ctinfo)
-{
-       struct nf_conn *ct = nf_ct_get(skb, ctinfo);
-       return (ct != NULL);
-}
-
-#endif /* CONFIG_IP_NF_CONNTRACK */
-
-#endif /* __KERNEL__ */
-
-#endif /* _NF_CONNTRACK_COMPAT_H */
index 85634e1..9fb9066 100644 (file)
@@ -27,6 +27,9 @@ extern unsigned int nf_conntrack_in(int pf,
 extern int nf_conntrack_init(void);
 extern void nf_conntrack_cleanup(void);
 
+extern int nf_conntrack_proto_init(void);
+extern void nf_conntrack_proto_fini(void);
+
 struct nf_conntrack_l3proto;
 extern struct nf_conntrack_l3proto *nf_ct_find_l3proto(u_int16_t pf);
 /* Like above, but you already have conntrack read lock. */
index b62a8a9..811c907 100644 (file)
@@ -20,30 +20,8 @@ DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
 #define CONNTRACK_ECACHE(x)    (__get_cpu_var(nf_conntrack_ecache).x)
 
 extern struct atomic_notifier_head nf_conntrack_chain;
-extern struct atomic_notifier_head nf_conntrack_expect_chain;
-
-static inline int nf_conntrack_register_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
-}
-
-static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);
-}
-
-static inline int
-nf_conntrack_expect_register_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb);
-}
-
-static inline int
-nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain,
-                       nb);
-}
+extern int nf_conntrack_register_notifier(struct notifier_block *nb);
+extern int nf_conntrack_unregister_notifier(struct notifier_block *nb);
 
 extern void nf_ct_deliver_cached_events(const struct nf_conn *ct);
 extern void __nf_ct_event_cache_init(struct nf_conn *ct);
@@ -71,6 +49,10 @@ static inline void nf_conntrack_event(enum ip_conntrack_events event,
                atomic_notifier_call_chain(&nf_conntrack_chain, event, ct);
 }
 
+extern struct atomic_notifier_head nf_conntrack_expect_chain;
+extern int nf_conntrack_expect_register_notifier(struct notifier_block *nb);
+extern int nf_conntrack_expect_unregister_notifier(struct notifier_block *nb);
+
 static inline void
 nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
                          struct nf_conntrack_expect *exp)
index eb575cb..f32f714 100644 (file)
@@ -90,10 +90,7 @@ extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
 /* Protocol registration. */
 extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
 extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
-
-extern struct nf_conntrack_l3proto *
-nf_ct_l3proto_find_get(u_int16_t l3proto);
-
+extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
 extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
 
 /* Existing built-in protocols */
index 8415182..f46cb93 100644 (file)
@@ -97,7 +97,6 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
 
 #define MAX_NF_CT_PROTO 256
-extern struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX];
 
 extern struct nf_conntrack_l4proto *
 __nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto);
index f191c67..e765654 100644 (file)
@@ -4,16 +4,6 @@
 #include <net/netfilter/nf_nat.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
 
-/* Compatibility definitions for ipt_FOO modules */
-#define ip_nat_range                   nf_nat_range
-#define ip_conntrack_tuple             nf_conntrack_tuple
-#define ip_conntrack_get               nf_ct_get
-#define ip_conntrack                   nf_conn
-#define ip_nat_setup_info              nf_nat_setup_info
-#define ip_nat_multi_range_compat      nf_nat_multi_range_compat
-#define ip_ct_iterate_cleanup          nf_ct_iterate_cleanup
-#define        IP_NF_ASSERT                    NF_CT_ASSERT
-
 extern int nf_nat_rule_init(void) __init;
 extern void nf_nat_rule_cleanup(void);
 extern int nf_nat_rule_find(struct sk_buff **pskb,
index bcaf67b..0bf325c 100644 (file)
@@ -171,6 +171,7 @@ enum {
        NLA_MSECS,
        NLA_NESTED,
        NLA_NUL_STRING,
+       NLA_BINARY,
        __NLA_TYPE_MAX,
 };
 
@@ -188,12 +189,13 @@ enum {
  *    NLA_STRING           Maximum length of string
  *    NLA_NUL_STRING       Maximum length of string (excluding NUL)
  *    NLA_FLAG             Unused
+ *    NLA_BINARY           Maximum length of attribute payload
  *    All other            Exact length of attribute payload
  *
  * Example:
  * static struct nla_policy my_policy[ATTR_MAX+1] __read_mostly = {
  *     [ATTR_FOO] = { .type = NLA_U16 },
- *     [ATTR_BAR] = { .type = NLA_STRING, len = BARSIZ },
+ *     [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
  *     [ATTR_BAZ] = { .len = sizeof(struct mystruct) },
  * };
  */
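
The new NLA_BINARY policy type above validates only a maximum payload length, unlike the exact-length default. A hedged sketch of how a netlink family might use it together with nlmsg_parse(); the MYATTR_* names and the 64-byte limit are invented for illustration, not taken from this patch.

#include <net/netlink.h>

/* sketch only: MYATTR_* and MY_KEY_MAX are made-up names */
enum { MYATTR_UNSPEC, MYATTR_NAME, MYATTR_KEY, __MYATTR_MAX };
#define MYATTR_MAX	(__MYATTR_MAX - 1)
#define MY_KEY_MAX	64

static struct nla_policy my_policy[MYATTR_MAX + 1] __read_mostly = {
	[MYATTR_NAME]	= { .type = NLA_NUL_STRING, .len = 16 },
	[MYATTR_KEY]	= { .type = NLA_BINARY,     .len = MY_KEY_MAX },
};

/* in a message handler: attributes longer than .len are rejected */
static int my_parse(struct nlmsghdr *nlh)
{
	struct nlattr *tb[MYATTR_MAX + 1];

	return nlmsg_parse(nlh, 0, tb, MYATTR_MAX, my_policy);
}
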
@@ -214,9 +216,7 @@ struct nl_info {
 
 extern void            netlink_run_queue(struct sock *sk, unsigned int *qlen,
                                          int (*cb)(struct sk_buff *,
-                                                   struct nlmsghdr *, int *));
-extern void            netlink_queue_skip(struct nlmsghdr *nlh,
-                                          struct sk_buff *skb);
+                                                   struct nlmsghdr *));
 extern int             nlmsg_notify(struct sock *sk, struct sk_buff *skb,
                                     u32 pid, unsigned int group, int report,
                                     gfp_t flags);
@@ -525,7 +525,7 @@ static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
  */
 static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-       nlh->nlmsg_len = skb->tail - (unsigned char *) nlh;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
 
        return skb->len;
 }
@@ -538,7 +538,7 @@ static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
  */
 static inline void *nlmsg_get_pos(struct sk_buff *skb)
 {
-       return skb->tail;
+       return skb_tail_pointer(skb);
 }
 
 /**
@@ -548,7 +548,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
  *
  * Trims the message to the provided mark. Returns -1.
  */
-static inline int nlmsg_trim(struct sk_buff *skb, void *mark)
+static inline int nlmsg_trim(struct sk_buff *skb, const void *mark)
 {
        if (mark)
                skb_trim(skb, (unsigned char *) mark - skb->data);
@@ -940,7 +940,7 @@ static inline unsigned long nla_get_msecs(struct nlattr *nla)
  */
 static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
 {
-       struct nlattr *start = (struct nlattr *) skb->tail;
+       struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
 
        if (nla_put(skb, attrtype, 0, NULL) < 0)
                return NULL;
@@ -960,7 +960,7 @@ static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
  */
 static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
 {
-       start->nla_len = skb->tail - (unsigned char *) start;
+       start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
        return skb->len;
 }
 
index 02647fe..4129df7 100644 (file)
@@ -326,18 +326,18 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
                case TCF_LAYER_LINK:
                        return skb->data;
                case TCF_LAYER_NETWORK:
-                       return skb->nh.raw;
+                       return skb_network_header(skb);
                case TCF_LAYER_TRANSPORT:
-                       return skb->h.raw;
+                       return skb_transport_header(skb);
        }
 
        return NULL;
 }
 
-static inline int tcf_valid_offset(struct sk_buff *skb, unsigned char *ptr,
-                                  int len)
+static inline int tcf_valid_offset(const struct sk_buff *skb,
+                                  const unsigned char *ptr, const int len)
 {
-       return unlikely((ptr + len) < skb->tail && ptr > skb->head);
+       return unlikely((ptr + len) < skb_tail_pointer(skb) && ptr > skb->head);
 }
 
 #ifdef CONFIG_NET_CLS_IND
index f6afee7..5754d53 100644 (file)
@@ -2,6 +2,7 @@
 #define __NET_PKT_SCHED_H
 
 #include <linux/jiffies.h>
+#include <linux/ktime.h>
 #include <net/sch_generic.h>
 
 struct qdisc_walker
@@ -12,8 +13,6 @@ struct qdisc_walker
        int     (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
 };
 
-extern rwlock_t qdisc_tree_lock;
-
 #define QDISC_ALIGNTO          32
 #define QDISC_ALIGN(len)       (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))
 
@@ -37,175 +36,38 @@ static inline void *qdisc_priv(struct Qdisc *q)
    The things are not so bad, because we may use an artificial
    clock evaluated by integration of network data flow
    in the most critical places.
-
-   Note: we do not use fastgettimeofday.
-   The reason is that, when it is not the same thing as
-   gettimeofday, it returns invalid timestamp, which is
-   not updated, when net_bh is active.
- */
-
-/* General note about internal clock.
-
-   Any clock source returns time intervals, measured in units
-   close to 1usec. With source CONFIG_NET_SCH_CLK_GETTIMEOFDAY it is precisely
-   microseconds, otherwise something close but different chosen to minimize
-   arithmetic cost. Ratio usec/internal untis in form nominator/denominator
-   may be read from /proc/net/psched.
  */
 
-
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
-
-typedef struct timeval psched_time_t;
-typedef long           psched_tdiff_t;
-
-#define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp))
-#define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(usecs)
-#define PSCHED_JIFFIE2US(delay) jiffies_to_usecs(delay)
-
-#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
-
 typedef u64    psched_time_t;
 typedef long   psched_tdiff_t;
 
-#ifdef CONFIG_NET_SCH_CLK_JIFFIES
-
-#if HZ < 96
-#define PSCHED_JSCALE 14
-#elif HZ >= 96 && HZ < 192
-#define PSCHED_JSCALE 13
-#elif HZ >= 192 && HZ < 384
-#define PSCHED_JSCALE 12
-#elif HZ >= 384 && HZ < 768
-#define PSCHED_JSCALE 11
-#elif HZ >= 768
-#define PSCHED_JSCALE 10
-#endif
+/* Avoid doing 64 bit divide by 1000 */
+#define PSCHED_US2NS(x)                        ((s64)(x) << 10)
+#define PSCHED_NS2US(x)                        ((x) >> 10)
 
-#define PSCHED_GET_TIME(stamp) ((stamp) = (get_jiffies_64()<<PSCHED_JSCALE))
-#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
-#define PSCHED_JIFFIE2US(delay) ((delay)<<PSCHED_JSCALE)
-
-#endif /* CONFIG_NET_SCH_CLK_JIFFIES */
-#ifdef CONFIG_NET_SCH_CLK_CPU
-#include <asm/timex.h>
-
-extern psched_tdiff_t psched_clock_per_hz;
-extern int psched_clock_scale;
-extern psched_time_t psched_time_base;
-extern cycles_t psched_time_mark;
-
-#define PSCHED_GET_TIME(stamp)                                         \
-do {                                                                   \
-       cycles_t cur = get_cycles();                                    \
-       if (sizeof(cycles_t) == sizeof(u32)) {                          \
-               if (cur <= psched_time_mark)                            \
-                       psched_time_base += 0x100000000ULL;             \
-               psched_time_mark = cur;                                 \
-               (stamp) = (psched_time_base + cur)>>psched_clock_scale; \
-       } else {                                                        \
-               (stamp) = cur>>psched_clock_scale;                      \
-       }                                                               \
-} while (0)
-#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
-#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz)
-
-#endif /* CONFIG_NET_SCH_CLK_CPU */
-
-#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
-
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
-#define PSCHED_TDIFF(tv1, tv2) \
-({ \
-          int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
-          int __delta = (tv1).tv_usec - (tv2).tv_usec; \
-          if (__delta_sec) { \
-                  switch (__delta_sec) { \
-                  default: \
-                          __delta = 0; \
-                  case 2: \
-                          __delta += USEC_PER_SEC; \
-                  case 1: \
-                          __delta += USEC_PER_SEC; \
-                  } \
-          } \
-          __delta; \
-})
-
-static inline int
-psched_tod_diff(int delta_sec, int bound)
+#define PSCHED_TICKS_PER_SEC           PSCHED_NS2US(NSEC_PER_SEC)
+#define PSCHED_PASTPERFECT             0
+
+static inline psched_time_t psched_get_time(void)
 {
-       int delta;
-
-       if (bound <= USEC_PER_SEC || delta_sec > (0x7FFFFFFF/USEC_PER_SEC)-1)
-               return bound;
-       delta = delta_sec * USEC_PER_SEC;
-       if (delta > bound || delta < 0)
-               delta = bound;
-       return delta;
+       return PSCHED_NS2US(ktime_to_ns(ktime_get()));
 }
 
-#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
-({ \
-          int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
-          int __delta = (tv1).tv_usec - (tv2).tv_usec; \
-          switch (__delta_sec) { \
-          default: \
-                  __delta = psched_tod_diff(__delta_sec, bound);  break; \
-          case 2: \
-                  __delta += USEC_PER_SEC; \
-          case 1: \
-                  __delta += USEC_PER_SEC; \
-          case 0: \
-                  if (__delta > bound || __delta < 0) \
-                       __delta = bound; \
-          } \
-          __delta; \
-})
-
-#define PSCHED_TLESS(tv1, tv2) (((tv1).tv_usec < (tv2).tv_usec && \
-                               (tv1).tv_sec <= (tv2).tv_sec) || \
-                                (tv1).tv_sec < (tv2).tv_sec)
-
-#define PSCHED_TADD2(tv, delta, tv_res) \
-({ \
-          int __delta = (tv).tv_usec + (delta); \
-          (tv_res).tv_sec = (tv).tv_sec; \
-          while (__delta >= USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \
-          (tv_res).tv_usec = __delta; \
-})
-
-#define PSCHED_TADD(tv, delta) \
-({ \
-          (tv).tv_usec += (delta); \
-          while ((tv).tv_usec >= USEC_PER_SEC) { (tv).tv_sec++; \
-                (tv).tv_usec -= USEC_PER_SEC; } \
-})
-
-/* Set/check that time is in the "past perfect";
-   it depends on concrete representation of system time
- */
-
-#define PSCHED_SET_PASTPERFECT(t)      ((t).tv_sec = 0)
-#define PSCHED_IS_PASTPERFECT(t)       ((t).tv_sec == 0)
-
-#define        PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; })
-
-#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
-
-#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
-#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
-       min_t(long long, (tv1) - (tv2), bound)
-
+static inline psched_tdiff_t
+psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
+{
+       return min(tv1 - tv2, bound);
+}
 
-#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2))
-#define PSCHED_TADD2(tv, delta, tv_res) ((tv_res) = (tv) + (delta))
-#define PSCHED_TADD(tv, delta) ((tv) += (delta))
-#define PSCHED_SET_PASTPERFECT(t)      ((t) = 0)
-#define PSCHED_IS_PASTPERFECT(t)       ((t) == 0)
-#define        PSCHED_AUDIT_TDIFF(t)
+struct qdisc_watchdog {
+       struct hrtimer  timer;
+       struct Qdisc    *qdisc;
+};
 
-#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
+extern void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
+                                   psched_time_t expires);
+extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
 
 extern struct Qdisc_ops pfifo_qdisc_ops;
 extern struct Qdisc_ops bfifo_qdisc_ops;
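
With the CONFIG_NET_SCH_CLK_* variants gone, psched time above becomes a u64 in roughly microsecond-sized ticks derived from ktime_get() (one tick is 1024 ns, hence the shift by 10), and qdiscs that need to defer work use the new qdisc_watchdog hrtimer helpers. A hedged sketch of how a rate-limiting qdisc might use them; struct my_sched_data and the next_send bookkeeping are invented for illustration.

#include <net/pkt_sched.h>

/* sketch: delay dequeue until a stored transmit time, waking up via the
 * watchdog hrtimer; the "my_" names are placeholders, not from this patch */
struct my_sched_data {
	struct qdisc_watchdog	watchdog;
	psched_time_t		next_send;	/* earliest allowed transmit time */
};

static int my_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct my_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->next_send = PSCHED_PASTPERFECT;
	return 0;
}

static struct sk_buff *my_dequeue(struct Qdisc *sch)
{
	struct my_sched_data *q = qdisc_priv(sch);
	psched_time_t now = psched_get_time();

	if (q->next_send > now) {
		/* hrtimer fires at next_send and reschedules the qdisc */
		qdisc_watchdog_schedule(&q->watchdog, q->next_send);
		return NULL;
	}
	return __skb_dequeue(&sch->q);
}

static void my_destroy(struct Qdisc *sch)
{
	struct my_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
}
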
index a4eb379..3cf31d4 100644 (file)
@@ -151,17 +151,17 @@ static inline void red_set_parms(struct red_parms *p,
 
 static inline int red_is_idling(struct red_parms *p)
 {
-       return !PSCHED_IS_PASTPERFECT(p->qidlestart);
+       return p->qidlestart != PSCHED_PASTPERFECT;
 }
 
 static inline void red_start_of_idle_period(struct red_parms *p)
 {
-       PSCHED_GET_TIME(p->qidlestart);
+       p->qidlestart = psched_get_time();
 }
 
 static inline void red_end_of_idle_period(struct red_parms *p)
 {
-       PSCHED_SET_PASTPERFECT(p->qidlestart);
+       p->qidlestart = PSCHED_PASTPERFECT;
 }
 
 static inline void red_restart(struct red_parms *p)
@@ -177,8 +177,8 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
        long us_idle;
        int  shift;
 
-       PSCHED_GET_TIME(now);
-       us_idle = PSCHED_TDIFF_SAFE(now, p->qidlestart, p->Scell_max);
+       now = psched_get_time();
+       us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
 
        /*
         * The problem: ideally, average queue length recalculation should
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
new file mode 100644 (file)
index 0000000..3b3d474
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef __NET_RTNETLINK_H
+#define __NET_RTNETLINK_H
+
+#include <linux/rtnetlink.h>
+#include <net/netlink.h>
+
+typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *);
+typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
+
+extern int     __rtnl_register(int protocol, int msgtype,
+                               rtnl_doit_func, rtnl_dumpit_func);
+extern void    rtnl_register(int protocol, int msgtype,
+                             rtnl_doit_func, rtnl_dumpit_func);
+extern int     rtnl_unregister(int protocol, int msgtype);
+extern void    rtnl_unregister_all(int protocol);
+
+static inline int rtnl_msg_family(struct nlmsghdr *nlh)
+{
+       if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg))
+               return ((struct rtgenmsg *) nlmsg_data(nlh))->rtgen_family;
+       else
+               return AF_UNSPEC;
+}
+
+#endif
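
This new header is what makes the deleted extern doit/dumpit prototypes elsewhere in the diff unnecessary: protocols now hand their handlers to the rtnetlink core at init time and messages are dispatched by (protocol, message type). A hedged sketch of a registration; the handler bodies and the choice of PF_INET/RTM_GETROUTE are illustrative only.

#include <linux/init.h>
#include <net/rtnetlink.h>

/* sketch: placeholder handlers matching rtnl_doit_func/rtnl_dumpit_func */
static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	return 0;			/* parse nlh and act on it here */
}

static int my_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	return skb->len;		/* append dump messages to skb here */
}

static int __init my_proto_init(void)
{
	/* __rtnl_register() is the variant that reports failure */
	rtnl_register(PF_INET, RTM_GETROUTE, my_doit, my_dumpit);
	return 0;
}
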
index 8208639..1b8e351 100644 (file)
@@ -5,10 +5,10 @@
 #include <linux/types.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
-#include <linux/rtnetlink.h>
 #include <linux/pkt_sched.h>
 #include <linux/pkt_cls.h>
 #include <net/gen_stats.h>
+#include <net/rtnetlink.h>
 
 struct Qdisc_ops;
 struct qdisc_walker;
@@ -177,14 +177,8 @@ extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
 extern struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops);
 extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
                                       struct Qdisc_ops *ops, u32 parentid);
-
-static inline void
-tcf_destroy(struct tcf_proto *tp)
-{
-       tp->ops->destroy(tp);
-       module_put(tp->ops->owner);
-       kfree(tp);
-}
+extern void tcf_destroy(struct tcf_proto *tp);
+extern void tcf_destroy_chain(struct tcf_proto *fl);
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                       struct sk_buff_head *list)
index 5ddb855..bb37724 100644 (file)
@@ -283,7 +283,7 @@ enum { SCTP_MAX_GABS = 16 };
 #define SCTP_RTO_BETA           2   /* 1/4 when converted to right shifts. */
 
 /* Maximum number of new data packets that can be sent in a burst.  */
-#define SCTP_MAX_BURST         4
+#define SCTP_DEFAULT_MAX_BURST         4
 
 #define SCTP_CLOCK_GRANULARITY 1       /* 1 jiffy */
 
index f431acf..7b4fff9 100644 (file)
@@ -276,6 +276,7 @@ struct sctp_sock {
        __u32 default_context;
        __u32 default_timetolive;
        __u32 default_rcv_context;
+       int max_burst;
 
        /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
         * the destination address every heartbeat interval. This value
@@ -304,10 +305,12 @@ struct sctp_sock {
        __u32 autoclose;
        __u8 nodelay;
        __u8 disable_fragments;
-       __u8 pd_mode;
        __u8 v4mapped;
+       __u8 frag_interleave;
        __u32 adaptation_ind;
+       __u32 pd_point;
 
+       atomic_t pd_mode;
        /* Receive to here while partial delivery is in effect. */
        struct sk_buff_head pd_lobby;
 };
index 2923e3d..de88ed5 100644 (file)
@@ -89,6 +89,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
        __u16 error,
        __u16 outbound,
        __u16 inbound,
+       struct sctp_chunk *chunk,
        gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
index ab26ab3..39ea3f4 100644 (file)
@@ -78,7 +78,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
 void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t);
 
 /* Clear the partial data delivery condition on this socket. */
-int sctp_clear_pd(struct sock *sk);
+int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
index 67a30eb..6d2b577 100644 (file)
@@ -97,6 +97,12 @@ enum sctp_optname {
 #define SCTP_DELAYED_ACK_TIME SCTP_DELAYED_ACK_TIME
        SCTP_CONTEXT,   /* Receive Context */
 #define SCTP_CONTEXT SCTP_CONTEXT
+       SCTP_FRAGMENT_INTERLEAVE,
+#define SCTP_FRAGMENT_INTERLEAVE SCTP_FRAGMENT_INTERLEAVE
+       SCTP_PARTIAL_DELIVERY_POINT,    /* Set/Get partial delivery point */
+#define SCTP_PARTIAL_DELIVERY_POINT SCTP_PARTIAL_DELIVERY_POINT
+       SCTP_MAX_BURST,         /* Set/Get max burst */
+#define SCTP_MAX_BURST SCTP_MAX_BURST
 
        /* Internal Socket Options. Some of the sctp library functions are 
         * implemented using these socket options.
@@ -213,6 +219,7 @@ struct sctp_assoc_change {
        __u16 sac_outbound_streams;
        __u16 sac_inbound_streams;
        sctp_assoc_t sac_assoc_id;
+       __u8 sac_info[0];
 };
 
 /*
@@ -261,6 +268,7 @@ enum sctp_spc_state {
        SCTP_ADDR_REMOVED,
        SCTP_ADDR_ADDED,
        SCTP_ADDR_MADE_PRIM,
+       SCTP_ADDR_CONFIRMED,
 };
 
 
@@ -508,16 +516,17 @@ struct sctp_setadaptation {
  *   address's parameters:
  */
 enum  sctp_spp_flags {
-       SPP_HB_ENABLE = 1,              /*Enable heartbeats*/
-       SPP_HB_DISABLE = 2,             /*Disable heartbeats*/
+       SPP_HB_ENABLE = 1<<0,           /*Enable heartbeats*/
+       SPP_HB_DISABLE = 1<<1,          /*Disable heartbeats*/
        SPP_HB = SPP_HB_ENABLE | SPP_HB_DISABLE,
-       SPP_HB_DEMAND = 4,              /*Send heartbeat immediately*/
-       SPP_PMTUD_ENABLE = 8,           /*Enable PMTU discovery*/
-       SPP_PMTUD_DISABLE = 16,         /*Disable PMTU discovery*/
+       SPP_HB_DEMAND = 1<<2,           /*Send heartbeat immediately*/
+       SPP_PMTUD_ENABLE = 1<<3,        /*Enable PMTU discovery*/
+       SPP_PMTUD_DISABLE = 1<<4,       /*Disable PMTU discovery*/
        SPP_PMTUD = SPP_PMTUD_ENABLE | SPP_PMTUD_DISABLE,
-       SPP_SACKDELAY_ENABLE = 32,      /*Enable SACK*/
-       SPP_SACKDELAY_DISABLE = 64,     /*Disable SACK*/
+       SPP_SACKDELAY_ENABLE = 1<<5,    /*Enable SACK*/
+       SPP_SACKDELAY_DISABLE = 1<<6,   /*Disable SACK*/
        SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE,
+       SPP_HB_TIME_IS_ZERO = 1<<7,     /* Set HB delay to 0 */
 };
 
 struct sctp_paddrparams {
@@ -530,7 +539,7 @@ struct sctp_paddrparams {
        __u32                   spp_flags;
 } __attribute__((packed, aligned(4)));
 
-/* 7.1.24. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME)
+/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME)
  *
  *   This options will get or set the delayed ack timer.  The time is set
  *   in milliseconds.  If the assoc_id is 0, then this sets or gets the
index 2c7d60c..25c37e3 100644 (file)
@@ -202,6 +202,15 @@ struct sock {
        unsigned short          sk_type;
        int                     sk_rcvbuf;
        socket_lock_t           sk_lock;
+       /*
+        * The backlog queue is special, it is always used with
+        * the per-socket spinlock held and requires low latency
+        * access. Therefore we special case its implementation.
+        */
+       struct {
+               struct sk_buff *head;
+               struct sk_buff *tail;
+       } sk_backlog;
        wait_queue_head_t       *sk_sleep;
        struct dst_entry        *sk_dst_cache;
        struct xfrm_policy      *sk_policy[2];
@@ -221,15 +230,6 @@ struct sock {
        int                     sk_rcvlowat;
        unsigned long           sk_flags;
        unsigned long           sk_lingertime;
-       /*
-        * The backlog queue is special, it is always used with
-        * the per-socket spinlock held and requires low latency
-        * access. Therefore we special case it's implementation.
-        */
-       struct {
-               struct sk_buff *head;
-               struct sk_buff *tail;
-       } sk_backlog;
        struct sk_buff_head     sk_error_queue;
        struct proto            *sk_prot_creator;
        rwlock_t                sk_callback_lock;
@@ -244,7 +244,7 @@ struct sock {
        struct sk_filter        *sk_filter;
        void                    *sk_protinfo;
        struct timer_list       sk_timer;
-       struct timeval          sk_stamp;
+       ktime_t                 sk_stamp;
        struct socket           *sk_socket;
        void                    *sk_user_data;
        struct page             *sk_sndmsg_page;
@@ -390,6 +390,7 @@ enum sock_flags {
        SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
        SOCK_DBG, /* %SO_DEBUG setting */
        SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
+       SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
        SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
        SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
 };
@@ -710,15 +711,6 @@ static inline void sk_stream_mem_reclaim(struct sock *sk)
                __sk_stream_mem_reclaim(sk);
 }
 
-static inline void sk_stream_writequeue_purge(struct sock *sk)
-{
-       struct sk_buff *skb;
-
-       while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
-               sk_stream_free_skb(sk, skb);
-       sk_stream_mem_reclaim(sk);
-}
-
 static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 {
        return (int)skb->truesize <= sk->sk_forward_alloc ||
@@ -1083,19 +1075,7 @@ static inline int sk_can_gso(const struct sock *sk)
        return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
 }
 
-static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
-{
-       __sk_dst_set(sk, dst);
-       sk->sk_route_caps = dst->dev->features;
-       if (sk->sk_route_caps & NETIF_F_GSO)
-               sk->sk_route_caps |= NETIF_F_GSO_MASK;
-       if (sk_can_gso(sk)) {
-               if (dst->header_len)
-                       sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
-               else 
-                       sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
-       }
-}
+extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
 static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
 {
@@ -1256,18 +1236,6 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
        return page;
 }
 
-#define sk_stream_for_retrans_queue(skb, sk)                           \
-               for (skb = (sk)->sk_write_queue.next;                   \
-                    (skb != (sk)->sk_send_head) &&                     \
-                    (skb != (struct sk_buff *)&(sk)->sk_write_queue);  \
-                    skb = skb->next)
-
-/*from STCP for fast SACK Process*/
-#define sk_stream_for_retrans_queue_from(skb, sk)                      \
-               for (; (skb != (sk)->sk_send_head) &&                   \
-                    (skb != (struct sk_buff *)&(sk)->sk_write_queue);  \
-                    skb = skb->next)
-
 /*
  *     Default write policy as shown to user space via poll/select/SIGIO
  */
@@ -1304,22 +1272,18 @@ static inline int sock_intr_errno(long timeo)
        return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
 }
 
+extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+       struct sk_buff *skb);
+
 static __inline__ void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
 {
-       struct timeval stamp;
+       ktime_t kt = skb->tstamp;
 
-       skb_get_timestamp(skb, &stamp);
-       if (sock_flag(sk, SOCK_RCVTSTAMP)) {
-               /* Race occurred between timestamp enabling and packet
-                  receiving.  Fill in the current time for now. */
-               if (stamp.tv_sec == 0)
-                       do_gettimeofday(&stamp);
-               skb_set_timestamp(skb, &stamp);
-               put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
-                        &stamp);
-       } else
-               sk->sk_stamp = stamp;
+       if (sock_flag(sk, SOCK_RCVTSTAMP))
+               __sock_recv_timestamp(msg, sk, skb);
+       else
+               sk->sk_stamp = kt;
 }
 
 /**
@@ -1350,18 +1314,17 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e
 
 extern void sock_enable_timestamp(struct sock *sk);
 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
+extern int sock_get_timestampns(struct sock *, struct timespec __user *);
 
 /* 
  *     Enable debug/info messages 
  */
+extern int net_msg_warn;
+#define NETDEBUG(fmt, args...) \
+       do { if (net_msg_warn) printk(fmt,##args); } while (0)
 
-#ifdef CONFIG_NETDEBUG
-#define NETDEBUG(fmt, args...) printk(fmt,##args)
-#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
-#else
-#define NETDEBUG(fmt, args...) do { } while (0)
-#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
-#endif
+#define LIMIT_NETDEBUG(fmt, args...) \
+       do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)
 
 /*
  * Macros for sleeping on a socket. Use them like this:
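
The sock.h changes above add nanosecond receive timestamps (SOCK_RCVTSTAMPNS for %SO_TIMESTAMPNS, sock_get_timestampns(), and the ktime_t sk_stamp). From userspace the feature would be consumed roughly as below; this is an illustrative sketch that assumes the libc headers expose SO_TIMESTAMPNS and its SCM_TIMESTAMPNS control message carrying a struct timespec.

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(5555),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	char data[2048], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	/* walk the control messages for the nanosecond receive timestamp */
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMPNS) {
			struct timespec ts;

			memcpy(&ts, CMSG_DATA(cm), sizeof(ts));
			printf("received at %ld.%09ld\n",
			       (long)ts.tv_sec, ts.tv_nsec);
		}
	}
	return 0;
}
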
index 5c472f2..a385797 100644 (file)
@@ -220,6 +220,7 @@ extern int sysctl_tcp_app_win;
 extern int sysctl_tcp_adv_win_scale;
 extern int sysctl_tcp_tw_reuse;
 extern int sysctl_tcp_frto;
+extern int sysctl_tcp_frto_response;
 extern int sysctl_tcp_low_latency;
 extern int sysctl_tcp_dma_copybreak;
 extern int sysctl_tcp_nometrics_save;
@@ -230,6 +231,7 @@ extern int sysctl_tcp_mtu_probing;
 extern int sysctl_tcp_base_mss;
 extern int sysctl_tcp_workaround_signed_windows;
 extern int sysctl_tcp_slow_start_after_idle;
+extern int sysctl_tcp_max_ssthresh;
 
 extern atomic_t tcp_memory_allocated;
 extern atomic_t tcp_sockets_allocated;
@@ -341,6 +343,7 @@ extern struct sock *                tcp_check_req(struct sock *sk,struct sk_buff *skb,
 extern int                     tcp_child_process(struct sock *parent,
                                                  struct sock *child,
                                                  struct sk_buff *skb);
+extern int                     tcp_use_frto(struct sock *sk);
 extern void                    tcp_enter_frto(struct sock *sk);
 extern void                    tcp_enter_loss(struct sock *sk, int how);
 extern void                    tcp_clear_retrans(struct tcp_sock *tp);
@@ -417,9 +420,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
 
 /* tcp_output.c */
 
-extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-                                     unsigned int cur_mss, int nonagle);
-extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
+extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+                                     int nonagle);
+extern int tcp_may_send_now(struct sock *sk);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_xmit_retransmit_queue(struct sock *);
 extern void tcp_simple_retransmit(struct sock *);
@@ -476,8 +479,10 @@ static inline void tcp_fast_path_on(struct tcp_sock *tp)
        __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 }
 
-static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_fast_path_check(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (skb_queue_empty(&tp->out_of_order_queue) &&
            tp->rcv_wnd &&
            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
@@ -588,10 +593,10 @@ static inline void tcp_dec_pcount_approx(__u32 *count,
        }
 }
 
-static inline void tcp_packets_out_inc(struct sock *sk, 
-                                      struct tcp_sock *tp,
+static inline void tcp_packets_out_inc(struct sock *sk,
                                       const struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        int orig = tp->packets_out;
 
        tp->packets_out += tcp_skb_pcount(skb);
@@ -624,9 +629,12 @@ enum tcp_ca_event {
 #define TCP_CA_MAX     128
 #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
 
+#define TCP_CONG_NON_RESTRICTED 0x1
+#define TCP_CONG_RTT_STAMP     0x2
+
 struct tcp_congestion_ops {
        struct list_head        list;
-       int     non_restricted;
+       unsigned long flags;
 
        /* initialize private data (optional) */
        void (*init)(struct sock *sk);
@@ -640,8 +648,6 @@ struct tcp_congestion_ops {
        /* do new cwnd calculation (required) */
        void (*cong_avoid)(struct sock *sk, u32 ack,
                           u32 rtt, u32 in_flight, int good_ack);
-       /* round trip time sample per acked packet (optional) */
-       void (*rtt_sample)(struct sock *sk, u32 usrtt);
        /* call before changing ca_state (optional) */
        void (*set_state)(struct sock *sk, u8 new_state);
        /* call when cwnd event occurs (optional) */
@@ -649,7 +655,7 @@ struct tcp_congestion_ops {
        /* new value of cwnd after loss (optional) */
        u32  (*undo_cwnd)(struct sock *sk);
        /* hook for packet ack accounting (optional) */
-       void (*pkts_acked)(struct sock *sk, u32 num_acked);
+       void (*pkts_acked)(struct sock *sk, u32 num_acked, ktime_t last);
        /* get info for inet_diag (optional) */
        void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
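
Around the hunk above, the congestion-control hook table trades the old non_restricted int for a flags word and drops rtt_sample(); modules that want RTT samples now set TCP_CONG_RTT_STAMP and use the ktime_t passed to pkts_acked(). A hedged sketch of a do-nothing module wired to these hooks; the "example" naming and the trivial cwnd policy are purely illustrative.

#include <linux/module.h>
#include <net/tcp.h>

static u32 example_ssthresh(struct sock *sk)
{
	/* conventional halving, floor of two segments */
	return max(tcp_sk(sk)->snd_cwnd >> 1U, 2U);
}

static void example_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
			       u32 in_flight, int good_ack)
{
	/* a real module grows tcp_sk(sk)->snd_cwnd here */
}

static void example_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
{
	/* "last" is a timestamp tied to the acked data; with
	 * TCP_CONG_RTT_STAMP set it can be turned into an RTT sample */
}

static struct tcp_congestion_ops example_cong __read_mostly = {
	.flags		= TCP_CONG_RTT_STAMP,
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= example_ssthresh,
	.cong_avoid	= example_cong_avoid,
	.pkts_acked	= example_pkts_acked,
};

static int __init example_cong_register(void)
{
	return tcp_register_congestion_control(&example_cong);
}
module_init(example_cong_register);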
 
@@ -736,7 +742,7 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
        tp->left_out = tp->sacked_out + tp->lost_out;
 }
 
-extern void tcp_enter_cwr(struct sock *sk);
+extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
 
 /* Slow start with delack produces 3 packets of burst, so that
@@ -775,18 +781,21 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
                tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 }
 
-static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_check_probe_timer(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
+
        if (!tp->packets_out && !icsk->icsk_pending)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          icsk->icsk_rto, TCP_RTO_MAX);
 }
 
-static inline void tcp_push_pending_frames(struct sock *sk,
-                                          struct tcp_sock *tp)
+static inline void tcp_push_pending_frames(struct sock *sk)
 {
-       __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       __tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
@@ -815,7 +824,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
 
 static inline int tcp_checksum_complete(struct sk_buff *skb)
 {
-       return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+       return !skb_csum_unnecessary(skb) &&
                __tcp_checksum_complete(skb);
 }
 
@@ -918,21 +927,7 @@ static inline void tcp_set_state(struct sock *sk, int state)
 #endif 
 }
 
-static inline void tcp_done(struct sock *sk)
-{
-       if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-               TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
-
-       tcp_set_state(sk, TCP_CLOSE);
-       tcp_clear_xmit_timers(sk);
-
-       sk->sk_shutdown = SHUTDOWN_MASK;
-
-       if (!sock_flag(sk, SOCK_DEAD))
-               sk->sk_state_change(sk);
-       else
-               inet_csk_destroy_sock(sk);
-}
+extern void tcp_done(struct sock *sk);
 
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
@@ -981,7 +976,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->wscale_ok = rx_opt->wscale_ok;
        ireq->acked = 0;
        ireq->ecn_ok = 0;
-       ireq->rmt_port = skb->h.th->source;
+       ireq->rmt_port = tcp_hdr(skb)->source;
 }
 
 extern void tcp_enter_memory_pressure(void);
@@ -1011,7 +1006,7 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int
 {
        if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
                return 0;
-       if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
+       if (get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
                return 0;
 
        /* RST segments are not recommended to carry timestamp,
@@ -1026,26 +1021,13 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int
 
           However, we can relax time bounds for RST segments to MSL.
         */
-       if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+       if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
                return 0;
        return 1;
 }
 
 #define TCP_CHECK_TIMER(sk) do { } while (0)
 
-static inline int tcp_use_frto(const struct sock *sk)
-{
-       const struct tcp_sock *tp = tcp_sk(sk);
-       
-       /* F-RTO must be activated in sysctl and there must be some
-        * unsent new data, and the advertised window should allow
-        * sending it.
-        */
-       return (sysctl_tcp_frto && sk->sk_send_head &&
-               !after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
-                      tp->snd_una + tp->snd_wnd));
-}
-
 static inline void tcp_mib_init(void)
 {
        /* See RFC 2012 */
@@ -1172,6 +1154,120 @@ static inline void              tcp_put_md5sig_pool(void)
        put_cpu();
 }
 
+/* write queue abstraction */
+static inline void tcp_write_queue_purge(struct sock *sk)
+{
+       struct sk_buff *skb;
+
+       while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
+               sk_stream_free_skb(sk, skb);
+       sk_stream_mem_reclaim(sk);
+}
+
+static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
+{
+       struct sk_buff *skb = sk->sk_write_queue.next;
+       if (skb == (struct sk_buff *) &sk->sk_write_queue)
+               return NULL;
+       return skb;
+}
+
+static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
+{
+       struct sk_buff *skb = sk->sk_write_queue.prev;
+       if (skb == (struct sk_buff *) &sk->sk_write_queue)
+               return NULL;
+       return skb;
+}
+
+static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
+{
+       return skb->next;
+}
+
+#define tcp_for_write_queue(skb, sk)                                   \
+               for (skb = (sk)->sk_write_queue.next;                   \
+                    (skb != (struct sk_buff *)&(sk)->sk_write_queue);  \
+                    skb = skb->next)
+
+#define tcp_for_write_queue_from(skb, sk)                              \
+               for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\
+                    skb = skb->next)
+
+static inline struct sk_buff *tcp_send_head(struct sock *sk)
+{
+       return sk->sk_send_head;
+}
+
+static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
+{
+       sk->sk_send_head = skb->next;
+       if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
+               sk->sk_send_head = NULL;
+}
+
+static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
+{
+       if (sk->sk_send_head == skb_unlinked)
+               sk->sk_send_head = NULL;
+}
+
+static inline void tcp_init_send_head(struct sock *sk)
+{
+       sk->sk_send_head = NULL;
+}
+
+static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+{
+       __skb_queue_tail(&sk->sk_write_queue, skb);
+}
+
+static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+{
+       __tcp_add_write_queue_tail(sk, skb);
+
+       /* Queue it, remembering where we must start sending. */
+       if (sk->sk_send_head == NULL)
+               sk->sk_send_head = skb;
+}
+
+static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
+{
+       __skb_queue_head(&sk->sk_write_queue, skb);
+}
+
+/* Insert buff after skb on the write queue of sk.  */
+static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
+                                               struct sk_buff *buff,
+                                               struct sock *sk)
+{
+       __skb_append(skb, buff, &sk->sk_write_queue);
+}
+
+/* Insert skb between prev and next on the write queue of sk.  */
+static inline void tcp_insert_write_queue_before(struct sk_buff *new,
+                                                 struct sk_buff *skb,
+                                                 struct sock *sk)
+{
+       __skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
+}
+
+static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
+{
+       __skb_unlink(skb, &sk->sk_write_queue);
+}
+
+static inline int tcp_skb_is_last(const struct sock *sk,
+                                 const struct sk_buff *skb)
+{
+       return skb->next == (struct sk_buff *)&sk->sk_write_queue;
+}
+
+static inline int tcp_write_queue_empty(struct sock *sk)
+{
+       return skb_queue_empty(&sk->sk_write_queue);
+}
+
 /* /proc */
 enum tcp_seq_states {
        TCP_SEQ_STATE_LISTENING,
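
The write-queue helpers added above hide the direct pokes at sk_write_queue and sk_send_head (and replace the sk_stream_for_retrans_queue macros removed from sock.h earlier in this diff). A hedged sketch of the common walk in the new style; the loop body is a placeholder.

#include <net/tcp.h>

/* sketch: walk only the already-sent part of the write queue, stopping at
 * the send head, a pattern the converted callers appear to follow */
static void example_walk_retrans_queue(struct sock *sk)
{
	struct sk_buff *skb;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;		/* from here on the data is unsent */
		/* inspect or mark TCP_SKB_CB(skb) here */
	}
}
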
index 4629d77..89eb3e0 100644 (file)
@@ -27,9 +27,10 @@ static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
                TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
 }
 
-static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp,
-                                   struct sk_buff *skb)
+static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        tp->ecn_flags = 0;
        if (sysctl_tcp_ecn) {
                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
@@ -44,9 +45,11 @@ TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
                th->ece = 1;
 }
 
-static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
-                               struct sk_buff *skb, int tcp_header_len)
+static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
+                               int tcp_header_len)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tp->ecn_flags & TCP_ECN_OK) {
                /* Not-retransmitted data segment: set ECT and inject CWR. */
                if (skb->len != tcp_header_len &&
@@ -54,7 +57,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
                        INET_ECN_xmit(sk);
                        if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
                                tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
-                               skb->h.th->cwr = 1;
+                               tcp_hdr(skb)->cwr = 1;
                                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
                        }
                } else {
@@ -62,7 +65,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
                        INET_ECN_dontxmit(sk);
                }
                if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
-                       skb->h.th->ece = 1;
+                       tcp_hdr(skb)->ece = 1;
        }
 }
 
@@ -70,7 +73,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
 
 static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       if (skb->h.th->cwr)
+       if (tcp_hdr(skb)->cwr)
                tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
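The ECN helpers above now take only the socket and derive the tcp_sock pointer internally via tcp_sk(sk). The caller-side effect, sketched with illustrative names:

    static void example_send_syn(struct sock *sk, struct sk_buff *syn_skb)
    {
            /* before this change: TCP_ECN_send_syn(sk, tcp_sk(sk), syn_skb); */
            TCP_ECN_send_syn(sk, syn_skb);
    }
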
index 1b921fa..98755eb 100644 (file)
@@ -72,15 +72,12 @@ struct sk_buff;
  */
 static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
 {
-       if (! UDP_SKB_CB(skb)->partial_cov)
-               return __skb_checksum_complete(skb);
-       return csum_fold(skb_checksum(skb, 0, UDP_SKB_CB(skb)->cscov,
-                                     skb->csum));
+       return __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov);
 }
 
 static inline int udp_lib_checksum_complete(struct sk_buff *skb)
 {
-       return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+       return !skb_csum_unnecessary(skb) &&
                __udp_lib_checksum_complete(skb);
 }
 
@@ -92,8 +89,8 @@ static inline int udp_lib_checksum_complete(struct sk_buff *skb)
  */
 static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
 {
-       __wsum csum = csum_partial(skb->h.raw, sizeof(struct udphdr), 0);
-
+       __wsum csum = csum_partial(skb_transport_header(skb),
+                                  sizeof(struct udphdr), 0);
        skb_queue_walk(&sk->sk_write_queue, skb) {
                csum = csum_add(csum, skb->csum);
        }
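
udp_lib_checksum_complete() now short-circuits through skb_csum_unnecessary() and leaves partial-coverage handling to __skb_checksum_complete_head(). A hedged receive-path sketch (caller name is illustrative only):

    static int example_udp_rcv_check(struct sk_buff *skb)
    {
            if (udp_lib_checksum_complete(skb))
                    return -EINVAL;  /* checksum failed, caller drops the datagram */
            return 0;
    }
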
index 67ac514..635b0ea 100644 (file)
@@ -47,11 +47,10 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
                return 1;
        }
 
-        UDP_SKB_CB(skb)->partial_cov = 0;
        cscov = ntohs(uh->len);
 
        if (cscov == 0)          /* Indicates that full coverage is required. */
-               cscov = skb->len;
+               ;
        else if (cscov < 8  || cscov > skb->len) {
                /*
                 * Coverage length violates RFC 3828: log and discard silently.
@@ -60,42 +59,16 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
                               cscov, skb->len);
                return 1;
 
-       } else if (cscov < skb->len)
+       } else if (cscov < skb->len) {
                UDP_SKB_CB(skb)->partial_cov = 1;
-
-        UDP_SKB_CB(skb)->cscov = cscov;
-
-       /*
-        * There is no known NIC manufacturer supporting UDP-Lite yet,
-        * hence ip_summed is always (re-)set to CHECKSUM_NONE.
-        */
-       skb->ip_summed = CHECKSUM_NONE;
+               UDP_SKB_CB(skb)->cscov = cscov;
+               if (skb->ip_summed == CHECKSUM_COMPLETE)
+                       skb->ip_summed = CHECKSUM_NONE;
+        }
 
        return 0;
 }
 
-static __inline__ int udplite4_csum_init(struct sk_buff *skb, struct udphdr *uh)
-{
-       int rc = udplite_checksum_init(skb, uh);
-
-       if (!rc)
-               skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr,
-                                              skb->nh.iph->daddr,
-                                              skb->len, IPPROTO_UDPLITE, 0);
-       return rc;
-}
-
-static __inline__ int udplite6_csum_init(struct sk_buff *skb, struct udphdr *uh)
-{
-       int rc = udplite_checksum_init(skb, uh);
-
-       if (!rc)
-               skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr,
-                                            &skb->nh.ipv6h->daddr,
-                                            skb->len, IPPROTO_UDPLITE, 0));
-       return rc;
-}
-
 static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh)
 {
        int cscov = up->len;
@@ -128,14 +101,14 @@ static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh)
 
 static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
 {
-       int off, len, cscov = udplite_sender_cscov(udp_sk(sk), skb->h.uh);
+       int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
        __wsum csum = 0;
 
        skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */
 
        skb_queue_walk(&sk->sk_write_queue, skb) {
-               off = skb->h.raw - skb->data;
-               len = skb->len - off;
+               const int off = skb_transport_offset(skb);
+               const int len = skb->len - off;
 
                csum = skb_checksum(skb, off, (cscov > len)? len : cscov, csum);
 
diff --git a/include/net/wext.h b/include/net/wext.h
new file mode 100644 (file)
index 0000000..5574183
--- /dev/null
@@ -0,0 +1,24 @@
+#ifndef __NET_WEXT_H
+#define __NET_WEXT_H
+
+/*
+ * wireless extensions interface to the core code
+ */
+
+#ifdef CONFIG_WIRELESS_EXT
+extern int wext_proc_init(void);
+extern int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd,
+                            void __user *arg);
+#else
+static inline int wext_proc_init(void)
+{
+       return 0;
+}
+static inline int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd,
+                                   void __user *arg)
+{
+       return -EINVAL;
+}
+#endif
+
+#endif /* __NET_WEXT_H */
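
Because the !CONFIG_WIRELESS_EXT variants are static inline stubs, core networking code can call these entry points unconditionally instead of wrapping each call site in its own #ifdef. A hedged sketch (caller name illustrative):

    static int example_dev_wext_ioctl(struct ifreq *ifr, unsigned int cmd,
                                      void __user *arg)
    {
            return wext_handle_ioctl(ifr, cmd, arg); /* -EINVAL when wext is compiled out */
    }
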
diff --git a/include/net/wireless.h b/include/net/wireless.h
new file mode 100644 (file)
index 0000000..d30c4ba
--- /dev/null
@@ -0,0 +1,139 @@
+#ifndef __NET_WIRELESS_H
+#define __NET_WIRELESS_H
+
+/*
+ * 802.11 device management
+ *
+ * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <net/cfg80211.h>
+
+/**
+ * struct wiphy - wireless hardware description
+ * @idx: the wiphy index assigned to this item
+ * @dev: the device representing /sys/class/ieee80211/<wiphy-name>
+ */
+struct wiphy {
+       /* assign these fields before you register the wiphy */
+
+       /* permanent MAC address */
+       u8 perm_addr[ETH_ALEN];
+
+       /* If multiple wiphys are registered and you're handed e.g.
+        * a regular netdev with assigned ieee80211_ptr, you won't
+        * know whether it points to a wiphy your driver has registered
+        * or not. Assign this to something global to your driver to
+        * help determine whether you own this wiphy or not. */
+       void *privid;
+
+       /* fields below are read-only, assigned by cfg80211 */
+
+       /* the item in /sys/class/ieee80211/ points to this,
+        * you need to use set_wiphy_dev() (see below) */
+       struct device dev;
+
+       /* dir in debugfs: ieee80211/<wiphyname> */
+       struct dentry *debugfsdir;
+
+       char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
+};
+
+/** struct wireless_dev - wireless per-netdev state
+ *
+ * This structure must be allocated by the driver/stack
+ * that uses the ieee80211_ptr field in struct net_device
+ * (this is intentional so it can be allocated along with
+ * the netdev.)
+ *
+ * @wiphy: pointer to hardware description
+ */
+struct wireless_dev {
+       struct wiphy *wiphy;
+
+       /* private to the generic wireless code */
+       struct list_head list;
+       struct net_device *netdev;
+};
+
+/**
+ * wiphy_priv - return priv from wiphy
+ */
+static inline void *wiphy_priv(struct wiphy *wiphy)
+{
+       BUG_ON(!wiphy);
+       return &wiphy->priv;
+}
+
+/**
+ * set_wiphy_dev - set device pointer for wiphy
+ */
+static inline void set_wiphy_dev(struct wiphy *wiphy, struct device *dev)
+{
+       wiphy->dev.parent = dev;
+}
+
+/**
+ * wiphy_dev - get wiphy dev pointer
+ */
+static inline struct device *wiphy_dev(struct wiphy *wiphy)
+{
+       return wiphy->dev.parent;
+}
+
+/**
+ * wiphy_name - get wiphy name
+ */
+static inline char *wiphy_name(struct wiphy *wiphy)
+{
+       return wiphy->dev.bus_id;
+}
+
+/**
+ * wdev_priv - return wiphy priv from wireless_dev
+ */
+static inline void *wdev_priv(struct wireless_dev *wdev)
+{
+       BUG_ON(!wdev);
+       return wiphy_priv(wdev->wiphy);
+}
+
+/**
+ * wiphy_new - create a new wiphy for use with cfg80211
+ *
+ * create a new wiphy and associate the given operations with it.
+ * @sizeof_priv bytes are allocated for private use.
+ *
+ * the returned pointer must be assigned to each netdev's
+ * ieee80211_ptr for proper operation.
+ */
+struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv);
+
+/**
+ * wiphy_register - register a wiphy with cfg80211
+ *
+ * register the given wiphy
+ *
+ * Returns a non-negative wiphy index or a negative error code.
+ */
+extern int wiphy_register(struct wiphy *wiphy);
+
+/**
+ * wiphy_unregister - deregister a wiphy from cfg80211
+ *
+ * unregister the given wiphy.
+ * After this call, no more requests can be made with this
+ * wiphy, but the call may sleep to wait for an outstanding
+ * request that is being handled.
+ */
+extern void wiphy_unregister(struct wiphy *wiphy);
+
+/**
+ * wiphy_free - free wiphy
+ */
+extern void wiphy_free(struct wiphy *wiphy);
+
+#endif /* __NET_WIRELESS_H */
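
A hedged sketch of the driver-side lifecycle implied by this header: allocate a wiphy with room for private data, attach it to the bus device, register it, and on teardown unregister and free it. The ops pointer, parent device and private struct below are assumptions for illustration only, not part of the header.

    struct example_priv {
            int flags;
    };

    static struct wiphy *example_wiphy_setup(struct cfg80211_ops *ops,
                                             struct device *parent)
    {
            struct wiphy *wiphy = wiphy_new(ops, sizeof(struct example_priv));

            if (!wiphy)
                    return NULL;

            set_wiphy_dev(wiphy, parent);   /* parent of /sys/class/ieee80211/<name> */
            if (wiphy_register(wiphy) < 0) {
                    wiphy_free(wiphy);
                    return NULL;
            }
            return wiphy;
    }

    static void example_wiphy_teardown(struct wiphy *wiphy)
    {
            wiphy_unregister(wiphy);
            wiphy_free(wiphy);
    }
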
index 1d10c87..1415bcf 100644 (file)
@@ -7,8 +7,8 @@
 
 static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev)
 {
-       skb->mac.raw = skb->data;
        skb->dev = dev;
+       skb_reset_mac_header(skb);
        skb->pkt_type = PACKET_HOST;
        
        return htons(ETH_P_X25);
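
skb_reset_mac_header() records the MAC header at the current skb->data position, replacing the direct write to skb->mac.raw. A hedged receive-path sketch of the same pattern (names illustrative):

    static void example_mark_mac(struct sk_buff *skb, struct net_device *dev)
    {
            skb->dev = dev;
            skb_reset_mac_header(skb);  /* mac header == current data pointer */
    }
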
index 5a00aa8..8287081 100644 (file)
@@ -279,7 +279,7 @@ struct xfrm_type
        xfrm_address_t          *(*local_addr)(struct xfrm_state *, xfrm_address_t *);
        xfrm_address_t          *(*remote_addr)(struct xfrm_state *, xfrm_address_t *);
        /* Estimate maximal size of result of transformation of a dgram */
-       u32                     (*get_max_size)(struct xfrm_state *, int size);
+       u32                     (*get_mtu)(struct xfrm_state *, int size);
 };
 
 extern int xfrm_register_type(struct xfrm_type *type, unsigned short family);
@@ -416,6 +416,13 @@ struct xfrm_audit
        u32     secid;
 };
 
+/* SAD metadata, add more later */
+struct xfrm_sadinfo
+{
+       u32 sadhcnt; /* current hash bkts */
+       u32 sadhmcnt; /* max allowed hash bkts */
+       u32 sadcnt; /* current running count */
+};
 #ifdef CONFIG_AUDITSYSCALL
 extern void xfrm_audit_log(uid_t auid, u32 secid, int type, int result,
                    struct xfrm_policy *xp, struct xfrm_state *x);
@@ -938,6 +945,7 @@ static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **s
 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
 extern int xfrm_state_delete(struct xfrm_state *x);
 extern void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
+extern void xfrm_sad_getinfo(struct xfrm_sadinfo *si);
 extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq);
 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
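
A hedged sketch of a consumer of the new SAD metadata interface; it only reads the counters that xfrm_sad_getinfo() fills in (the function name is illustrative):

    static void example_dump_sadinfo(void)
    {
            struct xfrm_sadinfo si;

            xfrm_sad_getinfo(&si);
            printk(KERN_DEBUG "SAD: %u entries, %u of %u hash buckets\n",
                   si.sadcnt, si.sadhcnt, si.sadhmcnt);
    }
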
diff --git a/include/rxrpc/call.h b/include/rxrpc/call.h
deleted file mode 100644 (file)
index b86f837..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-/* call.h: Rx call record
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_CALL_H
-#define _LINUX_RXRPC_CALL_H
-
-#include <rxrpc/types.h>
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/packet.h>
-#include <linux/timer.h>
-
-#define RXRPC_CALL_ACK_WINDOW_SIZE     16
-
-extern unsigned rxrpc_call_rcv_timeout;                /* receive activity timeout (secs) */
-
-/* application call state
- * - only state 0 and ffff are reserved, the state is set to 1 after an opid is received
- */
-enum rxrpc_app_cstate {
-       RXRPC_CSTATE_COMPLETE           = 0,    /* operation complete */
-       RXRPC_CSTATE_ERROR,                     /* operation ICMP error or aborted */
-       RXRPC_CSTATE_SRVR_RCV_OPID,             /* [SERVER] receiving operation ID */
-       RXRPC_CSTATE_SRVR_RCV_ARGS,             /* [SERVER] receiving operation data */
-       RXRPC_CSTATE_SRVR_GOT_ARGS,             /* [SERVER] completely received operation data */
-       RXRPC_CSTATE_SRVR_SND_REPLY,            /* [SERVER] sending operation reply */
-       RXRPC_CSTATE_SRVR_RCV_FINAL_ACK,        /* [SERVER] receiving final ACK */
-       RXRPC_CSTATE_CLNT_SND_ARGS,             /* [CLIENT] sending operation args */
-       RXRPC_CSTATE_CLNT_RCV_REPLY,            /* [CLIENT] receiving operation reply */
-       RXRPC_CSTATE_CLNT_GOT_REPLY,            /* [CLIENT] completely received operation reply */
-} __attribute__((packed));
-
-extern const char *rxrpc_call_states[];
-
-enum rxrpc_app_estate {
-       RXRPC_ESTATE_NO_ERROR           = 0,    /* no error */
-       RXRPC_ESTATE_LOCAL_ABORT,               /* aborted locally by application layer */
-       RXRPC_ESTATE_PEER_ABORT,                /* aborted remotely by peer */
-       RXRPC_ESTATE_LOCAL_ERROR,               /* local ICMP network error */
-       RXRPC_ESTATE_REMOTE_ERROR,              /* remote ICMP network error */
-} __attribute__((packed));
-
-extern const char *rxrpc_call_error_states[];
-
-/*****************************************************************************/
-/*
- * Rx call record and application scratch buffer
- * - the call record occupies the bottom of a complete page
- * - the application scratch buffer occupies the rest
- */
-struct rxrpc_call
-{
-       atomic_t                usage;
-       struct rxrpc_connection *conn;          /* connection upon which active */
-       spinlock_t              lock;           /* access lock */
-       struct module           *owner;         /* owner module */
-       wait_queue_head_t       waitq;          /* wait queue for events to happen */
-       struct list_head        link;           /* general internal list link */
-       struct list_head        call_link;      /* master call list link */
-       __be32                  chan_ix;        /* connection channel index  */
-       __be32                  call_id;        /* call ID on connection  */
-       unsigned long           cjif;           /* jiffies at call creation */
-       unsigned long           flags;          /* control flags */
-#define RXRPC_CALL_ACKS_TIMO   0x00000001      /* ACKS timeout reached */
-#define RXRPC_CALL_ACKR_TIMO   0x00000002      /* ACKR timeout reached */
-#define RXRPC_CALL_RCV_TIMO    0x00000004      /* RCV timeout reached */
-#define RXRPC_CALL_RCV_PKT     0x00000008      /* received packet */
-
-       /* transmission */
-       rxrpc_seq_t             snd_seq_count;  /* outgoing packet sequence number counter */
-       struct rxrpc_message    *snd_nextmsg;   /* next message being constructed for sending */
-       struct rxrpc_message    *snd_ping;      /* last ping message sent */
-       unsigned short          snd_resend_cnt; /* count of resends since last ACK */
-
-       /* transmission ACK tracking */
-       struct list_head        acks_pendq;     /* messages pending ACK (ordered by seq) */
-       unsigned                acks_pend_cnt;  /* number of un-ACK'd packets */
-       rxrpc_seq_t             acks_dftv_seq;  /* highest definitively ACK'd msg seq */
-       struct timer_list       acks_timeout;   /* timeout on expected ACK */
-
-       /* reception */
-       struct list_head        rcv_receiveq;   /* messages pending reception (ordered by seq) */
-       struct list_head        rcv_krxiodq_lk; /* krxiod queue for new inbound packets */
-       struct timer_list       rcv_timeout;    /* call receive activity timeout */
-
-       /* reception ACK'ing */
-       rxrpc_seq_t             ackr_win_bot;   /* bottom of ACK window */
-       rxrpc_seq_t             ackr_win_top;   /* top of ACK window */
-       rxrpc_seq_t             ackr_high_seq;  /* highest seqno yet received */
-       rxrpc_seq_net_t         ackr_prev_seq;  /* previous seqno received */
-       unsigned                ackr_pend_cnt;  /* number of pending ACKs */
-       struct timer_list       ackr_dfr_timo;  /* timeout on deferred ACK */
-       char                    ackr_dfr_perm;  /* request for deferred ACKs permitted */
-       rxrpc_seq_t             ackr_dfr_seq;   /* seqno for deferred ACK */
-       struct rxrpc_ackpacket  ackr;           /* pending normal ACK packet */
-       uint8_t                 ackr_array[RXRPC_CALL_ACK_WINDOW_SIZE]; /* ACK records */
-
-       /* presentation layer */
-       char                    app_last_rcv;   /* T if received last packet from remote end */
-       enum rxrpc_app_cstate   app_call_state; /* call state */
-       enum rxrpc_app_estate   app_err_state;  /* abort/error state */
-       struct list_head        app_readyq;     /* ordered ready received packet queue */
-       struct list_head        app_unreadyq;   /* ordered post-hole recv'd packet queue */
-       rxrpc_seq_t             app_ready_seq;  /* last seq number dropped into readyq */
-       size_t                  app_ready_qty;  /* amount of data ready in readyq */
-       unsigned                app_opcode;     /* operation ID */
-       unsigned                app_abort_code; /* abort code (when aborted) */
-       int                     app_errno;      /* error number (when ICMP error received) */
-
-       /* statisics */
-       unsigned                pkt_rcv_count;  /* count of received packets on this call */
-       unsigned                pkt_snd_count;  /* count of sent packets on this call */
-       unsigned                app_read_count; /* number of reads issued */
-
-       /* bits for the application to use */
-       rxrpc_call_attn_func_t  app_attn_func;  /* callback when attention required */
-       rxrpc_call_error_func_t app_error_func; /* callback when abort sent (cleanup and put) */
-       rxrpc_call_aemap_func_t app_aemap_func; /* callback to map abort code to/from errno */
-       void                    *app_user;      /* application data */
-       struct list_head        app_link;       /* application list linkage */
-       struct list_head        app_attn_link;  /* application attention list linkage */
-       size_t                  app_mark;       /* trigger callback when app_ready_qty>=app_mark */
-       char                    app_async_read; /* T if in async-read mode */
-       uint8_t                 *app_read_buf;  /* application async read buffer (app_mark size) */
-       uint8_t                 *app_scr_alloc; /* application scratch allocation pointer */
-       void                    *app_scr_ptr;   /* application pointer into scratch buffer */
-
-#define RXRPC_APP_MARK_EOF 0xFFFFFFFFU /* mark at end of input */
-
-       /* application scratch buffer */
-       uint8_t                 app_scratch[0] __attribute__((aligned(sizeof(long))));
-};
-
-#define RXRPC_CALL_SCRATCH_SIZE (PAGE_SIZE - sizeof(struct rxrpc_call))
-
-#define rxrpc_call_reset_scratch(CALL) \
-do { (CALL)->app_scr_alloc = (CALL)->app_scratch; } while(0)
-
-#define rxrpc_call_alloc_scratch(CALL,SIZE)                                            \
-({                                                                                     \
-       void *ptr;                                                                      \
-       ptr = (CALL)->app_scr_alloc;                                                    \
-       (CALL)->app_scr_alloc += (SIZE);                                                \
-       if ((SIZE)>RXRPC_CALL_SCRATCH_SIZE ||                                           \
-           (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) {  \
-               printk("rxrpc_call_alloc_scratch(%p,%Zu)\n",(CALL),(size_t)(SIZE));     \
-               BUG();                                                                  \
-       }                                                                               \
-       ptr;                                                                            \
-})
-
-#define rxrpc_call_alloc_scratch_s(CALL,TYPE)                                          \
-({                                                                                     \
-       size_t size = sizeof(TYPE);                                                     \
-       TYPE *ptr;                                                                      \
-       ptr = (TYPE*)(CALL)->app_scr_alloc;                                             \
-       (CALL)->app_scr_alloc += size;                                                  \
-       if (size>RXRPC_CALL_SCRATCH_SIZE ||                                             \
-           (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) {  \
-               printk("rxrpc_call_alloc_scratch(%p,%Zu)\n",(CALL),size);               \
-               BUG();                                                                  \
-       }                                                                               \
-       ptr;                                                                            \
-})
-
-#define rxrpc_call_is_ack_pending(CALL) ((CALL)->ackr.reason != 0)
-
-extern int rxrpc_create_call(struct rxrpc_connection *conn,
-                            rxrpc_call_attn_func_t attn,
-                            rxrpc_call_error_func_t error,
-                            rxrpc_call_aemap_func_t aemap,
-                            struct rxrpc_call **_call);
-
-extern int rxrpc_incoming_call(struct rxrpc_connection *conn,
-                              struct rxrpc_message *msg,
-                              struct rxrpc_call **_call);
-
-static inline void rxrpc_get_call(struct rxrpc_call *call)
-{
-       BUG_ON(atomic_read(&call->usage)<=0);
-       atomic_inc(&call->usage);
-       /*printk("rxrpc_get_call(%p{u=%d})\n",(C),atomic_read(&(C)->usage));*/
-}
-
-extern void rxrpc_put_call(struct rxrpc_call *call);
-
-extern void rxrpc_call_do_stuff(struct rxrpc_call *call);
-
-extern int rxrpc_call_abort(struct rxrpc_call *call, int error);
-
-#define RXRPC_CALL_READ_BLOCK  0x0001  /* block if not enough data and not yet EOF */
-#define RXRPC_CALL_READ_ALL    0x0002  /* error if insufficient data received */
-extern int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int flags);
-
-extern int rxrpc_call_write_data(struct rxrpc_call *call,
-                                size_t sioc,
-                                struct kvec *siov,
-                                uint8_t rxhdr_flags,
-                                gfp_t alloc_flags,
-                                int dup_data,
-                                size_t *size_sent);
-
-extern void rxrpc_call_handle_error(struct rxrpc_call *conn, int local, int errno);
-
-#endif /* _LINUX_RXRPC_CALL_H */
diff --git a/include/rxrpc/connection.h b/include/rxrpc/connection.h
deleted file mode 100644 (file)
index 41e6781..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/* connection.h: Rx connection record
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_CONNECTION_H
-#define _LINUX_RXRPC_CONNECTION_H
-
-#include <rxrpc/types.h>
-#include <rxrpc/krxtimod.h>
-
-struct sk_buff;
-
-/*****************************************************************************/
-/*
- * Rx connection
- * - connections are matched by (rmt_port,rmt_addr,service_id,conn_id,clientflag)
- * - connections only retain a refcount on the peer when they are active
- * - connections with refcount==0 are inactive and reside in the peer's graveyard
- */
-struct rxrpc_connection
-{
-       atomic_t                usage;
-       struct rxrpc_transport  *trans;         /* transport endpoint */
-       struct rxrpc_peer       *peer;          /* peer from/to which connected */
-       struct rxrpc_service    *service;       /* responsible service (inbound conns) */
-       struct rxrpc_timer      timeout;        /* decaching timer */
-       struct list_head        link;           /* link in peer's list */
-       struct list_head        proc_link;      /* link in proc list */
-       struct list_head        err_link;       /* link in ICMP error processing list */
-       struct list_head        id_link;        /* link in ID grant list */
-       struct sockaddr_in      addr;           /* remote address */
-       struct rxrpc_call       *channels[4];   /* channels (active calls) */
-       wait_queue_head_t       chanwait;       /* wait for channel to become available */
-       spinlock_t              lock;           /* access lock */
-       struct timeval          atime;          /* last access time */
-       size_t                  mtu_size;       /* MTU size for outbound messages */
-       unsigned                call_counter;   /* call ID counter */
-       rxrpc_serial_t          serial_counter; /* packet serial number counter */
-
-       /* the following should all be in net order */
-       __be32                  in_epoch;       /* peer's epoch */
-       __be32                  out_epoch;      /* my epoch */
-       __be32                  conn_id;        /* connection ID, appropriately shifted */
-       __be16                  service_id;     /* service ID */
-       uint8_t                 security_ix;    /* security ID */
-       uint8_t                 in_clientflag;  /* RXRPC_CLIENT_INITIATED if we are server */
-       uint8_t                 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
-};
-
-extern int rxrpc_create_connection(struct rxrpc_transport *trans,
-                                  __be16 port,
-                                  __be32 addr,
-                                  uint16_t service_id,
-                                  void *security,
-                                  struct rxrpc_connection **_conn);
-
-extern int rxrpc_connection_lookup(struct rxrpc_peer *peer,
-                                  struct rxrpc_message *msg,
-                                  struct rxrpc_connection **_conn);
-
-static inline void rxrpc_get_connection(struct rxrpc_connection *conn)
-{
-       BUG_ON(atomic_read(&conn->usage)<0);
-       atomic_inc(&conn->usage);
-       //printk("rxrpc_get_conn(%p{u=%d})\n",conn,atomic_read(&conn->usage));
-}
-
-extern void rxrpc_put_connection(struct rxrpc_connection *conn);
-
-extern int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
-                                         struct rxrpc_call *call,
-                                         struct rxrpc_message *msg);
-
-extern void rxrpc_conn_handle_error(struct rxrpc_connection *conn, int local, int errno);
-
-#endif /* _LINUX_RXRPC_CONNECTION_H */
diff --git a/include/rxrpc/krxiod.h b/include/rxrpc/krxiod.h
deleted file mode 100644 (file)
index c0e0e82..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/* krxiod.h: Rx RPC I/O kernel thread interface
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_KRXIOD_H
-#define _LINUX_RXRPC_KRXIOD_H
-
-#include <rxrpc/types.h>
-
-extern int rxrpc_krxiod_init(void);
-extern void rxrpc_krxiod_kill(void);
-extern void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans);
-extern void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans);
-extern void rxrpc_krxiod_queue_peer(struct rxrpc_peer *peer);
-extern void rxrpc_krxiod_dequeue_peer(struct rxrpc_peer *peer);
-extern void rxrpc_krxiod_clear_peers(struct rxrpc_transport *trans);
-extern void rxrpc_krxiod_queue_call(struct rxrpc_call *call);
-extern void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call);
-
-#endif /* _LINUX_RXRPC_KRXIOD_H */
diff --git a/include/rxrpc/krxsecd.h b/include/rxrpc/krxsecd.h
deleted file mode 100644 (file)
index 55ce43a..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* krxsecd.h: Rx RPC security kernel thread interface
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_KRXSECD_H
-#define _LINUX_RXRPC_KRXSECD_H
-
-#include <rxrpc/types.h>
-
-extern int rxrpc_krxsecd_init(void);
-extern void rxrpc_krxsecd_kill(void);
-extern void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans);
-extern void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg);
-
-#endif /* _LINUX_RXRPC_KRXSECD_H */
diff --git a/include/rxrpc/krxtimod.h b/include/rxrpc/krxtimod.h
deleted file mode 100644 (file)
index b3d298b..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/* krxtimod.h: RxRPC timeout daemon
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_KRXTIMOD_H
-#define _LINUX_RXRPC_KRXTIMOD_H
-
-#include <rxrpc/types.h>
-
-struct rxrpc_timer_ops {
-       /* called when the front of the timer queue has timed out */
-       void (*timed_out)(struct rxrpc_timer *timer);
-};
-
-/*****************************************************************************/
-/*
- * RXRPC timer/timeout record
- */
-struct rxrpc_timer
-{
-       struct list_head                link;           /* link in timer queue */
-       unsigned long                   timo_jif;       /* timeout time */
-       const struct rxrpc_timer_ops    *ops;           /* timeout expiry function */
-};
-
-static inline void rxrpc_timer_init(rxrpc_timer_t *timer, const struct rxrpc_timer_ops *ops)
-{
-       INIT_LIST_HEAD(&timer->link);
-       timer->ops = ops;
-}
-
-extern int rxrpc_krxtimod_start(void);
-extern void rxrpc_krxtimod_kill(void);
-
-extern void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout);
-extern int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer);
-
-#endif /* _LINUX_RXRPC_KRXTIMOD_H */
diff --git a/include/rxrpc/message.h b/include/rxrpc/message.h
deleted file mode 100644 (file)
index b318f27..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/* message.h: Rx message caching
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_MESSAGE_H
-#define _LINUX_RXRPC_MESSAGE_H
-
-#include <rxrpc/packet.h>
-
-/*****************************************************************************/
-/*
- * Rx message record
- */
-struct rxrpc_message
-{
-       atomic_t                usage;
-       struct list_head        link;           /* list link */
-       struct timeval          stamp;          /* time received or last sent */
-       rxrpc_seq_t             seq;            /* message sequence number */
-
-       int                     state;          /* the state the message is currently in */
-#define RXRPC_MSG_PREPARED     0
-#define RXRPC_MSG_SENT         1
-#define RXRPC_MSG_ACKED                2               /* provisionally ACK'd */
-#define RXRPC_MSG_DONE         3               /* definitively ACK'd (msg->seq<ack.firstPacket) */
-#define RXRPC_MSG_RECEIVED     4
-#define RXRPC_MSG_ERROR                -1
-       char                    rttdone;        /* used for RTT */
-
-       struct rxrpc_transport  *trans;         /* transport received through */
-       struct rxrpc_connection *conn;          /* connection received over */
-       struct sk_buff          *pkt;           /* received packet */
-       off_t                   offset;         /* offset into pkt of next byte of data */
-
-       struct rxrpc_header     hdr;            /* message header */
-
-       int                     dcount;         /* data part count */
-       size_t                  dsize;          /* data size */
-#define RXRPC_MSG_MAX_IOCS 8
-       struct kvec             data[RXRPC_MSG_MAX_IOCS]; /* message data */
-       unsigned long           dfree;          /* bit mask indicating kfree(data[x]) if T */
-};
-
-#define rxrpc_get_message(M) do { atomic_inc(&(M)->usage); } while(0)
-
-extern void __rxrpc_put_message(struct rxrpc_message *msg);
-static inline void rxrpc_put_message(struct rxrpc_message *msg)
-{
-       BUG_ON(atomic_read(&msg->usage)<=0);
-       if (atomic_dec_and_test(&msg->usage))
-               __rxrpc_put_message(msg);
-}
-
-extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
-                            struct rxrpc_call *call,
-                            uint8_t type,
-                            int count,
-                            struct kvec *diov,
-                            gfp_t alloc_flags,
-                            struct rxrpc_message **_msg);
-
-extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
-
-#endif /* _LINUX_RXRPC_MESSAGE_H */
index 1447f0a..b69e6e1 100644 (file)
@@ -1,6 +1,6 @@
 /* packet.h: Rx packet layout and definitions
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 #ifndef _LINUX_RXRPC_PACKET_H
 #define _LINUX_RXRPC_PACKET_H
 
-#include <rxrpc/types.h>
-
-#define RXRPC_IPUDP_SIZE               28
-extern size_t RXRPC_MAX_PACKET_SIZE;
-#define RXRPC_MAX_PACKET_DATA_SIZE     (RXRPC_MAX_PACKET_SIZE - sizeof(struct rxrpc_header))
-#define RXRPC_LOCAL_PACKET_SIZE                RXRPC_MAX_PACKET_SIZE
-#define RXRPC_REMOTE_PACKET_SIZE       (576 - RXRPC_IPUDP_SIZE)
+typedef u32    rxrpc_seq_t;    /* Rx message sequence number */
+typedef u32    rxrpc_serial_t; /* Rx message serial number */
+typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */
+typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
 
 /*****************************************************************************/
 /*
  * on-the-wire Rx packet header
  * - all multibyte fields should be in network byte order
  */
-struct rxrpc_header
-{
+struct rxrpc_header {
        __be32          epoch;          /* client boot timestamp */
 
        __be32          cid;            /* connection and channel ID */
 #define RXRPC_MAXCALLS         4                       /* max active calls per conn */
 #define RXRPC_CHANNELMASK      (RXRPC_MAXCALLS-1)      /* mask for channel ID */
 #define RXRPC_CIDMASK          (~RXRPC_CHANNELMASK)    /* mask for connection ID */
-#define RXRPC_CIDSHIFT         2                       /* shift for connection ID */
+#define RXRPC_CIDSHIFT         ilog2(RXRPC_MAXCALLS)   /* shift for connection ID */
+#define RXRPC_CID_INC          (1 << RXRPC_CIDSHIFT)   /* connection ID increment */
 
        __be32          callNumber;     /* call ID (0 for connection-level packets) */
 #define RXRPC_PROCESS_MAXCALLS (1<<2)  /* maximum number of active calls per conn (power of 2) */
@@ -62,7 +59,10 @@ struct rxrpc_header
 
        uint8_t         userStatus;     /* app-layer defined status */
        uint8_t         securityIndex;  /* security protocol ID */
-       __be16          _rsvd;          /* reserved (used by kerberos security as cksum) */
+       union {
+               __be16  _rsvd;          /* reserved */
+               __be16  cksum;          /* kerberos security checksum */
+       };
        __be16          serviceId;      /* service ID */
 
 } __attribute__((packed));
@@ -81,8 +81,7 @@ extern const char *rxrpc_pkts[];
  *   - new__rsvd = j__rsvd
  *   - duplicating all other fields
  */
-struct rxrpc_jumbo_header
-{
+struct rxrpc_jumbo_header {
        uint8_t         flags;          /* packet flags (as per rxrpc_header) */
        uint8_t         pad;
        __be16          _rsvd;          /* reserved (used by kerberos security as cksum) */
@@ -95,8 +94,7 @@ struct rxrpc_jumbo_header
  * on-the-wire Rx ACK packet data payload
  * - all multibyte fields should be in network byte order
  */
-struct rxrpc_ackpacket
-{
+struct rxrpc_ackpacket {
        __be16          bufferSpace;    /* number of packet buffers available */
        __be16          maxSkew;        /* diff between serno being ACK'd and highest serial no
                                         * received */
@@ -124,4 +122,93 @@ struct rxrpc_ackpacket
 
 } __attribute__((packed));
 
+/*
+ * ACK packets can have a further piece of information tagged on the end
+ */
+struct rxrpc_ackinfo {
+       __be32          rxMTU;          /* maximum Rx MTU size (bytes) [AFS 3.3] */
+       __be32          maxMTU;         /* maximum interface MTU size (bytes) [AFS 3.3] */
+       __be32          rwind;          /* Rx window size (packets) [AFS 3.4] */
+       __be32          jumbo_max;      /* max packets to stick into a jumbo packet [AFS 3.5] */
+};
+
+/*****************************************************************************/
+/*
+ * Kerberos security type-2 challenge packet
+ */
+struct rxkad_challenge {
+       __be32          version;        /* version of this challenge type */
+       __be32          nonce;          /* encrypted random number */
+       __be32          min_level;      /* minimum security level */
+       __be32          __padding;      /* padding to 8-byte boundary */
+} __attribute__((packed));
+
+/*****************************************************************************/
+/*
+ * Kerberos security type-2 response packet
+ */
+struct rxkad_response {
+       __be32          version;        /* version of this response type */
+       __be32          __pad;
+
+       /* encrypted bit of the response */
+       struct {
+               __be32          epoch;          /* current epoch */
+               __be32          cid;            /* parent connection ID */
+               __be32          checksum;       /* checksum */
+               __be32          securityIndex;  /* security type */
+               __be32          call_id[4];     /* encrypted call IDs */
+               __be32          inc_nonce;      /* challenge nonce + 1 */
+               __be32          level;          /* desired level */
+       } encrypted;
+
+       __be32          kvno;           /* Kerberos key version number */
+       __be32          ticket_len;     /* Kerberos ticket length  */
+} __attribute__((packed));
+
+/*****************************************************************************/
+/*
+ * RxRPC-level abort codes
+ */
+#define RX_CALL_DEAD           -1      /* call/conn has been inactive and is shut down */
+#define RX_INVALID_OPERATION   -2      /* invalid operation requested / attempted */
+#define RX_CALL_TIMEOUT                -3      /* call timeout exceeded */
+#define RX_EOF                 -4      /* unexpected end of data on read op */
+#define RX_PROTOCOL_ERROR      -5      /* low-level protocol error */
+#define RX_USER_ABORT          -6      /* generic user abort */
+#define RX_ADDRINUSE           -7      /* UDP port in use */
+#define RX_DEBUGI_BADTYPE      -8      /* bad debugging packet type */
+
+/*
+ * (un)marshalling abort codes (rxgen)
+ */
+#define        RXGEN_CC_MARSHAL    -450
+#define        RXGEN_CC_UNMARSHAL  -451
+#define        RXGEN_SS_MARSHAL    -452
+#define        RXGEN_SS_UNMARSHAL  -453
+#define        RXGEN_DECODE        -454
+#define        RXGEN_OPCODE        -455
+#define        RXGEN_SS_XDRFREE    -456
+#define        RXGEN_CC_XDRFREE    -457
+
+/*
+ * Rx kerberos security abort codes
+ * - unfortunately we have no generalised security abort codes to say things
+ *   like "unsupported security", so we have to use these instead and hope the
+ *   other side understands
+ */
+#define RXKADINCONSISTENCY     19270400        /* security module structure inconsistent */
+#define RXKADPACKETSHORT       19270401        /* packet too short for security challenge */
+#define RXKADLEVELFAIL         19270402        /* security level negotiation failed */
+#define RXKADTICKETLEN         19270403        /* ticket length too short or too long */
+#define RXKADOUTOFSEQUENCE     19270404        /* packet had bad sequence number */
+#define RXKADNOAUTH            19270405        /* caller not authorised */
+#define RXKADBADKEY            19270406        /* illegal key: bad parity or weak */
+#define RXKADBADTICKET         19270407        /* security object was passed a bad ticket */
+#define RXKADUNKNOWNKEY                19270408        /* ticket contained unknown key version number */
+#define RXKADEXPIRED           19270409        /* authentication expired */
+#define RXKADSEALEDINCON       19270410        /* sealed data inconsistent */
+#define RXKADDATALEN           19270411        /* user data too long */
+#define RXKADILLEGALLEVEL      19270412        /* caller not authorised to use encrypted conns */
+
 #endif /* _LINUX_RXRPC_PACKET_H */
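
The cid masks defined above split the 32-bit field into a channel index (the low RXRPC_CIDSHIFT bits, selecting one of RXRPC_MAXCALLS channels) and the connection ID proper. A hedged decoding sketch (helper names are illustrative):

    static inline unsigned int example_channel_of(__be32 cid)
    {
            return ntohl(cid) & RXRPC_CHANNELMASK;
    }

    static inline u32 example_conn_id_of(__be32 cid)
    {
            return ntohl(cid) & RXRPC_CIDMASK;
    }
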
diff --git a/include/rxrpc/peer.h b/include/rxrpc/peer.h
deleted file mode 100644 (file)
index 8b8fe97..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/* peer.h: Rx RPC per-transport peer record
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_PEER_H
-#define _LINUX_RXRPC_PEER_H
-
-#include <linux/wait.h>
-#include <rxrpc/types.h>
-#include <rxrpc/krxtimod.h>
-
-struct rxrpc_peer_ops
-{
-       /* peer record being added */
-       int (*adding)(struct rxrpc_peer *peer);
-
-       /* peer record being discarded from graveyard */
-       void (*discarding)(struct rxrpc_peer *peer);
-
-       /* change of epoch detected on connection */
-       void (*change_of_epoch)(struct rxrpc_connection *conn);
-};
-
-/*****************************************************************************/
-/*
- * Rx RPC per-transport peer record
- * - peers only retain a refcount on the transport when they are active
- * - peers with refcount==0 are inactive and reside in the transport's graveyard
- */
-struct rxrpc_peer
-{
-       atomic_t                usage;
-       struct rxrpc_peer_ops   *ops;           /* operations on this peer */
-       struct rxrpc_transport  *trans;         /* owner transport */
-       struct rxrpc_timer      timeout;        /* timeout for grave destruction */
-       struct list_head        link;           /* link in transport's peer list */
-       struct list_head        proc_link;      /* link in /proc list */
-       rwlock_t                conn_idlock;    /* lock for connection IDs */
-       struct list_head        conn_idlist;    /* list of connections granted IDs */
-       uint32_t                conn_idcounter; /* connection ID counter */
-       rwlock_t                conn_lock;      /* lock for active/dead connections */
-       struct list_head        conn_active;    /* active connections to/from this peer */
-       struct list_head        conn_graveyard; /* graveyard for inactive connections */
-       spinlock_t              conn_gylock;    /* lock for conn_graveyard */
-       wait_queue_head_t       conn_gy_waitq;  /* wait queue hit when graveyard is empty */
-       atomic_t                conn_count;     /* number of attached connections */
-       struct in_addr          addr;           /* remote address */
-       size_t                  if_mtu;         /* interface MTU for this peer */
-       spinlock_t              lock;           /* access lock */
-
-       void                    *user;          /* application layer data */
-
-       /* calculated RTT cache */
-#define RXRPC_RTT_CACHE_SIZE 32
-       suseconds_t             rtt;            /* current RTT estimate (in uS) */
-       unsigned                rtt_point;      /* next entry at which to insert */
-       unsigned                rtt_usage;      /* amount of cache actually used */
-       suseconds_t             rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
-};
-
-
-extern int rxrpc_peer_lookup(struct rxrpc_transport *trans,
-                            __be32 addr,
-                            struct rxrpc_peer **_peer);
-
-static inline void rxrpc_get_peer(struct rxrpc_peer *peer)
-{
-       BUG_ON(atomic_read(&peer->usage)<0);
-       atomic_inc(&peer->usage);
-       //printk("rxrpc_get_peer(%p{u=%d})\n",peer,atomic_read(&peer->usage));
-}
-
-extern void rxrpc_put_peer(struct rxrpc_peer *peer);
-
-#endif /* _LINUX_RXRPC_PEER_H */
diff --git a/include/rxrpc/rxrpc.h b/include/rxrpc/rxrpc.h
deleted file mode 100644 (file)
index 8d9874c..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/* rx.h: Rx RPC interface
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_RXRPC_H
-#define _LINUX_RXRPC_RXRPC_H
-
-#ifdef __KERNEL__
-
-extern __be32 rxrpc_epoch;
-
-#ifdef CONFIG_SYSCTL
-extern int rxrpc_ktrace;
-extern int rxrpc_kdebug;
-extern int rxrpc_kproto;
-extern int rxrpc_knet;
-#else
-#define rxrpc_ktrace   0
-#define rxrpc_kdebug   0
-#define rxrpc_kproto   0
-#define rxrpc_knet     0
-#endif
-
-extern int rxrpc_sysctl_init(void);
-extern void rxrpc_sysctl_cleanup(void);
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_RXRPC_RXRPC_H */
diff --git a/include/rxrpc/transport.h b/include/rxrpc/transport.h
deleted file mode 100644 (file)
index 7c7b968..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/* transport.h: Rx transport management
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_TRANSPORT_H
-#define _LINUX_RXRPC_TRANSPORT_H
-
-#include <rxrpc/types.h>
-#include <rxrpc/krxiod.h>
-#include <rxrpc/rxrpc.h>
-#include <linux/skbuff.h>
-#include <linux/rwsem.h>
-
-typedef int (*rxrpc_newcall_fnx_t)(struct rxrpc_call *call);
-
-extern wait_queue_head_t rxrpc_krxiod_wq;
-
-/*****************************************************************************/
-/*
- * Rx operation specification
- * - tables of these must be sorted by op ID so that they can be binary-chop searched
- */
-struct rxrpc_operation
-{
-       unsigned                id;             /* operation ID */
-       size_t                  asize;          /* minimum size of argument block */
-       const char              *name;          /* name of operation */
-       void                    *user;          /* initial user data */
-};
-
-/*****************************************************************************/
-/*
- * Rx transport service record
- */
-struct rxrpc_service
-{
-       struct list_head        link;           /* link in services list on transport */
-       struct module           *owner;         /* owner module */
-       rxrpc_newcall_fnx_t     new_call;       /* new call handler function */
-       const char              *name;          /* name of service */
-       unsigned short          service_id;     /* Rx service ID */
-       rxrpc_call_attn_func_t  attn_func;      /* call requires attention callback */
-       rxrpc_call_error_func_t error_func;     /* call error callback */
-       rxrpc_call_aemap_func_t aemap_func;     /* abort -> errno mapping callback */
-
-       const struct rxrpc_operation    *ops_begin;     /* beginning of operations table */
-       const struct rxrpc_operation    *ops_end;       /* end of operations table */
-};
-
-/*****************************************************************************/
-/*
- * Rx transport endpoint record
- */
-struct rxrpc_transport
-{
-       atomic_t                usage;
-       struct socket           *socket;        /* my UDP socket */
-       struct list_head        services;       /* services listening on this socket */
-       struct list_head        link;           /* link in transport list */
-       struct list_head        proc_link;      /* link in transport proc list */
-       struct list_head        krxiodq_link;   /* krxiod attention queue link */
-       spinlock_t              lock;           /* access lock */
-       struct list_head        peer_active;    /* active peers connected to over this socket */
-       struct list_head        peer_graveyard; /* inactive peer list */
-       spinlock_t              peer_gylock;    /* peer graveyard lock */
-       wait_queue_head_t       peer_gy_waitq;  /* wait queue hit when peer graveyard is empty */
-       rwlock_t                peer_lock;      /* peer list access lock */
-       atomic_t                peer_count;     /* number of peers */
-       struct rxrpc_peer_ops   *peer_ops;      /* default peer operations */
-       unsigned short          port;           /* port upon which listening */
-       volatile char           error_rcvd;     /* T if received ICMP error outstanding */
-};
-
-extern int rxrpc_create_transport(unsigned short port,
-                                 struct rxrpc_transport **_trans);
-
-static inline void rxrpc_get_transport(struct rxrpc_transport *trans)
-{
-       BUG_ON(atomic_read(&trans->usage) <= 0);
-       atomic_inc(&trans->usage);
-       //printk("rxrpc_get_transport(%p{u=%d})\n",
-       //       trans, atomic_read(&trans->usage));
-}
-
-extern void rxrpc_put_transport(struct rxrpc_transport *trans);
-
-extern int rxrpc_add_service(struct rxrpc_transport *trans,
-                            struct rxrpc_service *srv);
-
-extern void rxrpc_del_service(struct rxrpc_transport *trans,
-                             struct rxrpc_service *srv);
-
-extern void rxrpc_trans_receive_packet(struct rxrpc_transport *trans);
-
-extern int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
-                                      struct rxrpc_message *msg,
-                                      int error);
-
-#endif /* _LINUX_RXRPC_TRANSPORT_H */
index 76c9a11..4e9d208 100644 (file)
@@ -151,7 +151,7 @@ struct audit_buffer {
 
 static void audit_set_pid(struct audit_buffer *ab, pid_t pid)
 {
-       struct nlmsghdr *nlh = (struct nlmsghdr *)ab->skb->data;
+       struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
        nlh->nlmsg_pid = pid;
 }
 
@@ -750,7 +750,7 @@ static void audit_receive_skb(struct sk_buff *skb)
        u32             rlen;
 
        while (skb->len >= NLMSG_SPACE(0)) {
-               nlh = (struct nlmsghdr *)skb->data;
+               nlh = nlmsg_hdr(skb);
                if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
                        return;
                rlen = NLMSG_ALIGN(nlh->nlmsg_len);
@@ -795,7 +795,7 @@ static int __init audit_init(void)
        printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
               audit_default ? "enabled" : "disabled");
        audit_sock = netlink_kernel_create(NETLINK_AUDIT, 0, audit_receive,
-                                          THIS_MODULE);
+                                          NULL, THIS_MODULE);
        if (!audit_sock)
                audit_panic("cannot initialize netlink socket");
        else
@@ -1073,7 +1073,7 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
                        goto out;
        }
        va_copy(args2, args);
-       len = vsnprintf(skb->tail, avail, fmt, args);
+       len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args);
        if (len >= avail) {
                /* The printk buffer is 1024 bytes long, so if we get
                 * here and AUDIT_BUFSIZ is at least 1024, then we can
@@ -1082,7 +1082,7 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
                        max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
                if (!avail)
                        goto out;
-               len = vsnprintf(skb->tail, avail, fmt, args2);
+               len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
        }
        if (len > 0)
                skb_put(skb, len);
@@ -1143,7 +1143,7 @@ void audit_log_hex(struct audit_buffer *ab, const unsigned char *buf,
                        return;
        }
 
-       ptr = skb->tail;
+       ptr = skb_tail_pointer(skb);
        for (i=0; i<len; i++) {
                *ptr++ = hex[(buf[i] & 0xF0)>>4]; /* Upper nibble */
                *ptr++ = hex[buf[i] & 0x0F];      /* Lower nibble */
@@ -1175,7 +1175,7 @@ static void audit_log_n_string(struct audit_buffer *ab, size_t slen,
                if (!avail)
                        return;
        }
-       ptr = skb->tail;
+       ptr = skb_tail_pointer(skb);
        *ptr++ = '"';
        memcpy(ptr, string, slen);
        ptr += slen;
@@ -1268,7 +1268,7 @@ void audit_log_end(struct audit_buffer *ab)
                audit_log_lost("rate limit exceeded");
        } else {
                if (audit_pid) {
-                       struct nlmsghdr *nlh = (struct nlmsghdr *)ab->skb->data;
+                       struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
                        nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0);
                        skb_queue_tail(&audit_skb_queue, ab->skb);
                        ab->skb = NULL;
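
The hunks above replace direct pokes at skb->data and skb->tail with the nlmsg_hdr() and skb_tail_pointer() accessors, keeping the audit code independent of the sk_buff layout. A hedged one-liner showing the same pattern (function name illustrative):

    static void example_stamp_pid(struct sk_buff *skb, pid_t pid)
    {
            nlmsg_hdr(skb)->nlmsg_pid = pid;  /* instead of casting skb->data */
    }
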
index b74860a..f5cfde8 100644 (file)
@@ -59,6 +59,7 @@ ktime_t ktime_get(void)
 
        return timespec_to_ktime(now);
 }
+EXPORT_SYMBOL_GPL(ktime_get);
 
 /**
  * ktime_get_real - get the real (wall-) time in ktime_t format
index 1b255df..c904748 100644 (file)
@@ -1676,7 +1676,7 @@ static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp,
 {
        int op;
 
-       if (!capable(CAP_SYS_ADMIN))
+       if (write && !capable(CAP_SYS_ADMIN))
                return -EPERM;
 
        op = OP_OR;
index 4c3476f..ad7d239 100644 (file)
@@ -102,7 +102,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
  */
 static int send_reply(struct sk_buff *skb, pid_t pid)
 {
-       struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
+       struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        void *reply = genlmsg_data(genlhdr);
        int rc;
 
@@ -121,7 +121,7 @@ static int send_reply(struct sk_buff *skb, pid_t pid)
 static void send_cpu_listeners(struct sk_buff *skb,
                                        struct listener_list *listeners)
 {
-       struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
+       struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        struct listener *s, *tmp;
        struct sk_buff *skb_next, *skb_cur = skb;
        void *reply = genlmsg_data(genlhdr);
index 2f47888..ba18ec4 100644 (file)
@@ -452,6 +452,7 @@ struct timespec ns_to_timespec(const s64 nsec)
 
        return ts;
 }
+EXPORT_SYMBOL(ns_to_timespec);
 
 /**
  * ns_to_timeval - Convert nanoseconds to timeval
@@ -469,6 +470,7 @@ struct timeval ns_to_timeval(const s64 nsec)
 
        return tv;
 }
+EXPORT_SYMBOL(ns_to_timeval);
 
 /*
  * Convert jiffies to milliseconds and back.
index dd6c2c1..b22bd39 100644 (file)
@@ -505,6 +505,8 @@ out:
        return ret;
 }
 
+EXPORT_SYMBOL(try_to_del_timer_sync);
+
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
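
The new EXPORT_SYMBOL makes try_to_del_timer_sync() usable from modules. Unlike del_timer_sync(), it does not wait for a handler already running on another CPU; it returns a negative value instead so the caller can back off. A hedged sketch of that use (stop_timer_nonblocking is an illustrative name):

#include <linux/timer.h>
#include <linux/types.h>

/* Try to stop @t without blocking; a negative return from
 * try_to_del_timer_sync() means its handler is executing right now. */
static bool stop_timer_nonblocking(struct timer_list *t)
{
        return try_to_del_timer_sync(t) >= 0;
}
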
index 3f3e740..79afd00 100644 (file)
@@ -261,7 +261,7 @@ config LOCKDEP
        bool
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
        select STACKTRACE
-       select FRAME_POINTER if !X86
+       select FRAME_POINTER if !X86 && !MIPS
        select KALLSYMS
        select KALLSYMS_ALL
 
index 992a39e..ae57f35 100644 (file)
@@ -4,7 +4,7 @@
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
         rbtree.o radix-tree.o dump_stack.o \
-        idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
+        idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \
         sha1.o irq_regs.o reciprocal_div.o
 
 lib-$(CONFIG_MMU) += ioremap.o
@@ -12,7 +12,8 @@ lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y  += kobject.o kref.o kobject_uevent.o klist.o
 
-obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o bust_spinlocks.o
+obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+        bust_spinlocks.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
index 365719f..b71cf93 100644 (file)
@@ -23,7 +23,7 @@
 /* Not needed on 64bit architectures */
 #if BITS_PER_LONG == 32
 
-uint32_t __div64_32(uint64_t *n, uint32_t base)
+uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
 {
        uint64_t rem = *n;
        uint64_t b = base;
@@ -58,4 +58,24 @@ uint32_t __div64_32(uint64_t *n, uint32_t base)
 
 EXPORT_SYMBOL(__div64_32);
 
+/* 64bit divisor, dividend and result. dynamic precision */
+uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+{
+       uint32_t high, d;
+
+       high = divisor >> 32;
+       if (high) {
+               unsigned int shift = fls(high);
+
+               d = divisor >> shift;
+               dividend >>= shift;
+       } else
+               d = divisor;
+
+       do_div(dividend, d);
+
+       return dividend;
+}
+EXPORT_SYMBOL(div64_64);
+
 #endif /* BITS_PER_LONG == 32 */
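
The div64_64() added above keeps the divide feasible on 32-bit machines by shrinking the divisor to 32 bits: it shifts both operands right by fls() of the divisor's high word and then performs a single 64-by-32 do_div(). The low bits shifted out of the divisor cost a little precision, which its callers tolerate. A plain userspace restatement of the same trick (approx_div64_64 and fls32 are illustrative names):

#include <stdint.h>
#include <stdio.h>

static unsigned int fls32(uint32_t x)
{
        return x ? 32 - (unsigned int)__builtin_clz(x) : 0;
}

/* Reduce a 64/64 division to a 64/32 one, as div64_64() does. */
static uint64_t approx_div64_64(uint64_t dividend, uint64_t divisor)
{
        uint32_t high = divisor >> 32;
        uint32_t d;

        if (high) {
                unsigned int shift = fls32(high);

                d = (uint32_t)(divisor >> shift);
                dividend >>= shift;
        } else {
                d = (uint32_t)divisor;
        }

        return dividend / d;    /* stands in for do_div() */
}

int main(void)
{
        /* 10e9 / 3e9: exact here because the divisor fits in 32 bits. */
        printf("%llu\n", (unsigned long long)
               approx_div64_64(10000000000ULL, 3000000000ULL));
        return 0;
}
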
index 84272ed..82fc179 100644 (file)
@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(add_uevent_var);
 static int __init kobject_uevent_init(void)
 {
        uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL,
-                                           THIS_MODULE);
+                                           NULL, THIS_MODULE);
 
        if (!uevent_sock) {
                printk(KERN_ERR
index bab440f..5efafed 100644 (file)
@@ -60,6 +60,34 @@ int strnicmp(const char *s1, const char *s2, size_t len)
 EXPORT_SYMBOL(strnicmp);
 #endif
 
+#ifndef __HAVE_ARCH_STRCASECMP
+int strcasecmp(const char *s1, const char *s2)
+{
+       int c1, c2;
+
+       do {
+               c1 = tolower(*s1++);
+               c2 = tolower(*s2++);
+       } while (c1 == c2 && c1 != 0);
+       return c1 - c2;
+}
+EXPORT_SYMBOL(strcasecmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCASECMP
+int strncasecmp(const char *s1, const char *s2, size_t n)
+{
+       int c1, c2;
+
+       do {
+               c1 = tolower(*s1++);
+               c2 = tolower(*s2++);
+       } while ((--n > 0) && c1 == c2 && c1 != 0);
+       return c1 - c2;
+}
+EXPORT_SYMBOL(strncasecmp);
+#endif
+
 #ifndef __HAVE_ARCH_STRCPY
 /**
  * strcpy - Copy a %NUL terminated string
index 7a66ca2..a91ca00 100644 (file)
@@ -297,7 +297,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        void **pslot;
 
        if (!mapping) {
-               /* Anonymous page */
+               /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
                return 0;
@@ -333,6 +333,19 @@ static int migrate_page_move_mapping(struct address_space *mapping,
         */
        __put_page(page);
 
+       /*
+        * If moved to a different zone then also account
+        * the page for that zone. Other VM counters will be
+        * taken care of when we establish references to the
+        * new page and drop references to the old page.
+        *
+        * Note that anonymous pages are accounted for
+        * via NR_FILE_PAGES and NR_ANON_PAGES if they
+        * are mapped to swap space.
+        */
+       __dec_zone_page_state(page, NR_FILE_PAGES);
+       __inc_zone_page_state(newpage, NR_FILE_PAGES);
+
        write_unlock_irq(&mapping->tree_lock);
 
        return 0;
index 2f39169..3791edf 100644 (file)
@@ -176,6 +176,8 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
        struct zone **z;
        nodemask_t nodes;
        int node;
+
+       nodes_clear(nodes);
        /* node has memory ? */
        for_each_online_node(node)
                if (NODE_DATA(node)->node_present_pages)
@@ -333,7 +335,7 @@ static int oom_kill_task(struct task_struct *p)
         */
        do_each_thread(g, q) {
                if (q->mm == mm && q->tgid != p->tgid)
-                       force_sig(SIGKILL, p);
+                       force_sig(SIGKILL, q);
        } while_each_thread(g, q);
 
        return 0;
index f469e3c..a794945 100644 (file)
@@ -67,12 +67,12 @@ static inline long sync_writeback_pages(void)
 /*
  * Start background writeback (via pdflush) at this percentage
  */
-int dirty_background_ratio = 10;
+int dirty_background_ratio = 5;
 
 /*
  * The generator of dirty data starts writeback at this percentage
  */
-int vm_dirty_ratio = 40;
+int vm_dirty_ratio = 10;
 
 /*
  * The interval between `kupdate'-style writebacks, in jiffies
index b82146e..59da5b7 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -498,8 +498,10 @@ int page_mkclean(struct page *page)
                struct address_space *mapping = page_mapping(page);
                if (mapping)
                        ret = page_mkclean_file(mapping, page);
-               if (page_test_and_clear_dirty(page))
+               if (page_test_dirty(page)) {
+                       page_clear_dirty(page);
                        ret = 1;
+               }
        }
 
        return ret;
@@ -605,8 +607,10 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
                 * Leaving it set also helps swapoff to reinstate ptes
                 * faster for those pages still in swapcache.
                 */
-               if (page_test_and_clear_dirty(page))
+               if (page_test_dirty(page)) {
+                       page_clear_dirty(page);
                        set_page_dirty(page);
+               }
                __dec_zone_page_state(page,
                                PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
        }
index ace6386..91dde41 100644 (file)
@@ -100,7 +100,7 @@ static int fddi_rebuild_header(struct sk_buff       *skb)
        struct fddihdr *fddi = (struct fddihdr *)skb->data;
 
 #ifdef CONFIG_INET
-       if (fddi->hdr.llc_snap.ethertype == __constant_htons(ETH_P_IP))
+       if (fddi->hdr.llc_snap.ethertype == htons(ETH_P_IP))
                /* Try to get ARP to resolve the header and fill destination address */
                return arp_find(fddi->daddr, skb);
        else
@@ -130,12 +130,13 @@ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
         * to start of packet data.  Assume 802.2 SNAP frames for now.
         */
 
-       skb->mac.raw = skb->data;       /* point to frame control (FC) */
+       skb->dev = dev;
+       skb_reset_mac_header(skb);      /* point to frame control (FC) */
 
        if(fddi->hdr.llc_8022_1.dsap==0xe0)
        {
                skb_pull(skb, FDDI_K_8022_HLEN-3);
-               type = __constant_htons(ETH_P_802_2);
+               type = htons(ETH_P_802_2);
        }
        else
        {
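
From here on, most of the networking hunks are the same conversion: assignments to skb->mac.raw, skb->nh.raw and skb->h.raw become skb_reset_mac_header(), skb_reset_network_header() and skb_reset_transport_header(), and reads go through skb_mac_header()/skb_transport_header(). A kernel-style sketch of the receive-path idiom the *_type_trans() handlers converge on (example_type_trans is illustrative, not an existing function):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static __be16 example_type_trans(struct sk_buff *skb, struct net_device *dev,
                                 unsigned int hdrlen)
{
        const unsigned char *hdr;

        skb->dev = dev;
        skb_reset_mac_header(skb);      /* mac header = current skb->data */
        hdr = skb_mac_header(skb);      /* still reachable after the pull */
        skb_pull(skb, hdrlen);          /* skb->data now points at payload */

        /* Decide the protocol from the saved header, much as
         * fddi_type_trans() does with the DSAP byte. */
        return hdr[0] == 0xe0 ? htons(ETH_P_802_2) : htons(ETH_P_802_3);
}
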
index 578f2a3..87ffc12 100644 (file)
@@ -60,7 +60,7 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev,
         * Due to the stupidity of the little endian byte-order we
         * have to set the fp field this way.
         */
-       hip->fp.fixed           = __constant_htonl(0x04800018);
+       hip->fp.fixed           = htonl(0x04800018);
        hip->fp.d2_size         = htonl(len + 8);
        hip->le.fc              = 0;
        hip->le.double_wide     = 0;    /* only HIPPI 800 for the time being */
@@ -104,7 +104,7 @@ static int hippi_rebuild_header(struct sk_buff *skb)
         * Only IP is currently supported
         */
 
-       if(hip->snap.ethertype != __constant_htons(ETH_P_IP))
+       if(hip->snap.ethertype != htons(ETH_P_IP))
        {
                printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n",skb->dev->name,ntohs(hip->snap.ethertype));
                return 0;
@@ -126,14 +126,14 @@ __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev)
 {
        struct hippi_hdr *hip;
 
-       hip = (struct hippi_hdr *) skb->data;
-
        /*
         * This is actually wrong ... question is if we really should
         * set the raw address here.
         */
-        skb->mac.raw = skb->data;
-        skb_pull(skb, HIPPI_HLEN);
+       skb->dev = dev;
+       skb_reset_mac_header(skb);
+       hip = (struct hippi_hdr *)skb_mac_header(skb);
+       skb_pull(skb, HIPPI_HLEN);
 
        /*
         * No fancy promisc stuff here now.
index 6e7c212..04ee43e 100644 (file)
@@ -56,10 +56,10 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
        };
 
        rcu_read_lock();
-       proto = find_snap_client(skb->h.raw);
+       proto = find_snap_client(skb_transport_header(skb));
        if (proto) {
                /* Pass the frame on. */
-               skb->h.raw  += 5;
+               skb->transport_header += 5;
                skb_pull_rcsum(skb, 5);
                rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
        } else {
index 96bd144..0ba1946 100644 (file)
@@ -189,11 +189,13 @@ static int tr_rebuild_header(struct sk_buff *skb)
 __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
 {
 
-       struct trh_hdr *trh=(struct trh_hdr *)skb->data;
+       struct trh_hdr *trh;
        struct trllc *trllc;
        unsigned riflen=0;
 
-       skb->mac.raw = skb->data;
+       skb->dev = dev;
+       skb_reset_mac_header(skb);
+       trh = tr_hdr(skb);
 
        if(trh->saddr[0] & TR_RII)
                riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
@@ -552,7 +554,8 @@ static int rif_seq_show(struct seq_file *seq, void *v)
                                        if(j==1) {
                                                segment=ntohs(entry->rseg[j-1])>>4;
                                                seq_printf(seq,"  %03X",segment);
-                                       };
+                                       }
+
                                        segment=ntohs(entry->rseg[j])>>4;
                                        brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
                                        seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
index eb1c71e..c0c7bb8 100644 (file)
@@ -470,7 +470,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
                 */
        default:
                snprintf(name, IFNAMSIZ, "vlan%.4i", VLAN_ID);
-       };
+       }
 
        new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name,
                               vlan_setup);
@@ -685,7 +685,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                                break;
                }
                break;
-       };
+       }
 
 out:
        return NOTIFY_DONE;
@@ -819,7 +819,7 @@ static int vlan_ioctl_handler(void __user *arg)
                printk(VLAN_DBG "%s: Unknown VLAN CMD: %x \n",
                        __FUNCTION__, args.cmd);
                return -EINVAL;
-       };
+       }
 out:
        return err;
 }
index b6e0eea..ec46084 100644 (file)
@@ -66,7 +66,7 @@ int vlan_dev_rebuild_header(struct sk_buff *skb)
 
                memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
                break;
-       };
+       }
 
        return 0;
 }
@@ -83,7 +83,7 @@ static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
                        /* Lifted from Gleb's VLAN code... */
                        memmove(skb->data - ETH_HLEN,
                                skb->data - VLAN_ETH_HLEN, 12);
-                       skb->mac.raw += VLAN_HLEN;
+                       skb->mac_header += VLAN_HLEN;
                }
        }
 
@@ -219,7 +219,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                break;
        default:
                break;
-       };
+       }
 
        /*  Was a VLAN packet, grab the encapsulated protocol, which the layer
         * three protocols care about.
@@ -258,7 +258,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
         * won't work for fault tolerant netware but does for the rest.
         */
        if (*(unsigned short *)rawp == 0xFFFF) {
-               skb->protocol = __constant_htons(ETH_P_802_3);
+               skb->protocol = htons(ETH_P_802_3);
                /* place it back on the queue to be handled by true layer 3 protocols.
                 */
 
@@ -281,7 +281,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
        /*
         *      Real 802.2 LLC
         */
-       skb->protocol = __constant_htons(ETH_P_802_2);
+       skb->protocol = htons(ETH_P_802_2);
        /* place it back on the queue to be handled by upper layer protocols.
         */
 
@@ -382,7 +382,7 @@ int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                }
 
                skb->protocol = htons(ETH_P_8021Q);
-               skb->nh.raw = skb->data;
+               skb_reset_network_header(skb);
        }
 
        /* Before delegating work to the lower layer, enter our MAC-address */
@@ -448,7 +448,7 @@ int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
         */
 
-       if (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q)) {
+       if (veth->h_vlan_proto != htons(ETH_P_8021Q)) {
                int orig_headroom = skb_headroom(skb);
                unsigned short veth_TCI;
 
index 9156578..2fc8e77 100644 (file)
@@ -27,13 +27,6 @@ if NET
 
 menu "Networking options"
 
-config NETDEBUG
-       bool "Network packet debugging"
-       help
-         You can say Y here if you want to get additional messages useful in
-         debugging bad packets, but can overwhelm logs under denial of service
-         attacks.
-
 source "net/packet/Kconfig"
 source "net/unix/Kconfig"
 source "net/xfrm/Kconfig"
@@ -219,14 +212,18 @@ endmenu
 source "net/ax25/Kconfig"
 source "net/irda/Kconfig"
 source "net/bluetooth/Kconfig"
-source "net/ieee80211/Kconfig"
-
-config WIRELESS_EXT
-       bool
+source "net/rxrpc/Kconfig"
 
 config FIB_RULES
        bool
 
+menu "Wireless"
+
+source "net/wireless/Kconfig"
+source "net/ieee80211/Kconfig"
+
+endmenu
+
 endif   # if NET
 endmenu # Networking
 
index 4854ac5..6b74d41 100644 (file)
@@ -38,6 +38,7 @@ obj-$(CONFIG_IRDA)            += irda/
 obj-$(CONFIG_BT)               += bluetooth/
 obj-$(CONFIG_SUNRPC)           += sunrpc/
 obj-$(CONFIG_RXRPC)            += rxrpc/
+obj-$(CONFIG_AF_RXRPC)         += rxrpc/
 obj-$(CONFIG_ATM)              += atm/
 obj-$(CONFIG_DECNET)           += decnet/
 obj-$(CONFIG_ECONET)           += econet/
@@ -52,3 +53,5 @@ obj-$(CONFIG_IUCV)            += iucv/
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)           += sysctl_net.o
 endif
+
+obj-y                          += wireless/
index d89d62f..5ef6a23 100644 (file)
@@ -118,7 +118,9 @@ static void __aarp_send_query(struct aarp_entry *a)
 
        /* Set up the buffer */
        skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length);
-       skb->nh.raw      = skb->h.raw = skb_put(skb, sizeof(*eah));
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+       skb_put(skb, sizeof(*eah));
        skb->protocol    = htons(ETH_P_ATALK);
        skb->dev         = dev;
        eah              = aarp_hdr(skb);
@@ -163,7 +165,9 @@ static void aarp_send_reply(struct net_device *dev, struct atalk_addr *us,
 
        /* Set up the buffer */
        skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length);
-       skb->nh.raw      = skb->h.raw = skb_put(skb, sizeof(*eah));
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+       skb_put(skb, sizeof(*eah));
        skb->protocol    = htons(ETH_P_ATALK);
        skb->dev         = dev;
        eah              = aarp_hdr(skb);
@@ -212,7 +216,9 @@ static void aarp_send_probe(struct net_device *dev, struct atalk_addr *us)
 
        /* Set up the buffer */
        skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length);
-       skb->nh.raw      = skb->h.raw = skb_put(skb, sizeof(*eah));
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+       skb_put(skb, sizeof(*eah));
        skb->protocol    = htons(ETH_P_ATALK);
        skb->dev         = dev;
        eah              = aarp_hdr(skb);
@@ -539,7 +545,7 @@ int aarp_send_ddp(struct net_device *dev, struct sk_buff *skb,
        int hash;
        struct aarp_entry *a;
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        /* Check for LocalTalk first */
        if (dev->type == ARPHRD_LOCALTLK) {
index c8b7dc2..16eda21 100644 (file)
@@ -937,11 +937,11 @@ static unsigned long atalk_sum_partial(const unsigned char *data,
 static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
                                   int len, unsigned long sum)
 {
-       int start = skb_headlen(skb);
+       int end = skb_headlen(skb);
        int i, copy;
 
        /* checksum stuff in header space */
-       if ( (copy = start - offset) > 0) {
+       if ((copy = end - offset) > 0) {
                if (copy > len)
                        copy = len;
                sum = atalk_sum_partial(skb->data + offset, copy, sum);
@@ -953,11 +953,9 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 
        /* checksum stuff in frags */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
+               BUG_TRAP(len >= 0);
 
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
+               end = offset + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -965,36 +963,31 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
                        if (copy > len)
                                copy = len;
                        vaddr = kmap_skb_frag(frag);
-                       sum = atalk_sum_partial(vaddr + frag->page_offset +
-                                                 offset - start, copy, sum);
+                       sum = atalk_sum_partial(vaddr + frag->page_offset,
+                                               copy, sum);
                        kunmap_skb_frag(vaddr);
 
                        if (!(len -= copy))
                                return sum;
                        offset += copy;
                }
-               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
+                       BUG_TRAP(len >= 0);
 
-                       end = start + list->len;
+                       end = offset + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
-                               sum = atalk_sum_skb(list, offset - start,
-                                                   copy, sum);
+                               sum = atalk_sum_skb(list, 0, copy, sum);
                                if ((len -= copy) == 0)
                                        return sum;
                                offset += copy;
                        }
-                       start = end;
                }
        }
 
@@ -1275,7 +1268,7 @@ static int handle_ip_over_ddp(struct sk_buff *skb)
        skb->protocol = htons(ETH_P_IP);
        skb_pull(skb, 13);
        skb->dev   = dev;
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
 
        stats = dev->priv;
        stats->rx_packets++;
@@ -1383,10 +1376,10 @@ free_it:
  *     @pt - packet type
  *
  *     Receive a packet (in skb) from device dev. This has come from the SNAP
- *     decoder, and on entry skb->h.raw is the DDP header, skb->len is the DDP
- *     header, skb->len is the DDP length. The physical headers have been
- *     extracted. PPP should probably pass frames marked as for this layer.
- *     [ie ARPHRD_ETHERTALK]
+ *     decoder, and on entry skb->transport_header is the DDP header and
+ *     skb->len is the DDP length. The physical headers
+ *     have been extracted. PPP should probably pass frames marked as for this
+ *     layer.  [ie ARPHRD_ETHERTALK]
  */
 static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
                     struct packet_type *pt, struct net_device *orig_dev)
@@ -1484,7 +1477,7 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
                     struct packet_type *pt, struct net_device *orig_dev)
 {
        /* Expand any short form frames */
-       if (skb->mac.raw[2] == 1) {
+       if (skb_mac_header(skb)[2] == 1) {
                struct ddpehdr *ddp;
                /* Find our address */
                struct atalk_addr *ap = atalk_find_dev_addr(dev);
@@ -1510,8 +1503,8 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
                 * we write the network numbers !
                 */
 
-               ddp->deh_dnode = skb->mac.raw[0];     /* From physical header */
-               ddp->deh_snode = skb->mac.raw[1];     /* From physical header */
+               ddp->deh_dnode = skb_mac_header(skb)[0];     /* From physical header */
+               ddp->deh_snode = skb_mac_header(skb)[1];     /* From physical header */
 
                ddp->deh_dnet  = ap->s_net;     /* Network number */
                ddp->deh_snet  = ap->s_net;
@@ -1522,7 +1515,7 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
                /* Non routable, so force a drop if we slip up later */
                ddp->deh_len_hops = htons(skb->len + (DDP_MAXHOPS << 10));
        }
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
 
        return atalk_rcv(skb, dev, pt, orig_dev);
 freeit:
@@ -1771,6 +1764,9 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                case SIOCGSTAMP:
                        rc = sock_get_timestamp(sk, argp);
                        break;
+               case SIOCGSTAMPNS:
+                       rc = sock_get_timestampns(sk, argp);
+                       break;
                /* Routing */
                case SIOCADDRT:
                case SIOCDELRT:
index ec4ebd3..0e9f00c 100644 (file)
@@ -173,7 +173,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev,
        }
        skb_push(skb, minheadroom);
        if (brvcc->encaps == e_llc)
-               memcpy(skb->data, llc_oui_pid_pad, 10);
+               skb_copy_to_linear_data(skb, llc_oui_pid_pad, 10);
        else
                memset(skb->data, 0, 2);
 #endif /* FASTER_VERSION */
@@ -375,11 +375,11 @@ packet_fails_filter(__be16 type, struct br2684_vcc *brvcc, struct sk_buff *skb)
 {
        if (brvcc->filter.netmask == 0)
                return 0;                       /* no filter in place */
-       if (type == __constant_htons(ETH_P_IP) &&
+       if (type == htons(ETH_P_IP) &&
            (((struct iphdr *) (skb->data))->daddr & brvcc->filter.
             netmask) == brvcc->filter.prefix)
                return 0;
-       if (type == __constant_htons(ETH_P_ARP))
+       if (type == htons(ETH_P_ARP))
                return 0;
        /* TODO: we should probably filter ARPs too.. don't want to have
         *   them returning values that don't make sense, or is that ok?
@@ -458,7 +458,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
        /* FIXME: tcpdump shows that pointer to mac header is 2 bytes earlier,
           than should be. What else should I set? */
        skb_pull(skb, plen);
-       skb->mac.raw = ((char *) (skb->data)) - ETH_HLEN;
+       skb_set_mac_header(skb, -ETH_HLEN);
        skb->pkt_type = PACKET_HOST;
 #ifdef CONFIG_BR2684_FAST_TRANS
        skb->protocol = ((u16 *) skb->data)[-1];
index 8c38258..876b77f 100644 (file)
@@ -213,7 +213,7 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
                return;
        }
        ATM_SKB(skb)->vcc = vcc;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        if (!clip_vcc->encap
            || skb->len < RFC1483LLC_LEN
            || memcmp(skb->data, llc_oui, sizeof (llc_oui)))
@@ -702,7 +702,7 @@ static struct atm_dev atmarpd_dev = {
        .ops =                  &atmarpd_dev_ops,
        .type =                 "arpd",
        .number =               999,
-       .lock =                 SPIN_LOCK_UNLOCKED
+       .lock =                 __SPIN_LOCK_UNLOCKED(atmarpd_dev.lock)
 };
 
 
index 8ccee45..7afd8e7 100644 (file)
@@ -82,6 +82,9 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                case SIOCGSTAMP: /* borrowed from IP */
                        error = sock_get_timestamp(sk, argp);
                        goto done;
+               case SIOCGSTAMPNS: /* borrowed from IP */
+                       error = sock_get_timestampns(sk, argp);
+                       goto done;
                case ATM_SETSC:
                        printk(KERN_WARNING "ATM_SETSC is obsolete\n");
                        error = 0;
index 3d804d6..4dc5f2b 100644 (file)
@@ -283,8 +283,8 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        DPRINTK("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
-               (long)skb->head, (long)skb->data, (long)skb->tail,
-               (long)skb->end);
+               (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
+               (long)skb_end_pointer(skb));
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
        if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
                lec_handle_bridge(skb, dev);
@@ -576,8 +576,8 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
                                        break;
                                }
                                skb2->len = sizeof(struct atmlec_msg);
-                               memcpy(skb2->data, mesg,
-                                      sizeof(struct atmlec_msg));
+                               skb_copy_to_linear_data(skb2, mesg,
+                                                       sizeof(*mesg));
                                atm_force_charge(priv->lecd, skb2->truesize);
                                sk = sk_atm(priv->lecd);
                                skb_queue_tail(&sk->sk_receive_queue, skb2);
@@ -630,7 +630,7 @@ static struct atm_dev lecatm_dev = {
        .ops = &lecdev_ops,
        .type = "lec",
        .number = 999,          /* dummy device number */
-       .lock = SPIN_LOCK_UNLOCKED
+       .lock = __SPIN_LOCK_UNLOCKED(lecatm_dev.lock)
 };
 
 /*
@@ -825,7 +825,6 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
                if (!hlist_empty(&priv->lec_arp_empty_ones)) {
                        lec_arp_check_empties(priv, vcc, skb);
                }
-               skb->dev = dev;
                skb_pull(skb, 2);       /* skip lec_id */
 #ifdef CONFIG_TR
                if (priv->is_trdev)
@@ -1338,7 +1337,7 @@ static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
                if (skb == NULL)
                        return -1;
                skb->len = *sizeoftlvs;
-               memcpy(skb->data, *tlvs, *sizeoftlvs);
+               skb_copy_to_linear_data(skb, *tlvs, *sizeoftlvs);
                retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, skb);
        }
        return retval;
@@ -1372,7 +1371,7 @@ static int lane2_associate_req(struct net_device *dev, u8 *lan_dst,
        if (skb == NULL)
                return 0;
        skb->len = sizeoftlvs;
-       memcpy(skb->data, tlvs, sizeoftlvs);
+       skb_copy_to_linear_data(skb, tlvs, sizeoftlvs);
        retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
        if (retval != 0)
                printk("lec.c: lane2_associate_req() failed\n");
index cb3c004..7c85aa5 100644 (file)
@@ -504,11 +504,13 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
                tagged_llc_snap_hdr.tag = entry->ctrl_info.tag;
                skb_pull(skb, ETH_HLEN);                       /* get rid of Eth header */
                skb_push(skb, sizeof(tagged_llc_snap_hdr));    /* add LLC/SNAP header   */
-               memcpy(skb->data, &tagged_llc_snap_hdr, sizeof(tagged_llc_snap_hdr));
+               skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
+                                       sizeof(tagged_llc_snap_hdr));
        } else {
                skb_pull(skb, ETH_HLEN);                        /* get rid of Eth header */
                skb_push(skb, sizeof(struct llc_snap_hdr));     /* add LLC/SNAP header + tag  */
-               memcpy(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr));
+               skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
+                                       sizeof(struct llc_snap_hdr));
        }
 
        atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
@@ -711,11 +713,12 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
                return;
        }
        skb_push(new_skb, eg->ctrl_info.DH_length);     /* add MAC header */
-       memcpy(new_skb->data, eg->ctrl_info.DLL_header, eg->ctrl_info.DH_length);
+       skb_copy_to_linear_data(new_skb, eg->ctrl_info.DLL_header,
+                               eg->ctrl_info.DH_length);
        new_skb->protocol = eth_type_trans(new_skb, dev);
-       new_skb->nh.raw = new_skb->data;
+       skb_reset_network_header(new_skb);
 
-       eg->latest_ip_addr = new_skb->nh.iph->saddr;
+       eg->latest_ip_addr = ip_hdr(new_skb)->saddr;
        eg->packets_rcvd++;
        mpc->eg_ops->put(eg);
 
@@ -734,7 +737,7 @@ static struct atm_dev mpc_dev = {
        .ops    = &mpc_ops,
        .type   = "mpc",
        .number = 42,
-       .lock   = SPIN_LOCK_UNLOCKED
+       .lock   = __SPIN_LOCK_UNLOCKED(mpc_dev.lock)
        /* members not explicitly initialised will be 0 */
 };
 
@@ -936,7 +939,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
        if (skb == NULL)
                return -ENOMEM;
        skb_put(skb, sizeof(struct k_message));
-       memcpy(skb->data, mesg, sizeof(struct k_message));
+       skb_copy_to_linear_data(skb, mesg, sizeof(*mesg));
        atm_force_charge(mpc->mpoad_vcc, skb->truesize);
 
        sk = sk_atm(mpc->mpoad_vcc);
index 31d98b5..d14baaf 100644 (file)
@@ -256,7 +256,7 @@ static struct atm_dev sigd_dev = {
        .ops =          &sigd_dev_ops,
        .type =         "sig",
        .number =       999,
-       .lock =         SPIN_LOCK_UNLOCKED
+       .lock =         __SPIN_LOCK_UNLOCKED(sigd_dev.lock)
 };
 
 
index 1c07c6a..6ded952 100644 (file)
@@ -1127,22 +1127,22 @@ static int __must_check ax25_connect(struct socket *sock,
                switch (sk->sk_state) {
                case TCP_SYN_SENT: /* still trying */
                        err = -EINPROGRESS;
-                       goto out;
+                       goto out_release;
 
                case TCP_ESTABLISHED: /* connection established */
                        sock->state = SS_CONNECTED;
-                       goto out;
+                       goto out_release;
 
                case TCP_CLOSE: /* connection refused */
                        sock->state = SS_UNCONNECTED;
                        err = -ECONNREFUSED;
-                       goto out;
+                       goto out_release;
                }
        }
 
        if (sk->sk_state == TCP_ESTABLISHED && sk->sk_type == SOCK_SEQPACKET) {
                err = -EISCONN; /* No reconnect on a seqpacket socket */
-               goto out;
+               goto out_release;
        }
 
        sk->sk_state   = TCP_CLOSE;
@@ -1159,12 +1159,12 @@ static int __must_check ax25_connect(struct socket *sock,
                /* Valid number of digipeaters ? */
                if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
                        err = -EINVAL;
-                       goto out;
+                       goto out_release;
                }
 
                if ((digi = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) {
                        err = -ENOBUFS;
-                       goto out;
+                       goto out_release;
                }
 
                digi->ndigi      = fsa->fsa_ax25.sax25_ndigis;
@@ -1194,7 +1194,7 @@ static int __must_check ax25_connect(struct socket *sock,
                        current->comm);
                if ((err = ax25_rt_autobind(ax25, &fsa->fsa_ax25.sax25_call)) < 0) {
                        kfree(digi);
-                       goto out;
+                       goto out_release;
                }
 
                ax25_fillin_cb(ax25, ax25->ax25_dev);
@@ -1203,7 +1203,7 @@ static int __must_check ax25_connect(struct socket *sock,
                if (ax25->ax25_dev == NULL) {
                        kfree(digi);
                        err = -EHOSTUNREACH;
-                       goto out;
+                       goto out_release;
                }
        }
 
@@ -1213,7 +1213,7 @@ static int __must_check ax25_connect(struct socket *sock,
                kfree(digi);
                err = -EADDRINUSE;              /* Already such a connection */
                ax25_cb_put(ax25t);
-               goto out;
+               goto out_release;
        }
 
        ax25->dest_addr = fsa->fsa_ax25.sax25_call;
@@ -1223,7 +1223,7 @@ static int __must_check ax25_connect(struct socket *sock,
        if (sk->sk_type != SOCK_SEQPACKET) {
                sock->state = SS_CONNECTED;
                sk->sk_state   = TCP_ESTABLISHED;
-               goto out;
+               goto out_release;
        }
 
        /* Move to connecting socket, ax.25 lapb WAIT_UA.. */
@@ -1255,55 +1255,53 @@ static int __must_check ax25_connect(struct socket *sock,
        /* Now the loop */
        if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
                err = -EINPROGRESS;
-               goto out;
+               goto out_release;
        }
 
        if (sk->sk_state == TCP_SYN_SENT) {
-               struct task_struct *tsk = current;
-               DECLARE_WAITQUEUE(wait, tsk);
+               DEFINE_WAIT(wait);
 
-               add_wait_queue(sk->sk_sleep, &wait);
                for (;;) {
+                       prepare_to_wait(sk->sk_sleep, &wait,
+                                       TASK_INTERRUPTIBLE);
                        if (sk->sk_state != TCP_SYN_SENT)
                                break;
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       release_sock(sk);
-                       if (!signal_pending(tsk)) {
+                       if (!signal_pending(current)) {
+                               release_sock(sk);
                                schedule();
                                lock_sock(sk);
                                continue;
                        }
-                       current->state = TASK_RUNNING;
-                       remove_wait_queue(sk->sk_sleep, &wait);
-                       return -ERESTARTSYS;
+                       err = -ERESTARTSYS;
+                       break;
                }
-               current->state = TASK_RUNNING;
-               remove_wait_queue(sk->sk_sleep, &wait);
+               finish_wait(sk->sk_sleep, &wait);
+
+               if (err)
+                       goto out_release;
        }
 
        if (sk->sk_state != TCP_ESTABLISHED) {
                /* Not in ABM, not in WAIT_UA -> failed */
                sock->state = SS_UNCONNECTED;
                err = sock_error(sk);   /* Always set at this point */
-               goto out;
+               goto out_release;
        }
 
        sock->state = SS_CONNECTED;
 
-       err=0;
-out:
+       err = 0;
+out_release:
        release_sock(sk);
 
        return err;
 }
 
-
 static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
 {
-       struct task_struct *tsk = current;
-       DECLARE_WAITQUEUE(wait, tsk);
        struct sk_buff *skb;
        struct sock *newsk;
+       DEFINE_WAIT(wait);
        struct sock *sk;
        int err = 0;
 
@@ -1328,30 +1326,29 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
         *      The read queue this time is holding sockets ready to use
         *      hooked into the SABM we saved
         */
-       add_wait_queue(sk->sk_sleep, &wait);
        for (;;) {
+               prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
                skb = skb_dequeue(&sk->sk_receive_queue);
                if (skb)
                        break;
 
-               release_sock(sk);
-               current->state = TASK_INTERRUPTIBLE;
                if (flags & O_NONBLOCK) {
-                       current->state = TASK_RUNNING;
-                       remove_wait_queue(sk->sk_sleep, &wait);
-                       return -EWOULDBLOCK;
+                       err = -EWOULDBLOCK;
+                       break;
                }
-               if (!signal_pending(tsk)) {
+               if (!signal_pending(current)) {
+                       release_sock(sk);
                        schedule();
                        lock_sock(sk);
                        continue;
                }
-               current->state = TASK_RUNNING;
-               remove_wait_queue(sk->sk_sleep, &wait);
-               return -ERESTARTSYS;
+               err = -ERESTARTSYS;
+               break;
        }
-       current->state = TASK_RUNNING;
-       remove_wait_queue(sk->sk_sleep, &wait);
+       finish_wait(sk->sk_sleep, &wait);
+
+       if (err)
+               goto out;
 
        newsk            = skb->sk;
        newsk->sk_socket = newsock;
@@ -1425,7 +1422,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
        struct sockaddr_ax25 sax;
        struct sk_buff *skb;
        ax25_digi dtmp, *dp;
-       unsigned char *asmptr;
        ax25_cb *ax25;
        size_t size;
        int lv, err, addr_len = msg->msg_namelen;
@@ -1548,13 +1544,11 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
                goto out;
        }
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        /* Add the PID if one is not supplied by the user in the skb */
-       if (!ax25->pidincl) {
-               asmptr  = skb_push(skb, 1);
-               *asmptr = sk->sk_protocol;
-       }
+       if (!ax25->pidincl)
+               *skb_push(skb, 1) = sk->sk_protocol;
 
        SOCK_DEBUG(sk, "AX.25: Transmitting buffer\n");
 
@@ -1573,7 +1567,7 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
                goto out;
        }
 
-       asmptr = skb_push(skb, 1 + ax25_addr_size(dp));
+       skb_push(skb, 1 + ax25_addr_size(dp));
 
        SOCK_DEBUG(sk, "Building AX.25 Header (dp=%p).\n", dp);
 
@@ -1581,17 +1575,17 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
                SOCK_DEBUG(sk, "Num digipeaters=%d\n", dp->ndigi);
 
        /* Build an AX.25 header */
-       asmptr += (lv = ax25_addr_build(asmptr, &ax25->source_addr,
-                                       &sax.sax25_call, dp,
-                                       AX25_COMMAND, AX25_MODULUS));
+       lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call,
+                            dp, AX25_COMMAND, AX25_MODULUS);
 
        SOCK_DEBUG(sk, "Built header (%d bytes)\n",lv);
 
-       skb->h.raw = asmptr;
+       skb_set_transport_header(skb, lv);
 
-       SOCK_DEBUG(sk, "base=%p pos=%p\n", skb->data, asmptr);
+       SOCK_DEBUG(sk, "base=%p pos=%p\n",
+                  skb->data, skb_transport_header(skb));
 
-       *asmptr = AX25_UI;
+       *skb_transport_header(skb) = AX25_UI;
 
        /* Datagram frames go straight out of the door as UI */
        ax25_queue_xmit(skb, ax25->ax25_dev->dev);
@@ -1631,8 +1625,8 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (!ax25_sk(sk)->pidincl)
                skb_pull(skb, 1);               /* Remove PID */
 
-       skb->h.raw = skb->data;
-       copied     = skb->len;
+       skb_reset_transport_header(skb);
+       copied = skb->len;
 
        if (copied > size) {
                copied = size;
@@ -1645,9 +1639,10 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
                struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
                ax25_digi digi;
                ax25_address src;
+               const unsigned char *mac = skb_mac_header(skb);
 
-               ax25_addr_parse(skb->mac.raw+1, skb->data-skb->mac.raw-1, &src, NULL, &digi, NULL, NULL);
-
+               ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
+                               &digi, NULL, NULL);
                sax->sax25_family = AF_AX25;
                /* We set this correctly, even though we may not let the
                   application know the digi calls further down (because it
@@ -1711,6 +1706,10 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                res = sock_get_timestamp(sk, argp);
                break;
 
+       case SIOCGSTAMPNS:
+               res = sock_get_timestampns(sk, argp);
+               break;
+
        case SIOCAX25ADDUID:    /* Add a uid to the uid/call map table */
        case SIOCAX25DELUID:    /* Delete a uid from the uid/call map table */
        case SIOCAX25GETUID: {
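
The af_ax25.c rework above replaces the open-coded add_wait_queue()/set_current_state() loops in ax25_connect() and ax25_accept() with prepare_to_wait()/finish_wait(), and routes every exit through a single label so release_sock() always runs. A condensed sketch of that wait-loop shape, without the socket locking (wait_for_flag is an illustrative name):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

/* Sleep until *flag becomes non-zero or a signal arrives.
 * Returns 0 on success, -ERESTARTSYS if interrupted. */
static int wait_for_flag(wait_queue_head_t *wq, const int *flag)
{
        DEFINE_WAIT(wait);
        int err = 0;

        for (;;) {
                prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
                if (*flag)
                        break;                  /* condition met */
                if (signal_pending(current)) {
                        err = -ERESTARTSYS;     /* bail out, restartable */
                        break;
                }
                schedule();                     /* actually sleep */
        }
        finish_wait(wq, &wait);

        return err;
}
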
index 9569dd3..a49773f 100644 (file)
@@ -136,7 +136,7 @@ static void ax25_kiss_cmd(ax25_dev *ax25_dev, unsigned char cmd, unsigned char p
        if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL)
                return;
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
        p = skb_put(skb, 2);
 
        *p++ = cmd;
index 4a6b26b..0ddaff0 100644 (file)
@@ -61,12 +61,14 @@ static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
                                        skb_reserve(skbn, AX25_MAX_HEADER_LEN);
 
                                        skbn->dev   = ax25->ax25_dev->dev;
-                                       skbn->h.raw = skbn->data;
-                                       skbn->nh.raw = skbn->data;
+                                       skb_reset_network_header(skbn);
+                                       skb_reset_transport_header(skbn);
 
                                        /* Copy data from the fragments */
                                        while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
-                                               memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);
+                                               skb_copy_from_linear_data(skbo,
+                                                         skb_put(skbn, skbo->len),
+                                                                         skbo->len);
                                                kfree_skb(skbo);
                                        }
 
@@ -122,8 +124,8 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
                }
 
                skb_pull(skb, 1);       /* Remove PID */
-               skb->mac.raw  = skb->nh.raw;
-               skb->nh.raw   = skb->data;
+               skb_reset_mac_header(skb);
+               skb_reset_network_header(skb);
                skb->dev      = ax25->ax25_dev->dev;
                skb->pkt_type = PACKET_HOST;
                skb->protocol = htons(ETH_P_IP);
@@ -196,7 +198,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
         *      Process the AX.25/LAPB frame.
         */
 
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
 
        if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
                kfree_skb(skb);
@@ -233,7 +235,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
 
        /* UI frame - bypass LAPB processing */
        if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
-               skb->h.raw = skb->data + 2;             /* skip control and pid */
+               skb_set_transport_header(skb, 2); /* skip control and pid */
 
                ax25_send_to_raw(&dest, skb, skb->data[1]);
 
@@ -246,8 +248,8 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
                switch (skb->data[1]) {
                case AX25_P_IP:
                        skb_pull(skb,2);                /* drop PID/CTRL */
-                       skb->h.raw    = skb->data;
-                       skb->nh.raw   = skb->data;
+                       skb_reset_transport_header(skb);
+                       skb_reset_network_header(skb);
                        skb->dev      = dev;
                        skb->pkt_type = PACKET_HOST;
                        skb->protocol = htons(ETH_P_IP);
@@ -256,8 +258,8 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
 
                case AX25_P_ARP:
                        skb_pull(skb,2);
-                       skb->h.raw    = skb->data;
-                       skb->nh.raw   = skb->data;
+                       skb_reset_transport_header(skb);
+                       skb_reset_network_header(skb);
                        skb->dev      = dev;
                        skb->pkt_type = PACKET_HOST;
                        skb->protocol = htons(ETH_P_ARP);
index 7f818bb..930e491 100644 (file)
@@ -121,7 +121,7 @@ int ax25_rebuild_header(struct sk_buff *skb)
                digipeat = route->digipeat;
                dev = route->dev;
                ip_mode = route->ip_mode;
-       };
+       }
 
        if (dev == NULL)
                dev = skb->dev;
@@ -171,7 +171,7 @@ int ax25_rebuild_header(struct sk_buff *skb)
                        src_c = *(ax25_address *)(bp + 8);
 
                        skb_pull(ourskb, AX25_HEADER_LEN - 1);  /* Keep PID */
-                       ourskb->nh.raw = ourskb->data;
+                       skb_reset_network_header(ourskb);
 
                        ax25=ax25_send_frame(
                            ourskb,
index 2238350..92b517a 100644 (file)
@@ -148,8 +148,9 @@ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
 
                        if (ka9qfrag == 1) {
                                skb_reserve(skbn, frontlen + 2);
-                               skbn->nh.raw = skbn->data + (skb->nh.raw - skb->data);
-                               memcpy(skb_put(skbn, len), skb->data, len);
+                               skb_set_network_header(skbn,
+                                                     skb_network_offset(skb));
+                               skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                                p = skb_push(skbn, 2);
 
                                *p++ = AX25_P_SEGMENT;
@@ -161,8 +162,9 @@ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
                                }
                        } else {
                                skb_reserve(skbn, frontlen + 1);
-                               skbn->nh.raw = skbn->data + (skb->nh.raw - skb->data);
-                               memcpy(skb_put(skbn, len), skb->data, len);
+                               skb_set_network_header(skbn,
+                                                     skb_network_offset(skb));
+                               skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                                p = skb_push(skbn, 1);
                                *p = AX25_P_TEXT;
                        }
@@ -205,7 +207,7 @@ static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
        if (skb == NULL)
                return;
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        if (ax25->modulus == AX25_MODULUS) {
                frame = skb_push(skb, 1);
index b6c577e..5fe9b2a 100644 (file)
@@ -162,7 +162,7 @@ void ax25_send_control(ax25_cb *ax25, int frametype, int poll_bit, int type)
 
        skb_reserve(skb, ax25->ax25_dev->dev->hard_header_len);
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        /* Assume a response - address structure for DTE */
        if (ax25->modulus == AX25_MODULUS) {
@@ -205,7 +205,7 @@ void ax25_return_dm(struct net_device *dev, ax25_address *src, ax25_address *des
                return; /* Next SABM will get DM'd */
 
        skb_reserve(skb, dev->hard_header_len);
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        ax25_digi_invert(digi, &retdigi);
 
index c7228cf..d942b94 100644 (file)
@@ -221,7 +221,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                copied = len;
        }
 
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 
        skb_free_datagram(sk, skb);
index b85d149..ab2db55 100644 (file)
@@ -326,7 +326,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
                return 0;
        }
 
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
 
        /* Verify and pull out header */
        if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
@@ -364,26 +364,28 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 
        case BNEP_COMPRESSED_SRC_ONLY:
                memcpy(__skb_put(nskb, ETH_ALEN), s->eh.h_dest, ETH_ALEN);
-               memcpy(__skb_put(nskb, ETH_ALEN), skb->mac.raw, ETH_ALEN);
+               memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), ETH_ALEN);
                put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
                break;
 
        case BNEP_COMPRESSED_DST_ONLY:
-               memcpy(__skb_put(nskb, ETH_ALEN), skb->mac.raw, ETH_ALEN);
-               memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source, ETH_ALEN + 2);
+               memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb),
+                      ETH_ALEN);
+               memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source,
+                      ETH_ALEN + 2);
                break;
 
        case BNEP_GENERAL:
-               memcpy(__skb_put(nskb, ETH_ALEN * 2), skb->mac.raw, ETH_ALEN * 2);
+               memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb),
+                      ETH_ALEN * 2);
                put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
                break;
        }
 
-       memcpy(__skb_put(nskb, skb->len), skb->data, skb->len);
+       skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
        kfree_skb(skb);
 
        s->stats.rx_packets++;
-       nskb->dev       = dev;
        nskb->ip_summed = CHECKSUM_NONE;
        nskb->protocol  = eth_type_trans(nskb, dev);
        netif_rx_ni(nskb);
index 3933608..66bef1c 100644 (file)
@@ -124,7 +124,7 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
        }
 
        if (skb && (skb->len > 0))
-               memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
+               skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len);
 
        memcpy(skb_put(nskb, count), buf, count);
 
@@ -256,7 +256,7 @@ static void cmtp_process_transmit(struct cmtp_session *session)
                        hdr[2] = size >> 8;
                }
 
-               memcpy(skb_put(nskb, size), skb->data, size);
+               skb_copy_from_linear_data(skb, skb_put(nskb, size), size);
                skb_pull(skb, size);
 
                if (skb->len > 0) {
index f3403fd..63980bd 100644 (file)
@@ -72,11 +72,11 @@ void hci_acl_connect(struct hci_conn *conn)
                        inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                cp.pscan_mode     = ie->data.pscan_mode;
-               cp.clock_offset   = ie->data.clock_offset | __cpu_to_le16(0x8000);
+               cp.clock_offset   = ie->data.clock_offset | cpu_to_le16(0x8000);
                memcpy(conn->dev_class, ie->data.dev_class, 3);
        }
 
-       cp.pkt_type = __cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK);
+       cp.pkt_type = cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch  = 0x01;
        else
@@ -107,7 +107,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
 
        conn->state = BT_DISCONN;
 
-       cp.handle = __cpu_to_le16(conn->handle);
+       cp.handle = cpu_to_le16(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, OGF_LINK_CTL,
                                OCF_DISCONNECT, sizeof(cp), &cp);
@@ -123,8 +123,8 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
        conn->state = BT_CONNECT;
        conn->out = 1;
 
-       cp.pkt_type = __cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
-       cp.handle   = __cpu_to_le16(handle);
+       cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
+       cp.handle   = cpu_to_le16(handle);
 
        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, sizeof(cp), &cp);
 }
@@ -348,7 +348,7 @@ int hci_conn_auth(struct hci_conn *conn)
 
        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_auth_requested cp;
-               cp.handle = __cpu_to_le16(conn->handle);
+               cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_AUTH_REQUESTED, sizeof(cp), &cp);
        }
        return 0;
@@ -368,7 +368,7 @@ int hci_conn_encrypt(struct hci_conn *conn)
 
        if (hci_conn_auth(conn)) {
                struct hci_cp_set_conn_encrypt cp;
-               cp.handle  = __cpu_to_le16(conn->handle);
+               cp.handle  = cpu_to_le16(conn->handle);
                cp.encrypt = 1;
                hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp);
        }
@@ -383,7 +383,7 @@ int hci_conn_change_link_key(struct hci_conn *conn)
 
        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_change_conn_link_key cp;
-               cp.handle = __cpu_to_le16(conn->handle);
+               cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp);
        }
        return 0;
@@ -423,7 +423,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn)
 
        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_exit_sniff_mode cp;
-               cp.handle = __cpu_to_le16(conn->handle);
+               cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, OGF_LINK_POLICY,
                                OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp);
        }
@@ -452,21 +452,21 @@ void hci_conn_enter_sniff_mode(struct hci_conn *conn)
 
        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
-               cp.handle             = __cpu_to_le16(conn->handle);
-               cp.max_latency        = __constant_cpu_to_le16(0);
-               cp.min_remote_timeout = __constant_cpu_to_le16(0);
-               cp.min_local_timeout  = __constant_cpu_to_le16(0);
+               cp.handle             = cpu_to_le16(conn->handle);
+               cp.max_latency        = cpu_to_le16(0);
+               cp.min_remote_timeout = cpu_to_le16(0);
+               cp.min_local_timeout  = cpu_to_le16(0);
                hci_send_cmd(hdev, OGF_LINK_POLICY,
                                OCF_SNIFF_SUBRATE, sizeof(cp), &cp);
        }
 
        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_sniff_mode cp;
-               cp.handle       = __cpu_to_le16(conn->handle);
-               cp.max_interval = __cpu_to_le16(hdev->sniff_max_interval);
-               cp.min_interval = __cpu_to_le16(hdev->sniff_min_interval);
-               cp.attempt      = __constant_cpu_to_le16(4);
-               cp.timeout      = __constant_cpu_to_le16(1);
+               cp.handle       = cpu_to_le16(conn->handle);
+               cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
+               cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
+               cp.attempt      = cpu_to_le16(4);
+               cp.timeout      = cpu_to_le16(1);
                hci_send_cmd(hdev, OGF_LINK_POLICY,
                                OCF_SNIFF_MODE, sizeof(cp), &cp);
        }
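
The hci_conn.c hunks above switch the HCI command setup from the double-underscore byteorder helpers to the plain cpu_to_le16() spelling; either way, every multi-byte field is stored little-endian before the parameter block is handed to hci_send_cmd(). A minimal userspace analogue of what the helper does (illustrative only: htole16() from <endian.h> stands in for the kernel's cpu_to_le16(), and struct cmd_params is a made-up stand-in for the hci_cp_* structures):

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical little-endian command parameter block, similar in
 * spirit to the hci_cp_* structures filled in above. */
struct cmd_params {
    uint16_t handle;
    uint16_t pkt_type;
};

int main(void)
{
    struct cmd_params cp;
    unsigned char wire[sizeof(cp)];

    /* Convert host-order values to little-endian before they are
     * copied into the buffer that goes on the wire. */
    cp.handle   = htole16(0x002a);
    cp.pkt_type = htole16(0xcc18);

    memcpy(wire, &cp, sizeof(cp));
    printf("%02x %02x %02x %02x\n", wire[0], wire[1], wire[2], wire[3]);
    return 0;
}

On a little-endian host the conversion is a no-op; on a big-endian host it byte-swaps, which is why the helper is applied unconditionally rather than only on some architectures.
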
index 4917919..aa4b56a 100644 (file)
@@ -149,7 +149,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
        default:
                err = -ETIMEDOUT;
                break;
-       };
+       }
 
        hdev->req_status = hdev->req_result = 0;
 
@@ -216,10 +216,10 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
-               cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
+               cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
-               cp.acl_max_pkt = __cpu_to_le16(0xffff);
-               cp.sco_max_pkt = __cpu_to_le16(0xffff);
+               cp.acl_max_pkt = cpu_to_le16(0xffff);
+               cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
 #endif
@@ -240,11 +240,11 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
        }
 
        /* Page timeout ~20 secs */
-       param = __cpu_to_le16(0x8000);
+       param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);
 
        /* Connection accept timeout ~20 secs */
-       param = __cpu_to_le16(0x7d00);
+       param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
 }
 
@@ -1034,7 +1034,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
        }
 
        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
-       hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
+       hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf));
        hdr->plen   = plen;
 
        if (plen)
@@ -1060,7 +1060,7 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
 
        hdr = (void *) hdev->sent_cmd->data;
 
-       if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
+       if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf)))
                return NULL;
 
        BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
@@ -1074,11 +1074,11 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
        struct hci_acl_hdr *hdr;
        int len = skb->len;
 
-       hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
-       hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
-       hdr->dlen   = __cpu_to_le16(len);
-
-       skb->h.raw = (void *) hdr;
+       skb_push(skb, HCI_ACL_HDR_SIZE);
+       skb_reset_transport_header(skb);
+       hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
+       hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
+       hdr->dlen   = cpu_to_le16(len);
 }
 
 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
@@ -1140,11 +1140,12 @@ int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
                return -EINVAL;
        }
 
-       hdr.handle = __cpu_to_le16(conn->handle);
+       hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;
 
-       skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
-       memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);
+       skb_push(skb, HCI_SCO_HDR_SIZE);
+       skb_reset_transport_header(skb);
+       memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
 
        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
@@ -1387,7 +1388,7 @@ static void hci_rx_task(unsigned long arg)
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
-                       };
+                       }
                }
 
                /* Process frame */
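
The ACL and SCO header hunks above stop assigning skb->h.raw directly: the header is pushed with skb_push(), then skb_reset_transport_header() records where it starts and skb_transport_header() reads it back, so the location is tracked relative to the buffer rather than kept as a bare pointer. A rough standalone sketch of that pattern (struct pkt and its helpers are invented for illustration and are not the kernel's sk_buff API):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy packet buffer: the payload is written at an offset, leaving
 * headroom so headers can be prepended later (hypothetical type). */
struct pkt {
    unsigned char buf[64];
    size_t data;       /* offset of the current packet start */
    size_t transport;  /* offset of the transport header     */
};

static unsigned char *pkt_push(struct pkt *p, size_t len)
{
    p->data -= len;               /* grow the packet towards the head */
    return p->buf + p->data;
}

static void pkt_reset_transport_header(struct pkt *p)
{
    p->transport = p->data;       /* remember an offset, not a pointer */
}

int main(void)
{
    struct pkt p = { .data = 16, .transport = 16 };
    unsigned char hdr[4] = { 0x2a, 0x00, 0x05, 0x00 };

    memcpy(p.buf + p.data, "hello", 5);                  /* payload        */
    memcpy(pkt_push(&p, sizeof(hdr)), hdr, sizeof(hdr)); /* prepend header */
    pkt_reset_transport_header(&p);

    printf("transport header at offset %zu, first byte 0x%02x\n",
           p.transport, (unsigned)p.buf[p.transport]);
    return 0;
}

Going through an accessor instead of caching a raw pointer keeps the header reference valid even if the underlying buffer is later moved or cloned.
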
index 936d3fc..447ba71 100644 (file)
@@ -783,7 +783,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                if (conn->type == ACL_LINK && hdev->link_policy) {
                        struct hci_cp_write_link_policy cp;
                        cp.handle = ev->handle;
-                       cp.policy = __cpu_to_le16(hdev->link_policy);
+                       cp.policy = cpu_to_le16(hdev->link_policy);
                        hci_send_cmd(hdev, OGF_LINK_POLICY,
                                OCF_WRITE_LINK_POLICY, sizeof(cp), &cp);
                }
@@ -793,8 +793,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                        struct hci_cp_change_conn_ptype cp;
                        cp.handle = ev->handle;
                        cp.pkt_type = (conn->type == ACL_LINK) ?
-                               __cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK):
-                               __cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
+                               cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK):
+                               cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
 
                        hci_send_cmd(hdev, OGF_LINK_CTL,
                                OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
@@ -970,7 +970,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
                        if (!ev->status) {
                                struct hci_cp_set_conn_encrypt cp;
-                               cp.handle  = __cpu_to_le16(conn->handle);
+                               cp.handle  = cpu_to_le16(conn->handle);
                                cp.encrypt = 1;
                                hci_send_cmd(conn->hdev, OGF_LINK_CTL,
                                        OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp);
index 71f5cfb..832b5f4 100644 (file)
@@ -375,7 +375,7 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                copied = len;
        }
 
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 
        hci_sock_cmsg(sk, msg, skb);
index e83ee82..a586787 100644 (file)
@@ -459,8 +459,8 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
                        sk->sk_state = BT_DISCONN;
                        l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
 
-                       req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
-                       req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
+                       req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
+                       req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
                        l2cap_send_cmd(conn, l2cap_get_ident(conn),
                                        L2CAP_DISCONN_REQ, sizeof(req), &req);
                } else {
@@ -652,7 +652,7 @@ static int l2cap_do_connect(struct sock *sk)
                if (sk->sk_type == SOCK_SEQPACKET) {
                        struct l2cap_conn_req req;
                        l2cap_pi(sk)->ident = l2cap_get_ident(conn);
-                       req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
+                       req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
                        req.psm  = l2cap_pi(sk)->psm;
                        l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                                        L2CAP_CONN_REQ, sizeof(req), &req);
@@ -868,8 +868,8 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
 
        /* Create L2CAP header */
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-       lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
-       lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
 
        if (sk->sk_type == SOCK_DGRAM)
                put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
@@ -1096,7 +1096,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
                } else if (sk->sk_state == BT_CONNECT) {
                        struct l2cap_conn_req req;
                        l2cap_pi(sk)->ident = l2cap_get_ident(conn);
-                       req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
+                       req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
                        req.psm  = l2cap_pi(sk)->psm;
                        l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
                }
@@ -1192,13 +1192,13 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
                return NULL;
 
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-       lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
-       lh->cid = __cpu_to_le16(0x0001);
+       lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
+       lh->cid = cpu_to_le16(0x0001);
 
        cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
        cmd->code  = code;
        cmd->ident = ident;
-       cmd->len   = __cpu_to_le16(dlen);
+       cmd->len   = cpu_to_le16(dlen);
 
        if (dlen) {
                count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
@@ -1316,11 +1316,11 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
                break;
 
        case 2:
-               *((u16 *) opt->val) = __cpu_to_le16(val);
+               *((u16 *) opt->val) = cpu_to_le16(val);
                break;
 
        case 4:
-               *((u32 *) opt->val) = __cpu_to_le32(val);
+               *((u32 *) opt->val) = cpu_to_le32(val);
                break;
 
        default:
@@ -1346,8 +1346,8 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
        //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
        //   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
 
-       req->dcid  = __cpu_to_le16(pi->dcid);
-       req->flags = __cpu_to_le16(0);
+       req->dcid  = cpu_to_le16(pi->dcid);
+       req->flags = cpu_to_le16(0);
 
        return ptr - data;
 }
@@ -1383,9 +1383,9 @@ static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
        else
                flags = 0x0001;
 
-       rsp->scid   = __cpu_to_le16(l2cap_pi(sk)->dcid);
-       rsp->result = __cpu_to_le16(result ? *result : 0);
-       rsp->flags  = __cpu_to_le16(flags);
+       rsp->scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
+       rsp->result = cpu_to_le16(result ? *result : 0);
+       rsp->flags  = cpu_to_le16(flags);
 
        return ptr - data;
 }
@@ -1470,10 +1470,10 @@ response:
        bh_unlock_sock(parent);
 
 sendresp:
-       rsp.scid   = __cpu_to_le16(scid);
-       rsp.dcid   = __cpu_to_le16(dcid);
-       rsp.result = __cpu_to_le16(result);
-       rsp.status = __cpu_to_le16(status);
+       rsp.scid   = cpu_to_le16(scid);
+       rsp.dcid   = cpu_to_le16(dcid);
+       rsp.result = cpu_to_le16(result);
+       rsp.status = cpu_to_le16(status);
        l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
        return 0;
 }
@@ -1613,8 +1613,8 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
                l2cap_sock_set_timer(sk, HZ * 5);
                {
                        struct l2cap_disconn_req req;
-                       req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
-                       req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
+                       req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
+                       req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
                        l2cap_send_cmd(conn, l2cap_get_ident(conn),
                                        L2CAP_DISCONN_REQ, sizeof(req), &req);
                }
@@ -1652,8 +1652,8 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
        if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
                return 0;
 
-       rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
-       rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
+       rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+       rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
        l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
 
        sk->sk_shutdown = SHUTDOWN_MASK;
@@ -1696,8 +1696,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
 
        BT_DBG("type 0x%4.4x", type);
 
-       rsp.type   = __cpu_to_le16(type);
-       rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
+       rsp.type   = cpu_to_le16(type);
+       rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
        l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
 
        return 0;
@@ -1794,7 +1794,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
                        BT_DBG("error %d", err);
 
                        /* FIXME: Map err to a valid reason */
-                       rej.reason = __cpu_to_le16(0);
+                       rej.reason = cpu_to_le16(0);
                        l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
                }
 
@@ -1993,10 +1993,10 @@ static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
                        result = L2CAP_CR_SEC_BLOCK;
                }
 
-               rsp.scid   = __cpu_to_le16(l2cap_pi(sk)->dcid);
-               rsp.dcid   = __cpu_to_le16(l2cap_pi(sk)->scid);
-               rsp.result = __cpu_to_le16(result);
-               rsp.status = __cpu_to_le16(0);
+               rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
+               rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
+               rsp.result = cpu_to_le16(result);
+               rsp.status = cpu_to_le16(0);
                l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                                L2CAP_CONN_RSP, sizeof(rsp), &rsp);
 
@@ -2041,10 +2041,10 @@ static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
                        result = L2CAP_CR_SEC_BLOCK;
                }
 
-               rsp.scid   = __cpu_to_le16(l2cap_pi(sk)->dcid);
-               rsp.dcid   = __cpu_to_le16(l2cap_pi(sk)->scid);
-               rsp.result = __cpu_to_le16(result);
-               rsp.status = __cpu_to_le16(0);
+               rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
+               rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
+               rsp.result = cpu_to_le16(result);
+               rsp.status = cpu_to_le16(0);
                l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                                L2CAP_CONN_RSP, sizeof(rsp), &rsp);
 
@@ -2107,7 +2107,8 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
                if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
                        goto drop;
 
-               memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
+               skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
+                             skb->len);
                conn->rx_len = len - skb->len;
        } else {
                BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
@@ -2128,7 +2129,8 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
                        goto drop;
                }
 
-               memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
+               skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
+                             skb->len);
                conn->rx_len -= skb->len;
 
                if (!conn->rx_len) {
index 94f4573..fe7df90 100644 (file)
@@ -1567,7 +1567,7 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb)
 
        /* Trim FCS */
        skb->len--; skb->tail--;
-       fcs = *(u8 *) skb->tail;
+       fcs = *(u8 *)skb_tail_pointer(skb);
 
        if (__check_fcs(skb->data, type, fcs)) {
                BT_ERR("bad checksum in packet");
@@ -1851,18 +1851,18 @@ static void rfcomm_worker(void)
        BT_DBG("");
 
        while (!atomic_read(&terminate)) {
+               set_current_state(TASK_INTERRUPTIBLE);
                if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
                        /* No pending events. Let's sleep.
                         * Incoming connections and data will wake us up. */
-                       set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                }
+               set_current_state(TASK_RUNNING);
 
                /* Process stuff */
                clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
                rfcomm_process_sessions();
        }
-       set_current_state(TASK_RUNNING);
        return;
 }
 
index ae43914..3f5163e 100644 (file)
@@ -393,7 +393,7 @@ static void sco_sock_close(struct sock *sk)
        default:
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
-       };
+       }
 
        release_sock(sk);
 
index 2994387..848b8fa 100644 (file)
@@ -37,7 +37,9 @@ static int __init br_init(void)
                return -EADDRINUSE;
        }
 
-       br_fdb_init();
+       err = br_fdb_init();
+       if (err)
+               goto err_out1;
 
        err = br_netfilter_init();
        if (err)
@@ -47,7 +49,10 @@ static int __init br_init(void)
        if (err)
                goto err_out2;
 
-       br_netlink_init();
+       err = br_netlink_init();
+       if (err)
+               goto err_out3;
+
        brioctl_set(br_ioctl_deviceless_stub);
        br_handle_frame_hook = br_handle_frame;
 
@@ -55,7 +60,8 @@ static int __init br_init(void)
        br_fdb_put_hook = br_fdb_put;
 
        return 0;
-
+err_out3:
+       unregister_netdevice_notifier(&br_device_notifier);
 err_out2:
        br_netfilter_fini();
 err_out1:
index 905a39c..5e1892d 100644 (file)
@@ -37,7 +37,7 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        br->statistics.tx_packets++;
        br->statistics.tx_bytes += skb->len;
 
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);
 
        if (dest[0] & 1)
@@ -83,27 +83,21 @@ static int br_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-/* Allow setting mac address of pseudo-bridge to be same as
- * any of the bound interfaces
- */
+/* Allow setting mac address to any valid ethernet address. */
 static int br_set_mac_address(struct net_device *dev, void *p)
 {
        struct net_bridge *br = netdev_priv(dev);
        struct sockaddr *addr = p;
-       struct net_bridge_port *port;
-       int err = -EADDRNOTAVAIL;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EINVAL;
 
        spin_lock_bh(&br->lock);
-       list_for_each_entry(port, &br->port_list, list) {
-               if (!compare_ether_addr(port->dev->dev_addr, addr->sa_data)) {
-                       br_stp_change_bridge_id(br, addr->sa_data);
-                       err = 0;
-                       break;
-               }
-       }
+       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+       br_stp_change_bridge_id(br, addr->sa_data);
        spin_unlock_bh(&br->lock);
 
-       return err;
+       return 0;
 }
 
 static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
index 8d566c1..91b0170 100644 (file)
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/jhash.h>
+#include <linux/random.h>
 #include <asm/atomic.h>
+#include <asm/unaligned.h>
 #include "br_private.h"
 
 static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                      const unsigned char *addr);
 
-void __init br_fdb_init(void)
+static u32 fdb_salt __read_mostly;
+
+int __init br_fdb_init(void)
 {
        br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
                                         sizeof(struct net_bridge_fdb_entry),
                                         0,
                                         SLAB_HWCACHE_ALIGN, NULL, NULL);
+       if (!br_fdb_cache)
+               return -ENOMEM;
+
+       get_random_bytes(&fdb_salt, sizeof(fdb_salt));
+       return 0;
 }
 
 void __exit br_fdb_fini(void)
@@ -44,24 +53,26 @@ void __exit br_fdb_fini(void)
 /* if topology_changing then use forward_delay (default 15 sec)
  * otherwise keep longer (default 5 minutes)
  */
-static __inline__ unsigned long hold_time(const struct net_bridge *br)
+static inline unsigned long hold_time(const struct net_bridge *br)
 {
        return br->topology_change ? br->forward_delay : br->ageing_time;
 }
 
-static __inline__ int has_expired(const struct net_bridge *br,
+static inline int has_expired(const struct net_bridge *br,
                                  const struct net_bridge_fdb_entry *fdb)
 {
        return !fdb->is_static
                && time_before_eq(fdb->ageing_timer + hold_time(br), jiffies);
 }
 
-static __inline__ int br_mac_hash(const unsigned char *mac)
+static inline int br_mac_hash(const unsigned char *mac)
 {
-       return jhash(mac, ETH_ALEN, 0) & (BR_HASH_SIZE - 1);
+       /* use 1 byte of OUI and 3 bytes of NIC */
+       u32 key = get_unaligned((u32 *)(mac + 2));
+       return jhash_1word(key, fdb_salt) & (BR_HASH_SIZE - 1);
 }
 
-static __inline__ void fdb_delete(struct net_bridge_fdb_entry *f)
+static inline void fdb_delete(struct net_bridge_fdb_entry *f)
 {
        hlist_del_rcu(&f->hlist);
        br_fdb_put(f);
@@ -128,7 +139,26 @@ void br_fdb_cleanup(unsigned long _data)
        mod_timer(&br->gc_timer, jiffies + HZ/10);
 }
 
+/* Completely flush all dynamic entries in forwarding database.*/
+void br_fdb_flush(struct net_bridge *br)
+{
+       int i;
 
+       spin_lock_bh(&br->hash_lock);
+       for (i = 0; i < BR_HASH_SIZE; i++) {
+               struct net_bridge_fdb_entry *f;
+               struct hlist_node *h, *n;
+               hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
+                       if (!f->is_static)
+                               fdb_delete(f);
+               }
+       }
+       spin_unlock_bh(&br->hash_lock);
+}
+
+/* Flush all entries referring to a specific port.
+ * If do_all is set, also flush static entries.
+ */
 void br_fdb_delete_by_port(struct net_bridge *br,
                           const struct net_bridge_port *p,
                           int do_all)
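
br_fdb_init() above now draws a random fdb_salt at module init and br_mac_hash() mixes it into the bucket hash with jhash_1word(), so which forwarding-database chain a given MAC lands in can no longer be predicted from the address alone. A self-contained sketch of the idea (the mixer below is a generic 32-bit hash standing in for jhash_1word(); the constants and helper names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define HASH_SIZE 256                /* must be a power of two */

static uint32_t salt;

/* Generic 32-bit mixer standing in for jhash_1word(). */
static uint32_t mix(uint32_t key, uint32_t seed)
{
    uint32_t h = key ^ seed;

    h ^= h >> 16; h *= 0x85ebca6b;
    h ^= h >> 13; h *= 0xc2b2ae35;
    h ^= h >> 16;
    return h;
}

/* Hash 1 byte of OUI and 3 bytes of NIC (bytes 2..5 of the MAC),
 * mixed with a per-boot random salt, into a bucket index. */
static unsigned int mac_hash(const unsigned char *mac)
{
    uint32_t key;

    memcpy(&key, mac + 2, sizeof(key));
    return mix(key, salt) & (HASH_SIZE - 1);
}

int main(void)
{
    const unsigned char mac[6] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };

    srand((unsigned)time(NULL));
    salt = (uint32_t)rand();
    printf("bucket %u\n", mac_hash(mac));
    return 0;
}

Salting the hash per boot makes it much harder for a sender to pick source addresses that all collide into one chain and degrade bridge lookups.
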
index 3e45c1a..ada7f49 100644 (file)
@@ -71,7 +71,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 
        indev = skb->dev;
        skb->dev = to->dev;
-       skb->ip_summed = CHECKSUM_NONE;
+       skb_forward_csum(skb);
 
        NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
                        br_forward_finish);
index f3a2e29..690573b 100644 (file)
@@ -152,6 +152,8 @@ static void del_nbp(struct net_bridge_port *p)
        br_stp_disable_port(p);
        spin_unlock_bh(&br->lock);
 
+       br_ifinfo_notify(RTM_DELLINK, p);
+
        br_fdb_delete_by_port(br, p, 1);
 
        list_del_rcu(&p->list);
@@ -203,7 +205,7 @@ static struct net_device *new_bridge_dev(const char *name)
        memcpy(br->group_addr, br_group_address, ETH_ALEN);
 
        br->feature_mask = dev->features;
-       br->stp_enabled = 0;
+       br->stp_enabled = BR_NO_STP;
        br->designated_root = br->bridge_id;
        br->root_path_cost = 0;
        br->root_port = 0;
@@ -434,6 +436,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
                br_stp_enable_port(p);
        spin_unlock_bh(&br->lock);
 
+       br_ifinfo_notify(RTM_NEWLINK, p);
+
        dev_set_mtu(br->dev, br_min_mtu(br));
 
        kobject_uevent(&p->kobj, KOBJ_ADD);
index 35b94f9..420bbb9 100644 (file)
@@ -112,46 +112,59 @@ static int br_handle_local_finish(struct sk_buff *skb)
  */
 static inline int is_link_local(const unsigned char *dest)
 {
-       return memcmp(dest, br_group_address, 5) == 0 && (dest[5] & 0xf0) == 0;
+       const u16 *a = (const u16 *) dest;
+       static const u16 *const b = (const u16 *const ) br_group_address;
+       static const u16 m = __constant_cpu_to_be16(0xfff0);
+
+       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
 }
 
 /*
  * Called via br_handle_frame_hook.
- * Return 0 if *pskb should be processed furthur
- *       1 if *pskb is handled
+ * Return NULL if skb is handled
  * note: already called with rcu_read_lock (preempt_disabled)
  */
-int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb)
+struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
 {
-       struct sk_buff *skb = *pskb;
        const unsigned char *dest = eth_hdr(skb)->h_dest;
 
        if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
-               goto err;
+               goto drop;
 
        if (unlikely(is_link_local(dest))) {
-               skb->pkt_type = PACKET_HOST;
-               return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
-                              NULL, br_handle_local_finish) != 0;
+               /* Pause frames shouldn't be passed up by driver anyway */
+               if (skb->protocol == htons(ETH_P_PAUSE))
+                       goto drop;
+
+               /* Process STP BPDU's through normal netif_receive_skb() path */
+               if (p->br->stp_enabled != BR_NO_STP) {
+                       if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
+                                   NULL, br_handle_local_finish))
+                               return NULL;
+                       else
+                               return skb;
+               }
        }
 
-       if (p->state == BR_STATE_FORWARDING || p->state == BR_STATE_LEARNING) {
+       switch (p->state) {
+       case BR_STATE_FORWARDING:
+
                if (br_should_route_hook) {
-                       if (br_should_route_hook(pskb))
-                               return 0;
-                       skb = *pskb;
+                       if (br_should_route_hook(&skb))
+                               return skb;
                        dest = eth_hdr(skb)->h_dest;
                }
-
+               /* fall through */
+       case BR_STATE_LEARNING:
                if (!compare_ether_addr(p->br->dev->dev_addr, dest))
                        skb->pkt_type = PACKET_HOST;
 
                NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
                        br_handle_frame_finish);
-               return 1;
+               break;
+       default:
+drop:
+               kfree_skb(skb);
        }
-
-err:
-       kfree_skb(skb);
-       return 1;
+       return NULL;
 }
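
The rewritten is_link_local() above matches the 01:80:C2:00:00:00 through 01:80:C2:00:00:0F bridge group address range with three 16-bit XORs, masking the last word with a big-endian 0xfff0 so only the low nibble of the final byte is ignored. A standalone sketch of the same comparison (the mask is built byte-wise here to stay endian-neutral; the names are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* IEEE 802.1D bridge group address; :00 to :0F covers STP BPDUs,
 * pause frames, LACP and the other link-constrained protocols. */
static const unsigned char group_addr[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

static int is_link_local(const unsigned char *dest)
{
    static const unsigned char maskb[2] = { 0xff, 0xf0 };
    uint16_t a[3], b[3], m;

    memcpy(a, dest, 6);
    memcpy(b, group_addr, 6);
    memcpy(&m, maskb, 2);        /* same bytes as cpu_to_be16(0xfff0) */

    return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
}

int main(void)
{
    const unsigned char stp[6]   = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
    const unsigned char pause[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
    const unsigned char other[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

    printf("%d %d %d\n", is_link_local(stp), is_link_local(pause),
           is_link_local(other));
    return 0;
}

With these inputs it reports the STP and pause destinations as link-local and the ordinary multicast address as not (output "1 1 0").
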
index 147015f..eda0fbf 100644 (file)
@@ -137,7 +137,8 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                b.topology_change = br->topology_change;
                b.topology_change_detected = br->topology_change_detected;
                b.root_port = br->root_port;
-               b.stp_enabled = br->stp_enabled;
+
+               b.stp_enabled = (br->stp_enabled != BR_NO_STP);
                b.ageing_time = jiffies_to_clock_t(br->ageing_time);
                b.hello_timer_value = br_timer_value(&br->hello_timer);
                b.tcn_timer_value = br_timer_value(&br->tcn_timer);
@@ -251,7 +252,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
 
-               br->stp_enabled = args[1]?1:0;
+               br_stp_set_enabled(br, args[1]);
                return 0;
 
        case BRCTL_SET_BRIDGE_PRIORITY:
index 5439a3c..9b2986b 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
 #include <linux/netfilter_bridge.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_ipv6.h>
@@ -48,8 +50,8 @@
 
 #define skb_origaddr(skb)       (((struct bridge_skb_cb *) \
                                 (skb->nf_bridge->data))->daddr.ipv4)
-#define store_orig_dstaddr(skb)         (skb_origaddr(skb) = (skb)->nh.iph->daddr)
-#define dnat_took_place(skb)    (skb_origaddr(skb) != (skb)->nh.iph->daddr)
+#define store_orig_dstaddr(skb)         (skb_origaddr(skb) = ip_hdr(skb)->daddr)
+#define dnat_took_place(skb)    (skb_origaddr(skb) != ip_hdr(skb)->daddr)
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table_header *brnf_sysctl_header;
@@ -57,8 +59,10 @@ static int brnf_call_iptables __read_mostly = 1;
 static int brnf_call_ip6tables __read_mostly = 1;
 static int brnf_call_arptables __read_mostly = 1;
 static int brnf_filter_vlan_tagged __read_mostly = 1;
+static int brnf_filter_pppoe_tagged __read_mostly = 1;
 #else
 #define brnf_filter_vlan_tagged 1
+#define brnf_filter_pppoe_tagged 1
 #endif
 
 static inline __be16 vlan_proto(const struct sk_buff *skb)
@@ -81,6 +85,22 @@ static inline __be16 vlan_proto(const struct sk_buff *skb)
         vlan_proto(skb) == htons(ETH_P_ARP) && \
         brnf_filter_vlan_tagged)
 
+static inline __be16 pppoe_proto(const struct sk_buff *skb)
+{
+       return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+                           sizeof(struct pppoe_hdr)));
+}
+
+#define IS_PPPOE_IP(skb) \
+       (skb->protocol == htons(ETH_P_PPP_SES) && \
+        pppoe_proto(skb) == htons(PPP_IP) && \
+        brnf_filter_pppoe_tagged)
+
+#define IS_PPPOE_IPV6(skb) \
+       (skb->protocol == htons(ETH_P_PPP_SES) && \
+        pppoe_proto(skb) == htons(PPP_IPV6) && \
+        brnf_filter_pppoe_tagged)
+
 /* We need these fake structures to make netfilter happy --
  * lots of places assume that skb->dst != NULL, which isn't
  * all that unreasonable.
@@ -128,8 +148,11 @@ static inline void nf_bridge_save_header(struct sk_buff *skb)
 
        if (skb->protocol == htons(ETH_P_8021Q))
                header_size += VLAN_HLEN;
+       else if (skb->protocol == htons(ETH_P_PPP_SES))
+               header_size += PPPOE_SES_HLEN;
 
-       memcpy(skb->nf_bridge->data, skb->data - header_size, header_size);
+       skb_copy_from_linear_data_offset(skb, -header_size,
+                                        skb->nf_bridge->data, header_size);
 }
 
 /*
@@ -143,15 +166,20 @@ int nf_bridge_copy_header(struct sk_buff *skb)
 
        if (skb->protocol == htons(ETH_P_8021Q))
                header_size += VLAN_HLEN;
+       else if (skb->protocol == htons(ETH_P_PPP_SES))
+               header_size += PPPOE_SES_HLEN;
 
        err = skb_cow(skb, header_size);
        if (err)
                return err;
 
-       memcpy(skb->data - header_size, skb->nf_bridge->data, header_size);
+       skb_copy_to_linear_data_offset(skb, -header_size,
+                                      skb->nf_bridge->data, header_size);
 
        if (skb->protocol == htons(ETH_P_8021Q))
                __skb_push(skb, VLAN_HLEN);
+       else if (skb->protocol == htons(ETH_P_PPP_SES))
+               __skb_push(skb, PPPOE_SES_HLEN);
        return 0;
 }
 
@@ -174,7 +202,10 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
        skb->dev = nf_bridge->physindev;
        if (skb->protocol == htons(ETH_P_8021Q)) {
                skb_push(skb, VLAN_HLEN);
-               skb->nh.raw -= VLAN_HLEN;
+               skb->network_header -= VLAN_HLEN;
+       } else if (skb->protocol == htons(ETH_P_PPP_SES)) {
+               skb_push(skb, PPPOE_SES_HLEN);
+               skb->network_header -= PPPOE_SES_HLEN;
        }
        NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
                       br_handle_frame_finish, 1);
@@ -255,7 +286,10 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
        else {
                if (skb->protocol == htons(ETH_P_8021Q)) {
                        skb_pull(skb, VLAN_HLEN);
-                       skb->nh.raw += VLAN_HLEN;
+                       skb->network_header += VLAN_HLEN;
+               } else if (skb->protocol == htons(ETH_P_PPP_SES)) {
+                       skb_pull(skb, PPPOE_SES_HLEN);
+                       skb->network_header += PPPOE_SES_HLEN;
                }
                skb->dst->output(skb);
        }
@@ -265,7 +299,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 static int br_nf_pre_routing_finish(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        int err;
 
@@ -325,7 +359,11 @@ bridged_dnat:
                                if (skb->protocol ==
                                    htons(ETH_P_8021Q)) {
                                        skb_push(skb, VLAN_HLEN);
-                                       skb->nh.raw -= VLAN_HLEN;
+                                       skb->network_header -= VLAN_HLEN;
+                               } else if(skb->protocol ==
+                                   htons(ETH_P_PPP_SES)) {
+                                       skb_push(skb, PPPOE_SES_HLEN);
+                                       skb->network_header -= PPPOE_SES_HLEN;
                                }
                                NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING,
                                               skb, skb->dev, NULL,
@@ -344,7 +382,10 @@ bridged_dnat:
        skb->dev = nf_bridge->physindev;
        if (skb->protocol == htons(ETH_P_8021Q)) {
                skb_push(skb, VLAN_HLEN);
-               skb->nh.raw -= VLAN_HLEN;
+               skb->network_header -= VLAN_HLEN;
+       } else if (skb->protocol == htons(ETH_P_PPP_SES)) {
+               skb_push(skb, PPPOE_SES_HLEN);
+               skb->network_header -= PPPOE_SES_HLEN;
        }
        NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
                       br_handle_frame_finish, 1);
@@ -372,9 +413,10 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb)
 /* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
 static int check_hbh_len(struct sk_buff *skb)
 {
-       unsigned char *raw = (u8 *) (skb->nh.ipv6h + 1);
+       unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
        u32 pkt_len;
-       int off = raw - skb->nh.raw;
+       const unsigned char *nh = skb_network_header(skb);
+       int off = raw - nh;
        int len = (raw[1] + 1) << 3;
 
        if ((raw + len) - skb->data > skb_headlen(skb))
@@ -384,9 +426,9 @@ static int check_hbh_len(struct sk_buff *skb)
        len -= 2;
 
        while (len > 0) {
-               int optlen = skb->nh.raw[off + 1] + 2;
+               int optlen = nh[off + 1] + 2;
 
-               switch (skb->nh.raw[off]) {
+               switch (nh[off]) {
                case IPV6_TLV_PAD0:
                        optlen = 1;
                        break;
@@ -395,17 +437,18 @@ static int check_hbh_len(struct sk_buff *skb)
                        break;
 
                case IPV6_TLV_JUMBO:
-                       if (skb->nh.raw[off + 1] != 4 || (off & 3) != 2)
+                       if (nh[off + 1] != 4 || (off & 3) != 2)
                                goto bad;
-                       pkt_len = ntohl(*(__be32 *) (skb->nh.raw + off + 2));
+                       pkt_len = ntohl(*(__be32 *) (nh + off + 2));
                        if (pkt_len <= IPV6_MAXPLEN ||
-                           skb->nh.ipv6h->payload_len)
+                           ipv6_hdr(skb)->payload_len)
                                goto bad;
                        if (pkt_len > skb->len - sizeof(struct ipv6hdr))
                                goto bad;
                        if (pskb_trim_rcsum(skb,
                                            pkt_len + sizeof(struct ipv6hdr)))
                                goto bad;
+                       nh = skb_network_header(skb);
                        break;
                default:
                        if (optlen > len)
@@ -439,7 +482,7 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto inhdr_error;
 
-       hdr = skb->nh.ipv6h;
+       hdr = ipv6_hdr(skb);
 
        if (hdr->version != 6)
                goto inhdr_error;
@@ -485,7 +528,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
        __u32 len;
        struct sk_buff *skb = *pskb;
 
-       if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb)) {
+       if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+           IS_PPPOE_IPV6(skb)) {
 #ifdef CONFIG_SYSCTL
                if (!brnf_call_ip6tables)
                        return NF_ACCEPT;
@@ -495,7 +539,10 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 
                if (skb->protocol == htons(ETH_P_8021Q)) {
                        skb_pull_rcsum(skb, VLAN_HLEN);
-                       skb->nh.raw += VLAN_HLEN;
+                       skb->network_header += VLAN_HLEN;
+               } else if (skb->protocol == htons(ETH_P_PPP_SES)) {
+                       skb_pull_rcsum(skb, PPPOE_SES_HLEN);
+                       skb->network_header += PPPOE_SES_HLEN;
                }
                return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
        }
@@ -504,7 +551,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
                return NF_ACCEPT;
 #endif
 
-       if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb))
+       if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
+           !IS_PPPOE_IP(skb))
                return NF_ACCEPT;
 
        if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
@@ -512,20 +560,23 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 
        if (skb->protocol == htons(ETH_P_8021Q)) {
                skb_pull_rcsum(skb, VLAN_HLEN);
-               skb->nh.raw += VLAN_HLEN;
+               skb->network_header += VLAN_HLEN;
+       } else if (skb->protocol == htons(ETH_P_PPP_SES)) {
+               skb_pull_rcsum(skb, PPPOE_SES_HLEN);
+               skb->network_header += PPPOE_SES_HLEN;
        }
 
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto inhdr_error;
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        if (iph->ihl < 5 || iph->version != 4)
                goto inhdr_error;
 
        if (!pskb_may_pull(skb, 4 * iph->ihl))
                goto inhdr_error;
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        if (ip_fast_csum((__u8 *) iph, iph->ihl) != 0)
                goto inhdr_error;
 
@@ -593,7 +644,10 @@ static int br_nf_forward_finish(struct sk_buff *skb)
        }
        if (skb->protocol == htons(ETH_P_8021Q)) {
                skb_push(skb, VLAN_HLEN);
-               skb->nh.raw -= VLAN_HLEN;
+               skb->network_header -= VLAN_HLEN;
+       } else if (skb->protocol == htons(ETH_P_PPP_SES)) {
+               skb_push(skb, PPPOE_SES_HLEN);
+               skb->network_header -= PPPOE_SES_HLEN;
        }
        NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in,
                       skb->dev, br_forward_finish, 1);
@@ -622,14 +676,18 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
        if (!parent)
                return NF_DROP;
 
-       if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb))
+       if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
+           IS_PPPOE_IP(skb))
                pf = PF_INET;
        else
                pf = PF_INET6;
 
        if (skb->protocol == htons(ETH_P_8021Q)) {
                skb_pull(*pskb, VLAN_HLEN);
-               (*pskb)->nh.raw += VLAN_HLEN;
+               (*pskb)->network_header += VLAN_HLEN;
+       } else if (skb->protocol == htons(ETH_P_PPP_SES)) {
+               skb_pull(*pskb, PPPOE_SES_HLEN);
+               (*pskb)->network_header += PPPOE_SES_HLEN;
        }
 
        nf_bridge = skb->nf_bridge;
@@ -665,13 +723,13 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb,
                if (!IS_VLAN_ARP(skb))
                        return NF_ACCEPT;
                skb_pull(*pskb, VLAN_HLEN);
-               (*pskb)->nh.raw += VLAN_HLEN;
+               (*pskb)->network_header += VLAN_HLEN;
        }
 
-       if (skb->nh.arph->ar_pln != 4) {
+       if (arp_hdr(skb)->ar_pln != 4) {
                if (IS_VLAN_ARP(skb)) {
                        skb_push(*pskb, VLAN_HLEN);
-                       (*pskb)->nh.raw -= VLAN_HLEN;
+                       (*pskb)->network_header -= VLAN_HLEN;
                }
                return NF_ACCEPT;
        }
@@ -721,7 +779,10 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
        }
        if (skb->protocol == htons(ETH_P_8021Q)) {
                skb_push(skb, VLAN_HLEN);
-               skb->nh.raw -= VLAN_HLEN;
+               skb->network_header -= VLAN_HLEN;
+       } else if (skb->protocol == htons(ETH_P_PPP_SES)) {
+               skb_push(skb, PPPOE_SES_HLEN);
+               skb->network_header -= PPPOE_SES_HLEN;
        }
 
        NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, skb->dev,
@@ -753,7 +814,8 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
 #ifdef CONFIG_NETFILTER_DEBUG
        /* Be very paranoid. This probably won't happen anymore, but let's
         * keep the check just to be sure... */
-       if (skb->mac.raw < skb->head || skb->mac.raw + ETH_HLEN > skb->data) {
+       if (skb_mac_header(skb) < skb->head ||
+           skb_mac_header(skb) + ETH_HLEN > skb->data) {
                printk(KERN_CRIT "br_netfilter: Argh!! br_nf_post_routing: "
                       "bad mac.raw pointer.\n");
                goto print_error;
@@ -766,7 +828,8 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
        if (!realoutdev)
                return NF_DROP;
 
-       if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb))
+       if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
+           IS_PPPOE_IP(skb))
                pf = PF_INET;
        else
                pf = PF_INET6;
@@ -787,7 +850,10 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
 
        if (skb->protocol == htons(ETH_P_8021Q)) {
                skb_pull(skb, VLAN_HLEN);
-               skb->nh.raw += VLAN_HLEN;
+               skb->network_header += VLAN_HLEN;
+       } else if (skb->protocol == htons(ETH_P_PPP_SES)) {
+               skb_pull(skb, PPPOE_SES_HLEN);
+               skb->network_header += PPPOE_SES_HLEN;
        }
 
        nf_bridge_save_header(skb);
@@ -808,7 +874,7 @@ print_error:
                if (realoutdev)
                        printk("[%s]", realoutdev->name);
        }
-       printk(" head:%p, raw:%p, data:%p\n", skb->head, skb->mac.raw,
+       printk(" head:%p, raw:%p, data:%p\n", skb->head, skb_mac_header(skb),
               skb->data);
        dump_stack();
        return NF_ACCEPT;
@@ -925,6 +991,14 @@ static ctl_table brnf_table[] = {
                .mode           = 0644,
                .proc_handler   = &brnf_sysctl_call_tables,
        },
+       {
+               .ctl_name       = NET_BRIDGE_NF_FILTER_PPPOE_TAGGED,
+               .procname       = "bridge-nf-filter-pppoe-tagged",
+               .data           = &brnf_filter_pppoe_tagged,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &brnf_sysctl_call_tables,
+       },
        { .ctl_name = 0 }
 };
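
The br_netfilter changes above teach the bridge netfilter path to look inside PPPoE session frames as well: pppoe_proto() peeks at the 2-byte PPP protocol field that sits right after the PPPoE session header, and every VLAN push/pull site gains a matching PPPOE_SES_HLEN (8-byte) adjustment. A small sketch of that header layout (raw byte offsets; the frame contents are made up, and PPP_IP is the standard 0x0021 protocol number for IPv4):

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN        14      /* dest MAC + source MAC + ethertype  */
#define PPPOE_HDR_LEN    6      /* ver/type, code, session id, length */
#define PPP_IP      0x0021      /* PPP protocol number for IPv4       */

/* Return the PPP protocol field of a PPPoE session frame: the two
 * bytes immediately after the PPPoE header, in network byte order. */
static uint16_t pppoe_proto(const unsigned char *frame)
{
    const unsigned char *p = frame + ETH_HLEN + PPPOE_HDR_LEN;

    return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
    /* Made-up PPPoE session frame carrying an IPv4 packet. */
    unsigned char frame[ETH_HLEN + PPPOE_HDR_LEN + 2] = { 0 };

    frame[12] = 0x88; frame[13] = 0x64;   /* ethertype ETH_P_PPP_SES */
    frame[14] = 0x11;                     /* PPPoE version 1, type 1 */
    frame[20] = 0x00; frame[21] = 0x21;   /* PPP protocol: IPv4      */

    if (pppoe_proto(frame) == PPP_IP)
        printf("PPPoE session frame carries IPv4\n");
    return 0;
}
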
 
index 7d68b24..35facc0 100644 (file)
@@ -11,8 +11,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/rtnetlink.h>
-#include <net/netlink.h>
+#include <net/rtnetlink.h>
 #include "br_private.h"
 
 static inline size_t br_nlmsg_size(void)
@@ -110,7 +109,6 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        struct net_device *dev;
        int idx;
 
-       read_lock(&dev_base_lock);
        for (dev = dev_base, idx = 0; dev; dev = dev->next) {
                /* not a bridge port */
                if (dev->br_port == NULL || idx < cb->args[0])
@@ -123,7 +121,6 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 skip:
                ++idx;
        }
-       read_unlock(&dev_base_lock);
 
        cb->args[0] = idx;
 
@@ -166,7 +163,7 @@ static int br_rtm_setlink(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
                return -EINVAL;
 
        /* if kernel STP is running, don't allow changes */
-       if (p->br->stp_enabled)
+       if (p->br->stp_enabled == BR_KERNEL_STP)
                return -EBUSY;
 
        if (!netif_running(dev) ||
@@ -179,18 +176,19 @@ static int br_rtm_setlink(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
 }
 
 
-static struct rtnetlink_link bridge_rtnetlink_table[RTM_NR_MSGTYPES] = {
-       [RTM_GETLINK - RTM_BASE] = { .dumpit    = br_dump_ifinfo, },
-       [RTM_SETLINK - RTM_BASE] = { .doit      = br_rtm_setlink, },
-};
-
-void __init br_netlink_init(void)
+int __init br_netlink_init(void)
 {
-       rtnetlink_links[PF_BRIDGE] = bridge_rtnetlink_table;
+       if (__rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo))
+               return -ENOBUFS;
+
+       /* Only the first call to __rtnl_register can fail */
+       __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL);
+
+       return 0;
 }
 
 void __exit br_netlink_fini(void)
 {
-       rtnetlink_links[PF_BRIDGE] = NULL;
+       rtnl_unregister_all(PF_BRIDGE);
 }
 
index 37357ed..c8451d3 100644 (file)
@@ -50,7 +50,6 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
        case NETDEV_CHANGEADDR:
                spin_lock_bh(&br->lock);
                br_fdb_changeaddr(p, dev->dev_addr);
-               br_ifinfo_notify(RTM_NEWLINK, p);
                br_stp_recalculate_bridge_id(br);
                spin_unlock_bh(&br->lock);
                break;
@@ -74,10 +73,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
                break;
 
        case NETDEV_UP:
-               spin_lock_bh(&br->lock);
-               if (netif_carrier_ok(dev) && (br->dev->flags & IFF_UP))
+               if (netif_carrier_ok(dev) && (br->dev->flags & IFF_UP)) {
+                       spin_lock_bh(&br->lock);
                        br_stp_enable_port(p);
-               spin_unlock_bh(&br->lock);
+                       spin_unlock_bh(&br->lock);
+               }
                break;
 
        case NETDEV_UNREGISTER:
@@ -85,5 +85,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
                break;
        }
 
+       /* Events that may cause spanning tree to refresh */
+       if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
+           event == NETDEV_CHANGE || event == NETDEV_DOWN)
+               br_ifinfo_notify(RTM_NEWLINK, p);
+
        return NOTIFY_DONE;
 }
index cc3f1c9..21bf3a9 100644 (file)
 #define BR_PORT_BITS   10
 #define BR_MAX_PORTS   (1<<BR_PORT_BITS)
 
-#define BR_VERSION     "2.2"
+#define BR_VERSION     "2.3"
+
+/* Path to usermode spanning tree program */
+#define BR_STP_PROG    "/sbin/bridge-stp"
 
 typedef struct bridge_id bridge_id;
 typedef struct mac_addr mac_addr;
@@ -107,7 +110,13 @@ struct net_bridge
 
        u8                              group_addr[ETH_ALEN];
        u16                             root_port;
-       unsigned char                   stp_enabled;
+
+       enum {
+               BR_NO_STP,              /* no spanning tree */
+               BR_KERNEL_STP,          /* old STP in kernel */
+               BR_USER_STP,            /* new RSTP in userspace */
+       } stp_enabled;
+
        unsigned char                   topology_change;
        unsigned char                   topology_change_detected;
 
@@ -127,14 +136,14 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
        return !memcmp(&br->bridge_id, &br->designated_root, 8);
 }
 
-
 /* br_device.c */
 extern void br_dev_setup(struct net_device *dev);
 extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
 
 /* br_fdb.c */
-extern void br_fdb_init(void);
+extern int br_fdb_init(void);
 extern void br_fdb_fini(void);
+extern void br_fdb_flush(struct net_bridge *br);
 extern void br_fdb_changeaddr(struct net_bridge_port *p,
                              const unsigned char *newaddr);
 extern void br_fdb_cleanup(unsigned long arg);
@@ -182,7 +191,8 @@ extern void br_features_recompute(struct net_bridge *br);
 
 /* br_input.c */
 extern int br_handle_frame_finish(struct sk_buff *skb);
-extern int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb);
+extern struct sk_buff *br_handle_frame(struct net_bridge_port *p,
+                                      struct sk_buff *skb);
 
 /* br_ioctl.c */
 extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -207,6 +217,7 @@ extern void br_become_designated_port(struct net_bridge_port *p);
 /* br_stp_if.c */
 extern void br_stp_enable_bridge(struct net_bridge *br);
 extern void br_stp_disable_bridge(struct net_bridge *br);
+extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
 extern void br_stp_enable_port(struct net_bridge_port *p);
 extern void br_stp_disable_port(struct net_bridge_port *p);
 extern void br_stp_recalculate_bridge_id(struct net_bridge *br);
@@ -235,7 +246,7 @@ extern void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
 
 
 /* br_netlink.c */
-extern void br_netlink_init(void);
+extern int br_netlink_init(void);
 extern void br_netlink_fini(void);
 extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
 
index f9ff4d5..ebb0861 100644 (file)
@@ -370,11 +370,11 @@ static void br_make_blocking(struct net_bridge_port *p)
 static void br_make_forwarding(struct net_bridge_port *p)
 {
        if (p->state == BR_STATE_BLOCKING) {
-               if (p->br->stp_enabled) {
+               if (p->br->stp_enabled == BR_KERNEL_STP)
                        p->state = BR_STATE_LISTENING;
-               } else {
+               else
                        p->state = BR_STATE_LEARNING;
-               }
+
                br_log_state(p);
                mod_timer(&p->forward_delay_timer, jiffies + p->br->forward_delay);     }
 }
@@ -384,6 +384,10 @@ void br_port_state_selection(struct net_bridge *br)
 {
        struct net_bridge_port *p;
 
+       /* Don't change port states if userspace is handling STP */
+       if (br->stp_enabled == BR_USER_STP)
+               return;
+
        list_for_each_entry(p, &br->port_list, list) {
                if (p->state != BR_STATE_DISABLED) {
                        if (p->port_no == br->root_port) {
index b9fb0dc..60112bc 100644 (file)
@@ -33,9 +33,6 @@ static void br_send_bpdu(struct net_bridge_port *p,
 {
        struct sk_buff *skb;
 
-       if (!p->br->stp_enabled)
-               return;
-
        skb = dev_alloc_skb(length+LLC_RESERVE);
        if (!skb)
                return;
@@ -75,6 +72,9 @@ void br_send_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *bpdu)
 {
        unsigned char buf[35];
 
+       if (p->br->stp_enabled != BR_KERNEL_STP)
+               return;
+
        buf[0] = 0;
        buf[1] = 0;
        buf[2] = 0;
@@ -117,6 +117,9 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
 {
        unsigned char buf[4];
 
+       if (p->br->stp_enabled != BR_KERNEL_STP)
+               return;
+
        buf[0] = 0;
        buf[1] = 0;
        buf[2] = 0;
@@ -157,9 +160,13 @@ int br_stp_rcv(struct sk_buff *skb, struct net_device *dev,
        br = p->br;
        spin_lock(&br->lock);
 
-       if (p->state == BR_STATE_DISABLED
-           || !br->stp_enabled
-           || !(br->dev->flags & IFF_UP))
+       if (br->stp_enabled != BR_KERNEL_STP)
+               goto out;
+
+       if (!(br->dev->flags & IFF_UP))
+               goto out;
+
+       if (p->state == BR_STATE_DISABLED)
                goto out;
 
        if (compare_ether_addr(dest, br->group_addr) != 0)
index a285897..3e246b3 100644 (file)
@@ -87,7 +87,6 @@ void br_stp_disable_bridge(struct net_bridge *br)
 void br_stp_enable_port(struct net_bridge_port *p)
 {
        br_init_port(p);
-       br_ifinfo_notify(RTM_NEWLINK, p);
        br_port_state_selection(p->br);
 }
 
@@ -101,8 +100,6 @@ void br_stp_disable_port(struct net_bridge_port *p)
        printk(KERN_INFO "%s: port %i(%s) entering %s state\n",
               br->dev->name, p->port_no, p->dev->name, "disabled");
 
-       br_ifinfo_notify(RTM_DELLINK, p);
-
        wasroot = br_is_root_bridge(br);
        br_become_designated_port(p);
        p->state = BR_STATE_DISABLED;
@@ -123,6 +120,62 @@ void br_stp_disable_port(struct net_bridge_port *p)
                br_become_root_bridge(br);
 }
 
+static void br_stp_start(struct net_bridge *br)
+{
+       int r;
+       char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
+       char *envp[] = { NULL };
+
+       r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
+       if (r == 0) {
+               br->stp_enabled = BR_USER_STP;
+               printk(KERN_INFO "%s: userspace STP started\n", br->dev->name);
+       } else {
+               br->stp_enabled = BR_KERNEL_STP;
+               printk(KERN_INFO "%s: starting userspace STP failed, "
+                               "starting kernel STP\n", br->dev->name);
+
+               /* To start timers on any ports left in blocking */
+               spin_lock_bh(&br->lock);
+               br_port_state_selection(br);
+               spin_unlock_bh(&br->lock);
+       }
+}
+
+static void br_stp_stop(struct net_bridge *br)
+{
+       int r;
+       char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
+       char *envp[] = { NULL };
+
+       if (br->stp_enabled == BR_USER_STP) {
+               r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
+               printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
+                       br->dev->name, r);
+
+
+               /* To start timers on any ports left in blocking */
+               spin_lock_bh(&br->lock);
+               br_port_state_selection(br);
+               spin_unlock_bh(&br->lock);
+       }
+
+       br->stp_enabled = BR_NO_STP;
+}
+
+void br_stp_set_enabled(struct net_bridge *br, unsigned long val)
+{
+       ASSERT_RTNL();
+
+       if (val) {
+               if (br->stp_enabled == BR_NO_STP)
+                       br_stp_start(br);
+       } else {
+               if (br->stp_enabled != BR_NO_STP)
+                       br_stp_stop(br);
+       }
+}
+
 /* called under bridge lock */
 void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
 {
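
br_stp_start() and br_stp_stop() above hand spanning tree off to an external /sbin/bridge-stp program through call_usermodehelper(), falling back to the in-kernel STP only when the helper cannot be run. A rough userspace analogue of that run-the-helper-and-check-its-exit-status step (plain fork/exec sketch; the helper path and argument order simply mirror the argv[] built above):

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Run "prog <bridge> <action>" and return 0 only if it exits with
 * status 0, loosely mirroring what call_usermodehelper() reports. */
static int run_stp_helper(const char *prog, const char *bridge,
                          const char *action)
{
    int status;
    pid_t pid = fork();

    if (pid < 0)
        return -1;
    if (pid == 0) {
        execl(prog, prog, bridge, action, (char *)NULL);
        _exit(127);                       /* exec itself failed */
    }
    if (waitpid(pid, &status, 0) < 0)
        return -1;
    return (WIFEXITED(status) && WEXITSTATUS(status) == 0) ? 0 : -1;
}

int main(void)
{
    if (run_stp_helper("/sbin/bridge-stp", "br0", "start") == 0)
        printf("userspace STP started\n");
    else
        printf("helper failed, would fall back to kernel STP\n");
    return 0;
}

Roughly speaking, call_usermodehelper() with the wait flag set behaves the same way: a zero return means the helper ran and exited successfully, and that is what gates the switch to BR_USER_STP above.
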
index 01a22ad..33c6c4a 100644 (file)
@@ -149,7 +149,11 @@ static ssize_t show_stp_state(struct device *d,
 
 static void set_stp_state(struct net_bridge *br, unsigned long val)
 {
-       br->stp_enabled = val;
+       rtnl_lock();
+       spin_unlock_bh(&br->lock);
+       br_stp_set_enabled(br, val);
+       spin_lock_bh(&br->lock);
+       rtnl_unlock();
 }
 
 static ssize_t store_stp_state(struct device *d,
@@ -309,6 +313,19 @@ static ssize_t store_group_addr(struct device *d,
 static DEVICE_ATTR(group_addr, S_IRUGO | S_IWUSR,
                   show_group_addr, store_group_addr);
 
+static ssize_t store_flush(struct device *d,
+                          struct device_attribute *attr,
+                          const char *buf, size_t len)
+{
+       struct net_bridge *br = to_bridge(d);
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       br_fdb_flush(br);
+       return len;
+}
+static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush);
 
 static struct attribute *bridge_attrs[] = {
        &dev_attr_forward_delay.attr,
@@ -328,6 +345,7 @@ static struct attribute *bridge_attrs[] = {
        &dev_attr_topology_change_timer.attr,
        &dev_attr_gc_timer.attr,
        &dev_attr_group_addr.attr,
+       &dev_attr_flush.attr,
        NULL
 };
 
index 0bc2aef..2da2292 100644 (file)
@@ -137,6 +137,13 @@ static ssize_t show_hold_timer(struct net_bridge_port *p,
 }
 static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL);
 
+static ssize_t store_flush(struct net_bridge_port *p, unsigned long v)
+{
+       br_fdb_delete_by_port(p->br, p, 0); /* Don't delete local entry */
+       return 0;
+}
+static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
+
 static struct brport_attribute *brport_attrs[] = {
        &brport_attr_path_cost,
        &brport_attr_priority,
@@ -152,6 +159,7 @@ static struct brport_attribute *brport_attrs[] = {
        &brport_attr_message_age_timer,
        &brport_attr_forward_delay_timer,
        &brport_attr_hold_timer,
+       &brport_attr_flush,
        NULL
 };
 
index 9c59980..1a46952 100644 (file)
@@ -35,40 +35,36 @@ static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in
                return EBT_NOMATCH;
 
        if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP)) {
-               __be32 _addr, *ap;
+               __be32 saddr, daddr, *sap, *dap;
 
-               /* IPv4 addresses are always 4 bytes */
-               if (ah->ar_pln != sizeof(__be32))
+               if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP))
+                       return EBT_NOMATCH;
+               sap = skb_header_pointer(skb, sizeof(struct arphdr) +
+                                       ah->ar_hln, sizeof(saddr),
+                                       &saddr);
+               if (sap == NULL)
+                       return EBT_NOMATCH;
+               dap = skb_header_pointer(skb, sizeof(struct arphdr) +
+                                       2*ah->ar_hln+sizeof(saddr),
+                                       sizeof(daddr), &daddr);
+               if (dap == NULL)
+                       return EBT_NOMATCH;
+               if (info->bitmask & EBT_ARP_SRC_IP &&
+                   FWINV(info->saddr != (*sap & info->smsk), EBT_ARP_SRC_IP))
+                       return EBT_NOMATCH;
+               if (info->bitmask & EBT_ARP_DST_IP &&
+                   FWINV(info->daddr != (*dap & info->dmsk), EBT_ARP_DST_IP))
+                       return EBT_NOMATCH;
+               if (info->bitmask & EBT_ARP_GRAT &&
+                   FWINV(*dap != *sap, EBT_ARP_GRAT))
                        return EBT_NOMATCH;
-               if (info->bitmask & EBT_ARP_SRC_IP) {
-                       ap = skb_header_pointer(skb, sizeof(struct arphdr) +
-                                               ah->ar_hln, sizeof(_addr),
-                                               &_addr);
-                       if (ap == NULL)
-                               return EBT_NOMATCH;
-                       if (FWINV(info->saddr != (*ap & info->smsk),
-                          EBT_ARP_SRC_IP))
-                               return EBT_NOMATCH;
-               }
-
-               if (info->bitmask & EBT_ARP_DST_IP) {
-                       ap = skb_header_pointer(skb, sizeof(struct arphdr) +
-                                               2*ah->ar_hln+sizeof(__be32),
-                                               sizeof(_addr), &_addr);
-                       if (ap == NULL)
-                               return EBT_NOMATCH;
-                       if (FWINV(info->daddr != (*ap & info->dmsk),
-                          EBT_ARP_DST_IP))
-                               return EBT_NOMATCH;
-               }
        }
 
        if (info->bitmask & (EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC)) {
                unsigned char _mac[ETH_ALEN], *mp;
                uint8_t verdict, i;
 
-               /* MAC addresses are 6 bytes */
-               if (ah->ar_hln != ETH_ALEN)
+               if (ah->ar_hln != ETH_ALEN || ah->ar_hrd != htons(ARPHRD_ETHER))
                        return EBT_NOMATCH;
                if (info->bitmask & EBT_ARP_SRC_MAC) {
                        mp = skb_header_pointer(skb, sizeof(struct arphdr),
index 45712ae..031bfa4 100644 (file)
@@ -196,14 +196,10 @@ static int __init ebt_log_init(void)
        ret = ebt_register_watcher(&log);
        if (ret < 0)
                return ret;
-       if (nf_log_register(PF_BRIDGE, &ebt_log_logger) < 0) {
-               printk(KERN_WARNING "ebt_log: not logging via system console "
-                      "since somebody else already registered for PF_INET\n");
-               /* we cannot make module load fail here, since otherwise
-                * ebtables userspace would abort */
-       }
-
-       return 0;
+       ret = nf_log_register(PF_BRIDGE, &ebt_log_logger);
+       if (ret < 0 && ret != -EEXIST)
+               ebt_unregister_watcher(&log);
+       return ret;
 }
 
 static void __exit ebt_log_fini(void)
index 8e15cc4..9411db6 100644 (file)
@@ -130,6 +130,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
        unsigned int group = uloginfo->nlgroup;
        ebt_ulog_buff_t *ub = &ulog_buffers[group];
        spinlock_t *lock = &ub->lock;
+       ktime_t kt;
 
        if ((uloginfo->cprange == 0) ||
            (uloginfo->cprange > skb->len + ETH_HLEN))
@@ -164,9 +165,10 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
 
        /* Fill in the ulog data */
        pm->version = EBT_ULOG_VERSION;
-       do_gettimeofday(&pm->stamp);
+       kt = ktime_get_real();
+       pm->stamp = ktime_to_timeval(kt);
        if (ub->qlen == 1)
-               skb_set_timestamp(ub->skb, &pm->stamp);
+               ub->skb->tstamp = kt;
        pm->data_len = copy_len;
        pm->mark = skb->mark;
        pm->hook = hooknr;
@@ -295,14 +297,12 @@ static int __init ebt_ulog_init(void)
 
        /* initialize ulog_buffers */
        for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
-               init_timer(&ulog_buffers[i].timer);
-               ulog_buffers[i].timer.function = ulog_timer;
-               ulog_buffers[i].timer.data = i;
+               setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
                spin_lock_init(&ulog_buffers[i].lock);
        }
 
        ebtulognl = netlink_kernel_create(NETLINK_NFLOG, EBT_ULOG_MAXNLGROUPS,
-                                         NULL, THIS_MODULE);
+                                         NULL, NULL, THIS_MODULE);
        if (!ebtulognl)
                ret = -ENOMEM;
        else if ((ret = ebt_register_watcher(&ulog)))
index 1f32866..9a0f5f2 100644 (file)
@@ -34,11 +34,11 @@ static inline int iov_from_user_compat_to_kern(struct iovec *kiov,
 {
        int tot_len = 0;
 
-       while(niov > 0) {
+       while (niov > 0) {
                compat_uptr_t buf;
                compat_size_t len;
 
-               if(get_user(len, &uiov32->iov_len) ||
+               if (get_user(len, &uiov32->iov_len) ||
                   get_user(buf, &uiov32->iov_base)) {
                        tot_len = -EFAULT;
                        break;
@@ -78,12 +78,12 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
 {
        int tot_len;
 
-       if(kern_msg->msg_namelen) {
-               if(mode==VERIFY_READ) {
+       if (kern_msg->msg_namelen) {
+               if (mode==VERIFY_READ) {
                        int err = move_addr_to_kernel(kern_msg->msg_name,
                                                      kern_msg->msg_namelen,
                                                      kern_address);
-                       if(err < 0)
+                       if (err < 0)
                                return err;
                }
                kern_msg->msg_name = kern_address;
@@ -93,7 +93,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
        tot_len = iov_from_user_compat_to_kern(kern_iov,
                                          (struct compat_iovec __user *)kern_msg->msg_iov,
                                          kern_msg->msg_iovlen);
-       if(tot_len >= 0)
+       if (tot_len >= 0)
                kern_msg->msg_iov = kern_iov;
 
        return tot_len;
@@ -146,8 +146,8 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
        kcmlen = 0;
        kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf;
        ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg);
-       while(ucmsg != NULL) {
-               if(get_user(ucmlen, &ucmsg->cmsg_len))
+       while (ucmsg != NULL) {
+               if (get_user(ucmlen, &ucmsg->cmsg_len))
                        return -EFAULT;
 
                /* Catch bogons. */
@@ -160,7 +160,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
                kcmlen += tmp;
                ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
        }
-       if(kcmlen == 0)
+       if (kcmlen == 0)
                return -EINVAL;
 
        /* The kcmlen holds the 64-bit version of the control length.
@@ -176,7 +176,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
        /* Now copy them over neatly. */
        memset(kcmsg, 0, kcmlen);
        ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg);
-       while(ucmsg != NULL) {
+       while (ucmsg != NULL) {
                if (__get_user(ucmlen, &ucmsg->cmsg_len))
                        goto Efault;
                if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg))
@@ -215,11 +215,12 @@ Efault:
 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
 {
        struct compat_timeval ctv;
+       struct compat_timespec cts;
        struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
        struct compat_cmsghdr cmhdr;
        int cmlen;
 
-       if(cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
+       if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
                kmsg->msg_flags |= MSG_CTRUNC;
                return 0; /* XXX: return error? check spec. */
        }
@@ -229,11 +230,18 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
                ctv.tv_sec = tv->tv_sec;
                ctv.tv_usec = tv->tv_usec;
                data = &ctv;
-               len = sizeof(struct compat_timeval);
+               len = sizeof(ctv);
+       }
+       if (level == SOL_SOCKET && type == SO_TIMESTAMPNS) {
+               struct timespec *ts = (struct timespec *)data;
+               cts.tv_sec = ts->tv_sec;
+               cts.tv_nsec = ts->tv_nsec;
+               data = &cts;
+               len = sizeof(cts);
        }
 
        cmlen = CMSG_COMPAT_LEN(len);
-       if(kmsg->msg_controllen < cmlen) {
+       if (kmsg->msg_controllen < cmlen) {
                kmsg->msg_flags |= MSG_CTRUNC;
                cmlen = kmsg->msg_controllen;
        }
@@ -241,9 +249,9 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
        cmhdr.cmsg_type = type;
        cmhdr.cmsg_len = cmlen;
 
-       if(copy_to_user(cm, &cmhdr, sizeof cmhdr))
+       if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
                return -EFAULT;
-       if(copy_to_user(CMSG_COMPAT_DATA(cm), data, cmlen - sizeof(struct compat_cmsghdr)))
+       if (copy_to_user(CMSG_COMPAT_DATA(cm), data, cmlen - sizeof(struct compat_cmsghdr)))
                return -EFAULT;
        cmlen = CMSG_COMPAT_SPACE(len);
        kmsg->msg_control += cmlen;
@@ -545,20 +553,49 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
        struct compat_timeval __user *ctv =
                        (struct compat_timeval __user*) userstamp;
        int err = -ENOENT;
+       struct timeval tv;
 
        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk);
-       if (sk->sk_stamp.tv_sec == -1)
+       tv = ktime_to_timeval(sk->sk_stamp);
+       if (tv.tv_sec == -1)
                return err;
-       if (sk->sk_stamp.tv_sec == 0)
-               do_gettimeofday(&sk->sk_stamp);
-       if (put_user(sk->sk_stamp.tv_sec, &ctv->tv_sec) ||
-                       put_user(sk->sk_stamp.tv_usec, &ctv->tv_usec))
+       if (tv.tv_sec == 0) {
+               sk->sk_stamp = ktime_get_real();
+               tv = ktime_to_timeval(sk->sk_stamp);
+       }
+       err = 0;
+       if (put_user(tv.tv_sec, &ctv->tv_sec) ||
+                       put_user(tv.tv_usec, &ctv->tv_usec))
                err = -EFAULT;
        return err;
 }
 EXPORT_SYMBOL(compat_sock_get_timestamp);
 
+int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
+{
+       struct compat_timespec __user *ctv =
+                       (struct compat_timespec __user*) userstamp;
+       int err = -ENOENT;
+       struct timespec ts;
+
+       if (!sock_flag(sk, SOCK_TIMESTAMP))
+               sock_enable_timestamp(sk);
+       ts = ktime_to_timespec(sk->sk_stamp);
+       if (ts.tv_sec == -1)
+               return err;
+       if (ts.tv_sec == 0) {
+               sk->sk_stamp = ktime_get_real();
+               ts = ktime_to_timespec(sk->sk_stamp);
+       }
+       err = 0;
+       if (put_user(ts.tv_sec, &ctv->tv_sec) ||
+                       put_user(ts.tv_nsec, &ctv->tv_nsec))
+               err = -EFAULT;
+       return err;
+}
+EXPORT_SYMBOL(compat_sock_get_timestampns);
+
 asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
                                char __user *optval, int __user *optlen)
 {
@@ -617,7 +654,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
        a0 = a[0];
        a1 = a[1];
 
-       switch(call) {
+       switch (call) {
        case SYS_SOCKET:
                ret = sys_socket(a0, a1, a[2]);
                break;
index 73272d5..4751613 100644 (file)
@@ -13,7 +13,6 @@ obj-y              += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
 obj-$(CONFIG_XFRM) += flow.o
 obj-$(CONFIG_SYSFS) += net-sysfs.o
 obj-$(CONFIG_NET_PKTGEN) += pktgen.o
-obj-$(CONFIG_WIRELESS_EXT) += wireless.o
 obj-$(CONFIG_NETPOLL) += netpoll.o
 obj-$(CONFIG_NET_DMA) += user_dma.o
 obj-$(CONFIG_FIB_RULES) += fib_rules.o
index 186212b..e1afa76 100644 (file)
@@ -247,8 +247,8 @@ EXPORT_SYMBOL(skb_kill_datagram);
 int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                            struct iovec *to, int len)
 {
-       int start = skb_headlen(skb);
-       int i, copy = start - offset;
+       int end = skb_headlen(skb);
+       int i, copy = end - offset;
 
        /* Copy header. */
        if (copy > 0) {
@@ -263,11 +263,9 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 
        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
+               BUG_TRAP(len >= 0);
 
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
+               end = offset + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        int err;
                        u8  *vaddr;
@@ -277,8 +275,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                        if (copy > len)
                                copy = len;
                        vaddr = kmap(page);
-                       err = memcpy_toiovec(to, vaddr + frag->page_offset +
-                                            offset - start, copy);
+                       err = memcpy_toiovec(to, vaddr + frag->page_offset,
+                                            copy);
                        kunmap(page);
                        if (err)
                                goto fault;
@@ -286,30 +284,24 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                                return 0;
                        offset += copy;
                }
-               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
+                       BUG_TRAP(len >= 0);
 
-                       end = start + list->len;
+                       end = offset + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
-                               if (skb_copy_datagram_iovec(list,
-                                                           offset - start,
-                                                           to, copy))
+                               if (skb_copy_datagram_iovec(list, 0, to, copy))
                                        goto fault;
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                        }
-                       start = end;
                }
        }
        if (!len)
@@ -323,9 +315,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      u8 __user *to, int len,
                                      __wsum *csump)
 {
-       int start = skb_headlen(skb);
+       int end = skb_headlen(skb);
        int pos = 0;
-       int i, copy = start - offset;
+       int i, copy = end - offset;
 
        /* Copy header. */
        if (copy > 0) {
@@ -344,11 +336,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
-
-               BUG_TRAP(start <= offset + len);
+               BUG_TRAP(len >= 0);
 
-               end = start + skb_shinfo(skb)->frags[i].size;
+               end = offset + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        __wsum csum2;
                        int err = 0;
@@ -360,8 +350,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                copy = len;
                        vaddr = kmap(page);
                        csum2 = csum_and_copy_to_user(vaddr +
-                                                       frag->page_offset +
-                                                       offset - start,
+                                                       frag->page_offset,
                                                      to, copy, 0, &err);
                        kunmap(page);
                        if (err)
@@ -373,24 +362,20 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                        to += copy;
                        pos += copy;
                }
-               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                for (; list; list=list->next) {
-                       int end;
+                       BUG_TRAP(len >= 0);
 
-                       BUG_TRAP(start <= offset + len);
-
-                       end = start + list->len;
+                       end = offset + list->len;
                        if ((copy = end - offset) > 0) {
                                __wsum csum2 = 0;
                                if (copy > len)
                                        copy = len;
-                               if (skb_copy_and_csum_datagram(list,
-                                                              offset - start,
+                               if (skb_copy_and_csum_datagram(list, 0,
                                                               to, copy,
                                                               &csum2))
                                        goto fault;
@@ -401,7 +386,6 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                to += copy;
                                pos += copy;
                        }
-                       start = end;
                }
        }
        if (!len)
@@ -411,11 +395,11 @@ fault:
        return -EFAULT;
 }
 
-__sum16 __skb_checksum_complete(struct sk_buff *skb)
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
 {
        __sum16 sum;
 
-       sum = csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+       sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);
@@ -423,6 +407,12 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
        }
        return sum;
 }
+EXPORT_SYMBOL(__skb_checksum_complete_head);
+
+__sum16 __skb_checksum_complete(struct sk_buff *skb)
+{
+       return __skb_checksum_complete_head(skb, skb->len);
+}
 EXPORT_SYMBOL(__skb_checksum_complete);
 
 /**
index 4dc93cc..d5e42d1 100644 (file)
 #include <linux/netpoll.h>
 #include <linux/rcupdate.h>
 #include <linux/delay.h>
-#include <linux/wireless.h>
+#include <net/wext.h>
 #include <net/iw_handler.h>
 #include <asm/current.h>
 #include <linux/audit.h>
  */
 
 static DEFINE_SPINLOCK(ptype_lock);
-static struct list_head ptype_base[16];        /* 16 way hashed list */
-static struct list_head ptype_all;             /* Taps */
+static struct list_head ptype_base[16] __read_mostly;  /* 16 way hashed list */
+static struct list_head ptype_all __read_mostly;       /* Taps */
 
 #ifdef CONFIG_NET_DMA
 static struct dma_client *net_dma_client;
@@ -225,12 +225,6 @@ extern void netdev_unregister_sysfs(struct net_device *);
 
 *******************************************************************************/
 
-/*
- *     For efficiency
- */
-
-static int netdev_nit;
-
 /*
  *     Add a protocol ID to the list. Now that the input handler is
  *     smarter we can dispense with all the messy stuff that used to be
@@ -265,10 +259,9 @@ void dev_add_pack(struct packet_type *pt)
        int hash;
 
        spin_lock_bh(&ptype_lock);
-       if (pt->type == htons(ETH_P_ALL)) {
-               netdev_nit++;
+       if (pt->type == htons(ETH_P_ALL))
                list_add_rcu(&pt->list, &ptype_all);
-       } else {
+       else {
                hash = ntohs(pt->type) & 15;
                list_add_rcu(&pt->list, &ptype_base[hash]);
        }
@@ -295,10 +288,9 @@ void __dev_remove_pack(struct packet_type *pt)
 
        spin_lock_bh(&ptype_lock);
 
-       if (pt->type == htons(ETH_P_ALL)) {
-               netdev_nit--;
+       if (pt->type == htons(ETH_P_ALL))
                head = &ptype_all;
-       } else
+       else
                head = &ptype_base[ntohs(pt->type) & 15];
 
        list_for_each_entry(pt1, head, list) {
@@ -817,7 +809,6 @@ static int default_rebuild_header(struct sk_buff *skb)
        return 1;
 }
 
-
 /**
  *     dev_open        - prepare an interface for use.
  *     @dev:   device to open
@@ -1031,23 +1022,12 @@ void net_disable_timestamp(void)
        atomic_dec(&netstamp_needed);
 }
 
-void __net_timestamp(struct sk_buff *skb)
-{
-       struct timeval tv;
-
-       do_gettimeofday(&tv);
-       skb_set_timestamp(skb, &tv);
-}
-EXPORT_SYMBOL(__net_timestamp);
-
 static inline void net_timestamp(struct sk_buff *skb)
 {
        if (atomic_read(&netstamp_needed))
                __net_timestamp(skb);
-       else {
-               skb->tstamp.off_sec = 0;
-               skb->tstamp.off_usec = 0;
-       }
+       else
+               skb->tstamp.tv64 = 0;
 }
 
 /*
@@ -1077,18 +1057,18 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
                           set by sender, so that the second statement is
                           just protection against buggy protocols.
                         */
-                       skb2->mac.raw = skb2->data;
+                       skb_reset_mac_header(skb2);
 
-                       if (skb2->nh.raw < skb2->data ||
-                           skb2->nh.raw > skb2->tail) {
+                       if (skb_network_header(skb2) < skb2->data ||
+                           skb2->network_header > skb2->tail) {
                                if (net_ratelimit())
                                        printk(KERN_CRIT "protocol %04x is "
                                               "buggy, dev %s\n",
                                               skb2->protocol, dev->name);
-                               skb2->nh.raw = skb2->data;
+                               skb_reset_network_header(skb2);
                        }
 
-                       skb2->h.raw = skb2->nh.raw;
+                       skb2->transport_header = skb2->network_header;
                        skb2->pkt_type = PACKET_OUTGOING;
                        ptype->func(skb2, skb->dev, ptype, skb->dev);
                }
@@ -1167,7 +1147,7 @@ EXPORT_SYMBOL(netif_device_attach);
 int skb_checksum_help(struct sk_buff *skb)
 {
        __wsum csum;
-       int ret = 0, offset = skb->h.raw - skb->data;
+       int ret = 0, offset;
 
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                goto out_set_summed;
@@ -1183,15 +1163,16 @@ int skb_checksum_help(struct sk_buff *skb)
                        goto out;
        }
 
+       offset = skb->csum_start - skb_headroom(skb);
        BUG_ON(offset > (int)skb->len);
        csum = skb_checksum(skb, offset, skb->len-offset, 0);
 
-       offset = skb->tail - skb->h.raw;
+       offset = skb_headlen(skb) - offset;
        BUG_ON(offset <= 0);
        BUG_ON(skb->csum_offset + 2 > offset);
 
-       *(__sum16*)(skb->h.raw + skb->csum_offset) = csum_fold(csum);
-
+       *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) =
+               csum_fold(csum);
 out_set_summed:
        skb->ip_summed = CHECKSUM_NONE;
 out:
@@ -1217,11 +1198,11 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 
        BUG_ON(skb_shinfo(skb)->frag_list);
 
-       skb->mac.raw = skb->data;
-       skb->mac_len = skb->nh.raw - skb->data;
+       skb_reset_mac_header(skb);
+       skb->mac_len = skb->network_header - skb->mac_header;
        __skb_pull(skb, skb->mac_len);
 
-       if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+       if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
                if (skb_header_cloned(skb) &&
                    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                        return ERR_PTR(err);
@@ -1235,7 +1216,8 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
                                segs = ERR_PTR(err);
                                if (err || skb_gso_ok(skb, features))
                                        break;
-                               __skb_push(skb, skb->data - skb->nh.raw);
+                               __skb_push(skb, (skb->data -
+                                                skb_network_header(skb)));
                        }
                        segs = ptype->gso_segment(skb, features);
                        break;
@@ -1243,7 +1225,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
        }
        rcu_read_unlock();
 
-       __skb_push(skb, skb->data - skb->mac.raw);
+       __skb_push(skb, skb->data - skb_mac_header(skb));
 
        return segs;
 }
@@ -1340,7 +1322,7 @@ static int dev_gso_segment(struct sk_buff *skb)
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        if (likely(!skb->next)) {
-               if (netdev_nit)
+               if (!list_empty(&ptype_all))
                        dev_queue_xmit_nit(skb, dev);
 
                if (netif_needs_gso(dev, skb)) {
@@ -1442,12 +1424,16 @@ int dev_queue_xmit(struct sk_buff *skb)
        /* If packet is not checksummed and device does not support
         * checksumming for this protocol, complete checksumming here.
         */
-       if (skb->ip_summed == CHECKSUM_PARTIAL &&
-           (!(dev->features & NETIF_F_GEN_CSUM) &&
-            (!(dev->features & NETIF_F_IP_CSUM) ||
-             skb->protocol != htons(ETH_P_IP))))
-               if (skb_checksum_help(skb))
-                       goto out_kfree_skb;
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               skb_set_transport_header(skb, skb->csum_start -
+                                             skb_headroom(skb));
+
+               if (!(dev->features & NETIF_F_GEN_CSUM) &&
+                   (!(dev->features & NETIF_F_IP_CSUM) ||
+                    skb->protocol != htons(ETH_P_IP)))
+                       if (skb_checksum_help(skb))
+                               goto out_kfree_skb;
+       }
 
 gso:
        spin_lock_prefetch(&dev->queue_lock);
@@ -1543,9 +1529,9 @@ out:
                        Receiver routines
   =======================================================================*/
 
-int netdev_max_backlog = 1000;
-int netdev_budget = 300;
-int weight_p = 64;            /* old backlog weight */
+int netdev_max_backlog __read_mostly = 1000;
+int netdev_budget __read_mostly = 300;
+int weight_p __read_mostly = 64;            /* old backlog weight */
 
 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 
@@ -1577,7 +1563,7 @@ int netif_rx(struct sk_buff *skb)
        if (netpoll_rx(skb))
                return NET_RX_DROP;
 
-       if (!skb->tstamp.off_sec)
+       if (!skb->tstamp.tv64)
                net_timestamp(skb);
 
        /*
@@ -1684,40 +1670,46 @@ static void net_tx_action(struct softirq_action *h)
        }
 }
 
-static __inline__ int deliver_skb(struct sk_buff *skb,
-                                 struct packet_type *pt_prev,
-                                 struct net_device *orig_dev)
+static inline int deliver_skb(struct sk_buff *skb,
+                             struct packet_type *pt_prev,
+                             struct net_device *orig_dev)
 {
        atomic_inc(&skb->users);
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);
+/* These hooks are defined here for ATM */
 struct net_bridge;
 struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
                                                unsigned char *addr);
-void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
+void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
 
-static __inline__ int handle_bridge(struct sk_buff **pskb,
-                                   struct packet_type **pt_prev, int *ret,
-                                   struct net_device *orig_dev)
+/*
+ * If the bridge module is loaded, call the bridging hook.
+ * Returns NULL if the packet was consumed.
+ */
+struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
+                                       struct sk_buff *skb) __read_mostly;
+static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
+                                           struct packet_type **pt_prev, int *ret,
+                                           struct net_device *orig_dev)
 {
        struct net_bridge_port *port;
 
-       if ((*pskb)->pkt_type == PACKET_LOOPBACK ||
-           (port = rcu_dereference((*pskb)->dev->br_port)) == NULL)
-               return 0;
+       if (skb->pkt_type == PACKET_LOOPBACK ||
+           (port = rcu_dereference(skb->dev->br_port)) == NULL)
+               return skb;
 
        if (*pt_prev) {
-               *ret = deliver_skb(*pskb, *pt_prev, orig_dev);
+               *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }
 
-       return br_handle_frame_hook(port, pskb);
+       return br_handle_frame_hook(port, skb);
 }
 #else
-#define handle_bridge(skb, pt_prev, ret, orig_dev)     (0)
+#define handle_bridge(skb, pt_prev, ret, orig_dev)     (skb)
 #endif
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -1747,10 +1739,10 @@ static int ing_filter(struct sk_buff *skb)
 
                skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
 
-               spin_lock(&dev->queue_lock);
+               spin_lock(&dev->ingress_lock);
                if ((q = dev->qdisc_ingress) != NULL)
                        result = q->enqueue(skb, q);
-               spin_unlock(&dev->queue_lock);
+               spin_unlock(&dev->ingress_lock);
 
        }
 
@@ -1769,7 +1761,7 @@ int netif_receive_skb(struct sk_buff *skb)
        if (skb->dev->poll && netpoll_rx(skb))
                return NET_RX_DROP;
 
-       if (!skb->tstamp.off_sec)
+       if (!skb->tstamp.tv64)
                net_timestamp(skb);
 
        if (!skb->iif)
@@ -1782,8 +1774,9 @@ int netif_receive_skb(struct sk_buff *skb)
 
        __get_cpu_var(netdev_rx_stat).total++;
 
-       skb->h.raw = skb->nh.raw = skb->data;
-       skb->mac_len = skb->nh.raw - skb->mac.raw;
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+       skb->mac_len = skb->network_header - skb->mac_header;
 
        pt_prev = NULL;
 
@@ -1823,7 +1816,8 @@ int netif_receive_skb(struct sk_buff *skb)
 ncls:
 #endif
 
-       if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
+       skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
+       if (!skb)
                goto out;
 
        type = skb->protocol;
@@ -2076,7 +2070,7 @@ static int dev_ifconf(char __user *arg)
  *     This is invoked by the /proc filesystem handler to display a device
  *     in detail.
  */
-static __inline__ struct net_device *dev_get_idx(loff_t pos)
+static struct net_device *dev_get_idx(loff_t pos)
 {
        struct net_device *dev;
        loff_t i;
@@ -2105,9 +2099,9 @@ void dev_seq_stop(struct seq_file *seq, void *v)
 
 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 {
-       if (dev->get_stats) {
-               struct net_device_stats *stats = dev->get_stats(dev);
+       struct net_device_stats *stats = dev->get_stats(dev);
 
+       if (stats) {
                seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
                                "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
                           dev->name, stats->rx_bytes, stats->rx_packets,
@@ -2185,7 +2179,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations dev_seq_ops = {
+static const struct seq_operations dev_seq_ops = {
        .start = dev_seq_start,
        .next  = dev_seq_next,
        .stop  = dev_seq_stop,
@@ -2205,7 +2199,7 @@ static const struct file_operations dev_seq_fops = {
        .release = seq_release,
 };
 
-static struct seq_operations softnet_seq_ops = {
+static const struct seq_operations softnet_seq_ops = {
        .start = softnet_seq_start,
        .next  = softnet_seq_next,
        .stop  = softnet_seq_stop,
@@ -2225,12 +2219,135 @@ static const struct file_operations softnet_seq_fops = {
        .release = seq_release,
 };
 
-#ifdef CONFIG_WIRELESS_EXT
-extern int wireless_proc_init(void);
-#else
-#define wireless_proc_init() 0
+static void *ptype_get_idx(loff_t pos)
+{
+       struct packet_type *pt = NULL;
+       loff_t i = 0;
+       int t;
+
+       list_for_each_entry_rcu(pt, &ptype_all, list) {
+               if (i == pos)
+                       return pt;
+               ++i;
+       }
+
+       for (t = 0; t < 16; t++) {
+               list_for_each_entry_rcu(pt, &ptype_base[t], list) {
+                       if (i == pos)
+                               return pt;
+                       ++i;
+               }
+       }
+       return NULL;
+}
+
+static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       rcu_read_lock();
+       return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct packet_type *pt;
+       struct list_head *nxt;
+       int hash;
+
+       ++*pos;
+       if (v == SEQ_START_TOKEN)
+               return ptype_get_idx(0);
+
+       pt = v;
+       nxt = pt->list.next;
+       if (pt->type == htons(ETH_P_ALL)) {
+               if (nxt != &ptype_all)
+                       goto found;
+               hash = 0;
+               nxt = ptype_base[0].next;
+       } else
+               hash = ntohs(pt->type) & 15;
+
+       while (nxt == &ptype_base[hash]) {
+               if (++hash >= 16)
+                       return NULL;
+               nxt = ptype_base[hash].next;
+       }
+found:
+       return list_entry(nxt, struct packet_type, list);
+}
+
+static void ptype_seq_stop(struct seq_file *seq, void *v)
+{
+       rcu_read_unlock();
+}
+
+static void ptype_seq_decode(struct seq_file *seq, void *sym)
+{
+#ifdef CONFIG_KALLSYMS
+       unsigned long offset = 0, symsize;
+       const char *symname;
+       char *modname;
+       char namebuf[128];
+
+       symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
+                                 &modname, namebuf);
+
+       if (symname) {
+               char *delim = ":";
+
+               if (!modname)
+                       modname = delim = "";
+               seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
+                          symname, offset);
+               return;
+       }
 #endif
 
+       seq_printf(seq, "[%p]", sym);
+}
+
+static int ptype_seq_show(struct seq_file *seq, void *v)
+{
+       struct packet_type *pt = v;
+
+       if (v == SEQ_START_TOKEN)
+               seq_puts(seq, "Type Device      Function\n");
+       else {
+               if (pt->type == htons(ETH_P_ALL))
+                       seq_puts(seq, "ALL ");
+               else
+                       seq_printf(seq, "%04x", ntohs(pt->type));
+
+               seq_printf(seq, " %-8s ",
+                          pt->dev ? pt->dev->name : "");
+               ptype_seq_decode(seq,  pt->func);
+               seq_putc(seq, '\n');
+       }
+
+       return 0;
+}
+
+static const struct seq_operations ptype_seq_ops = {
+       .start = ptype_seq_start,
+       .next  = ptype_seq_next,
+       .stop  = ptype_seq_stop,
+       .show  = ptype_seq_show,
+};
+
+static int ptype_seq_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &ptype_seq_ops);
+}
+
+static const struct file_operations ptype_seq_fops = {
+       .owner   = THIS_MODULE,
+       .open    = ptype_seq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
+
 static int __init dev_proc_init(void)
 {
        int rc = -ENOMEM;
@@ -2239,13 +2356,18 @@ static int __init dev_proc_init(void)
                goto out;
        if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
                goto out_dev;
-       if (wireless_proc_init())
+       if (!proc_net_fops_create("ptype", S_IRUGO, &ptype_seq_fops))
+               goto out_dev2;
+
+       if (wext_proc_init())
                goto out_softnet;
        rc = 0;
 out:
        return rc;
 out_softnet:
        proc_net_remove("softnet_stat");
+out_dev2:
+       proc_net_remove("ptype");
 out_dev:
        proc_net_remove("dev");
        goto out;
@@ -2795,29 +2917,9 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
                                        ret = -EFAULT;
                                return ret;
                        }
-#ifdef CONFIG_WIRELESS_EXT
                        /* Take care of Wireless Extensions */
-                       if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
-                               /* If command is `set a parameter', or
-                                * `get the encoding parameters', check if
-                                * the user has the right to do it */
-                               if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE
-                                   || cmd == SIOCGIWENCODEEXT) {
-                                       if (!capable(CAP_NET_ADMIN))
-                                               return -EPERM;
-                               }
-                               dev_load(ifr.ifr_name);
-                               rtnl_lock();
-                               /* Follow me in net/core/wireless.c */
-                               ret = wireless_process_ioctl(&ifr, cmd);
-                               rtnl_unlock();
-                               if (IW_IS_GET(cmd) &&
-                                   copy_to_user(arg, &ifr,
-                                                sizeof(struct ifreq)))
-                                       ret = -EFAULT;
-                               return ret;
-                       }
-#endif /* CONFIG_WIRELESS_EXT */
+                       if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
+                               return wext_handle_ioctl(&ifr, cmd, arg);
                        return -EINVAL;
        }
 }
@@ -2847,7 +2949,7 @@ static int dev_boot_phase = 1;
 static DEFINE_SPINLOCK(net_todo_list_lock);
 static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
 
-static inline void net_set_todo(struct net_device *dev)
+static void net_set_todo(struct net_device *dev)
 {
        spin_lock(&net_todo_list_lock);
        list_add_tail(&dev->todo_list, &net_todo_list);
@@ -2888,9 +2990,7 @@ int register_netdevice(struct net_device *dev)
        spin_lock_init(&dev->queue_lock);
        spin_lock_init(&dev->_xmit_lock);
        dev->xmit_lock_owner = -1;
-#ifdef CONFIG_NET_CLS_ACT
        spin_lock_init(&dev->ingress_lock);
-#endif
 
        dev->iflink = -1;
 
@@ -3002,7 +3102,7 @@ out:
  *     chain. 0 is returned on success. A negative errno code is returned
  *     on a failure to set up the device, or if the name is a duplicate.
  *
- *     This is a wrapper around register_netdev that takes the rtnl semaphore
+ *     This is a wrapper around register_netdevice that takes the rtnl semaphore
  *     and expands the device name if you passed a format string to
  *     alloc_netdev.
  */
@@ -3157,6 +3257,13 @@ out:
        mutex_unlock(&net_todo_run_mutex);
 }
 
+static struct net_device_stats *maybe_internal_stats(struct net_device *dev)
+{
+       if (dev->features & NETIF_F_INTERNAL_STATS)
+               return &dev->stats;
+       return NULL;
+}
+
 /**
  *     alloc_netdev - allocate network device
  *     @sizeof_priv:   size of private data to allocate space for
@@ -3192,6 +3299,7 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
        if (sizeof_priv)
                dev->priv = netdev_priv(dev);
 
+       dev->get_stats = maybe_internal_stats;
        setup(dev);
        strcpy(dev->name, name);
        return dev;
index 56b310c..7d57bf7 100644 (file)
@@ -264,7 +264,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations dev_mc_seq_ops = {
+static const struct seq_operations dev_mc_seq_ops = {
        .start = dev_mc_seq_start,
        .next  = dev_mc_seq_next,
        .stop  = dev_mc_seq_stop,
index 6168edd..8d5e5a0 100644 (file)
@@ -836,7 +836,7 @@ int dev_ethtool(struct ifreq *ifr)
                        return -EPERM;
        }
 
-       if(dev->ethtool_ops->begin)
+       if (dev->ethtool_ops->begin)
                if ((rc = dev->ethtool_ops->begin(dev)) < 0)
                        return rc;
 
@@ -952,7 +952,7 @@ int dev_ethtool(struct ifreq *ifr)
                rc =  -EOPNOTSUPP;
        }
 
-       if(dev->ethtool_ops->complete)
+       if (dev->ethtool_ops->complete)
                dev->ethtool_ops->complete(dev);
 
        if (old_features != dev->features)
index 7174ced..8c5474e 100644 (file)
@@ -44,6 +44,12 @@ static void rules_ops_put(struct fib_rules_ops *ops)
                module_put(ops->owner);
 }
 
+static void flush_route_cache(struct fib_rules_ops *ops)
+{
+       if (ops->flush_cache)
+               ops->flush_cache();
+}
+
 int fib_rules_register(struct fib_rules_ops *ops)
 {
        int err = -EEXIST;
@@ -132,10 +138,25 @@ int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
        rcu_read_lock();
 
        list_for_each_entry_rcu(rule, ops->rules_list, list) {
+jumped:
                if (!fib_rule_match(rule, ops, fl, flags))
                        continue;
 
-               err = ops->action(rule, fl, flags, arg);
+               if (rule->action == FR_ACT_GOTO) {
+                       struct fib_rule *target;
+
+                       target = rcu_dereference(rule->ctarget);
+                       if (target == NULL) {
+                               continue;
+                       } else {
+                               rule = target;
+                               goto jumped;
+                       }
+               } else if (rule->action == FR_ACT_NOP)
+                       continue;
+               else
+                       err = ops->action(rule, fl, flags, arg);
+
                if (err != -EAGAIN) {
                        fib_rule_get(rule);
                        arg->rule = rule;
@@ -174,13 +195,13 @@ errout:
        return err;
 }
 
-int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 {
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *r, *last = NULL;
        struct nlattr *tb[FRA_MAX+1];
-       int err = -EINVAL;
+       int err = -EINVAL, unresolved = 0;
 
        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;
@@ -237,6 +258,28 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
        if (!rule->pref && ops->default_pref)
                rule->pref = ops->default_pref();
 
+       err = -EINVAL;
+       if (tb[FRA_GOTO]) {
+               if (rule->action != FR_ACT_GOTO)
+                       goto errout_free;
+
+               rule->target = nla_get_u32(tb[FRA_GOTO]);
+               /* Backward jumps are prohibited to avoid endless loops */
+               if (rule->target <= rule->pref)
+                       goto errout_free;
+
+               list_for_each_entry(r, ops->rules_list, list) {
+                       if (r->pref == rule->target) {
+                               rule->ctarget = r;
+                               break;
+                       }
+               }
+
+               if (rule->ctarget == NULL)
+                       unresolved = 1;
+       } else if (rule->action == FR_ACT_GOTO)
+               goto errout_free;
+
        err = ops->configure(rule, skb, nlh, frh, tb);
        if (err < 0)
                goto errout_free;
@@ -249,12 +292,35 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
        fib_rule_get(rule);
 
+       if (ops->unresolved_rules) {
+               /*
+                * There are unresolved goto rules in the list; check if
+                * any of them are pointing to this new rule.
+                */
+               list_for_each_entry(r, ops->rules_list, list) {
+                       if (r->action == FR_ACT_GOTO &&
+                           r->target == rule->pref) {
+                               BUG_ON(r->ctarget != NULL);
+                               rcu_assign_pointer(r->ctarget, rule);
+                               if (--ops->unresolved_rules == 0)
+                                       break;
+                       }
+               }
+       }
+
+       if (rule->action == FR_ACT_GOTO)
+               ops->nr_goto_rules++;
+
+       if (unresolved)
+               ops->unresolved_rules++;
+
        if (last)
                list_add_rcu(&rule->list, &last->list);
        else
                list_add_rcu(&rule->list, ops->rules_list);
 
        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
+       flush_route_cache(ops);
        rules_ops_put(ops);
        return 0;
 
@@ -265,11 +331,11 @@ errout:
        return err;
 }
 
-int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 {
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
-       struct fib_rule *rule;
+       struct fib_rule *rule, *tmp;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL;
 
@@ -322,10 +388,30 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                }
 
                list_del_rcu(&rule->list);
+
+               if (rule->action == FR_ACT_GOTO)
+                       ops->nr_goto_rules--;
+
+               /*
+                * Check if this rule is the target of any of them. If so,
+                * disable them. As this operation is potentially very
+                * expensive, it is only performed if goto rules have
+                * actually been added.
+                */
+               if (ops->nr_goto_rules > 0) {
+                       list_for_each_entry(tmp, ops->rules_list, list) {
+                               if (tmp->ctarget == rule) {
+                                       rcu_assign_pointer(tmp->ctarget, NULL);
+                                       ops->unresolved_rules++;
+                               }
+                       }
+               }
+
                synchronize_rcu();
                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
                                   NETLINK_CB(skb).pid);
                fib_rule_put(rule);
+               flush_route_cache(ops);
                rules_ops_put(ops);
                return 0;
        }
@@ -371,9 +457,16 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
        frh->action = rule->action;
        frh->flags = rule->flags;
 
-       if (rule->ifname[0])
+       if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
+               frh->flags |= FIB_RULE_UNRESOLVED;
+
+       if (rule->ifname[0]) {
                NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);
 
+               if (rule->ifindex == -1)
+                       frh->flags |= FIB_RULE_DEV_DETACHED;
+       }
+
        if (rule->pref)
                NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);
 
@@ -383,6 +476,9 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
        if (rule->mark_mask || rule->mark)
                NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);
 
+       if (rule->target)
+               NLA_PUT_U32(skb, FRA_GOTO, rule->target);
+
        if (ops->fill(rule, skb, nlh, frh) < 0)
                goto nla_put_failure;
 
@@ -393,19 +489,14 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family)
+static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
+                     struct fib_rules_ops *ops)
 {
        int idx = 0;
        struct fib_rule *rule;
-       struct fib_rules_ops *ops;
-
-       ops = lookup_rules_ops(family);
-       if (ops == NULL)
-               return -EAFNOSUPPORT;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(rule, ops->rules_list, list) {
-               if (idx < cb->args[0])
+       list_for_each_entry(rule, ops->rules_list, list) {
+               if (idx < cb->args[1])
                        goto skip;
 
                if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
@@ -415,14 +506,44 @@ int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family)
 skip:
                idx++;
        }
-       rcu_read_unlock();
-       cb->args[0] = idx;
+       cb->args[1] = idx;
        rules_ops_put(ops);
 
        return skb->len;
 }
 
-EXPORT_SYMBOL_GPL(fib_rules_dump);
+static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct fib_rules_ops *ops;
+       int idx = 0, family;
+
+       family = rtnl_msg_family(cb->nlh);
+       if (family != AF_UNSPEC) {
+               /* Protocol specific dump request */
+               ops = lookup_rules_ops(family);
+               if (ops == NULL)
+                       return -EAFNOSUPPORT;
+
+               return dump_rules(skb, cb, ops);
+       }
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(ops, &rules_ops, list) {
+               if (idx < cb->args[0] || !try_module_get(ops->owner))
+                       goto skip;
+
+               if (dump_rules(skb, cb, ops) < 0)
+                       break;
+
+               cb->args[1] = 0;
+       skip:
+               idx++;
+       }
+       rcu_read_unlock();
+       cb->args[0] = idx;
+
+       return skb->len;
+}
 
 static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
@@ -501,6 +622,10 @@ static struct notifier_block fib_rules_notifier = {
 
 static int __init fib_rules_init(void)
 {
+       rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
+       rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);
+
        return register_netdevice_notifier(&fib_rules_notifier);
 }
 
index 8d185a0..bd903aa 100644 (file)
@@ -42,11 +42,11 @@ static void *__load_pointer(struct sk_buff *skb, int k)
        u8 *ptr = NULL;
 
        if (k >= SKF_NET_OFF)
-               ptr = skb->nh.raw + k - SKF_NET_OFF;
+               ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
-               ptr = skb->mac.raw + k - SKF_LL_OFF;
+               ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
 
-       if (ptr >= skb->head && ptr < skb->tail)
+       if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
                return ptr;
        return NULL;
 }
index 259473d..bcc2559 100644 (file)
@@ -61,7 +61,7 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
        spin_lock_bh(lock);
        d->lock = lock;
        if (type)
-               d->tail = (struct rtattr *) skb->tail;
+               d->tail = (struct rtattr *)skb_tail_pointer(skb);
        d->skb = skb;
        d->compat_tc_stats = tc_stats_type;
        d->compat_xstats = xstats_type;
@@ -212,7 +212,7 @@ int
 gnet_stats_finish_copy(struct gnet_dump *d)
 {
        if (d->tail)
-               d->tail->rta_len = d->skb->tail - (u8 *) d->tail;
+               d->tail->rta_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;
 
        if (d->compat_tc_stats)
                if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
index 8b45c9d..e3c26a9 100644 (file)
@@ -79,7 +79,7 @@ static void rfc2863_policy(struct net_device *dev)
        case IF_LINK_MODE_DEFAULT:
        default:
                break;
-       };
+       }
 
        dev->operstate = operstate;
 
index 841e3f3..6f3bb73 100644 (file)
@@ -1125,7 +1125,7 @@ int neigh_compat_output(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
 
-       __skb_pull(skb, skb->nh.raw - skb->data);
+       __skb_pull(skb, skb_network_offset(skb));
 
        if (dev->hard_header &&
            dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
@@ -1147,7 +1147,7 @@ int neigh_resolve_output(struct sk_buff *skb)
        if (!dst || !(neigh = dst->neighbour))
                goto discard;
 
-       __skb_pull(skb, skb->nh.raw - skb->data);
+       __skb_pull(skb, skb_network_offset(skb));
 
        if (!neigh_event_send(neigh, skb)) {
                int err;
@@ -1190,7 +1190,7 @@ int neigh_connected_output(struct sk_buff *skb)
        struct neighbour *neigh = dst->neighbour;
        struct net_device *dev = neigh->dev;
 
-       __skb_pull(skb, skb->nh.raw - skb->data);
+       __skb_pull(skb, skb_network_offset(skb));
 
        read_lock_bh(&neigh->lock);
        err = dev->hard_header(skb, dev, ntohs(skb->protocol),
@@ -1441,7 +1441,7 @@ int neigh_table_clear(struct neigh_table *tbl)
        return 0;
 }
 
-int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
        struct ndmsg *ndm;
        struct nlattr *dst_attr;
@@ -1506,7 +1506,7 @@ out:
        return err;
 }
 
-int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
        struct ndmsg *ndm;
        struct nlattr *tb[NDA_MAX+1];
@@ -1786,7 +1786,7 @@ static struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] __read_mostly = {
        [NDTPA_LOCKTIME]                = { .type = NLA_U64 },
 };
 
-int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
        struct neigh_table *tbl;
        struct ndtmsg *ndtmsg;
@@ -1910,7 +1910,7 @@ errout:
        return err;
 }
 
-int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
+static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int family, tidx, nidx = 0;
        int tbl_skip = cb->args[0];
@@ -2034,7 +2034,7 @@ out:
        return rc;
 }
 
-int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
+static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct neigh_table *tbl;
        int t, family, s_t;
@@ -2393,7 +2393,7 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations neigh_stat_seq_ops = {
+static const struct seq_operations neigh_stat_seq_ops = {
        .start  = neigh_stat_seq_start,
        .next   = neigh_stat_seq_next,
        .stop   = neigh_stat_seq_stop,
@@ -2746,14 +2746,26 @@ void neigh_sysctl_unregister(struct neigh_parms *p)
 
 #endif /* CONFIG_SYSCTL */
 
+static int __init neigh_init(void)
+{
+       rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
+       rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
+
+       rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
+       rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
+
+       return 0;
+}
+
+subsys_initcall(neigh_init);
+
 EXPORT_SYMBOL(__neigh_event_send);
 EXPORT_SYMBOL(neigh_changeaddr);
 EXPORT_SYMBOL(neigh_compat_output);
 EXPORT_SYMBOL(neigh_connected_output);
 EXPORT_SYMBOL(neigh_create);
-EXPORT_SYMBOL(neigh_delete);
 EXPORT_SYMBOL(neigh_destroy);
-EXPORT_SYMBOL(neigh_dump_info);
 EXPORT_SYMBOL(neigh_event_ns);
 EXPORT_SYMBOL(neigh_ifdown);
 EXPORT_SYMBOL(neigh_lookup);
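neigh_add(), neigh_delete() and the dump handlers can become static because their only remaining caller is the rtnl_register() table built in neigh_init(); the old extern references from rtnetlink.c disappear below. The dump side keeps the usual netlink continuation contract: per-dump state lives in cb->args[] and returning skb->len asks for another pass (rtnl_dump_ifinfo further down shows the same idiom). A hedged sketch of that contract, with nr_objects() and fill_one() purely hypothetical:

    #include <linux/netlink.h>
    #include <linux/skbuff.h>

    extern int nr_objects(void);                                    /* hypothetical */
    extern int fill_one(struct sk_buff *skb,
                        struct netlink_callback *cb, int idx);      /* hypothetical */

    /* Sketch of a dumpit callback: resume from cb->args[0], stop when the
     * skb fills up, and hand the cursor back for the next invocation. */
    static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            int idx, s_idx = cb->args[0];

            for (idx = s_idx; idx < nr_objects(); idx++)
                    if (fill_one(skb, cb, idx) < 0)
                            break;

            cb->args[0] = idx;
            return skb->len;
    }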
index 4cbb129..221a64a 100644 (file)
@@ -352,8 +352,8 @@ static ssize_t wireless_show(struct device *d, char *buf,
 
        read_lock(&dev_base_lock);
        if (dev_isalive(dev)) {
-               if(dev->wireless_handlers &&
-                  dev->wireless_handlers->get_wireless_stats)
+               if (dev->wireless_handlers &&
+                   dev->wireless_handlers->get_wireless_stats)
                        iw = dev->wireless_handlers->get_wireless_stats(dev);
                if (iw != NULL)
                        ret = (*format)(iw, buf);
index 4581ece..b316435 100644 (file)
@@ -86,7 +86,7 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 {
        __wsum psum;
 
-       if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
+       if (uh->check == 0 || skb_csum_unnecessary(skb))
                return 0;
 
        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
@@ -293,10 +293,12 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        if (!skb)
                return;
 
-       memcpy(skb->data, msg, len);
+       skb_copy_to_linear_data(skb, msg, len);
        skb->len += len;
 
-       skb->h.uh = udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
+       skb_push(skb, sizeof(*udph));
+       skb_reset_transport_header(skb);
+       udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
@@ -308,7 +310,9 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        if (udph->check == 0)
                udph->check = CSUM_MANGLED_0;
 
-       skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
+       skb_push(skb, sizeof(*iph));
+       skb_reset_network_header(skb);
+       iph = ip_hdr(skb);
 
        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
@@ -324,7 +328,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);
 
        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb->protocol = eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->local_mac, 6);
        memcpy(eth->h_dest, np->remote_mac, 6);
@@ -359,8 +363,9 @@ static void arp_reply(struct sk_buff *skb)
                                 (2 * sizeof(u32)))))
                return;
 
-       skb->h.raw = skb->nh.raw = skb->data;
-       arp = skb->nh.arph;
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+       arp = arp_hdr(skb);
 
        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
@@ -389,7 +394,7 @@ static void arp_reply(struct sk_buff *skb)
        if (!send_skb)
                return;
 
-       send_skb->nh.raw = send_skb->data;
+       skb_reset_network_header(send_skb);
        arp = (struct arphdr *) skb_put(send_skb, size);
        send_skb->dev = skb->dev;
        send_skb->protocol = htons(ETH_P_ARP);
@@ -443,7 +448,7 @@ int __netpoll_rx(struct sk_buff *skb)
                goto out;
 
        /* check if netpoll clients need ARP */
-       if (skb->protocol == __constant_htons(ETH_P_ARP) &&
+       if (skb->protocol == htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                skb_queue_tail(&npi->arp_tx, skb);
                return 1;
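The netpoll changes show the replacement idiom for the removed skb->h/skb->nh assignments: push the header bytes, record their position with skb_reset_transport_header()/skb_reset_network_header(), then take a typed pointer through udp_hdr()/ip_hdr(). A minimal sketch of that sequence for the UDP step, assuming the skb has enough headroom; checksum handling is left to the caller:

    #include <linux/skbuff.h>
    #include <linux/udp.h>

    /* Sketch: prepend a UDP header the way netpoll_send_udp() now does. */
    static struct udphdr *prepend_udp(struct sk_buff *skb, __be16 sport,
                                      __be16 dport, unsigned int payload_len)
    {
            struct udphdr *udph;

            skb_push(skb, sizeof(*udph));           /* claim headroom */
            skb_reset_transport_header(skb);        /* header starts at skb->data */
            udph = udp_hdr(skb);

            udph->source = sport;
            udph->dest   = dport;
            udph->len    = htons(sizeof(*udph) + payload_len);
            udph->check  = 0;                       /* checksum filled in later */
            return udph;
    }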
index 4b01496..b92a322 100644 (file)
 
 #define VERSION  "pktgen v2.68: Packet Generator for packet performance testing.\n"
 
-/* #define PG_DEBUG(a) a */
-#define PG_DEBUG(a)
-
 /* The buckets are exponential in 'width' */
 #define LAT_BUCKETS_MAX 32
 #define IP_NAME_SZ 32
 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
-#define MPLS_STACK_BOTTOM __constant_htonl(0x00000100)
+#define MPLS_STACK_BOTTOM htonl(0x00000100)
 
 /* Device flag bits */
 #define F_IPSRC_RND   (1<<0)   /* IP-Src Random  */
@@ -214,15 +211,11 @@ struct flow_state {
 };
 
 struct pktgen_dev {
-
        /*
         * Try to keep frequent/infrequent used vars. separated.
         */
-
-       char ifname[IFNAMSIZ];
-       char result[512];
-
-       struct pktgen_thread *pg_thread;        /* the owner */
+       struct proc_dir_entry *entry;   /* proc file */
+       struct pktgen_thread *pg_thread;/* the owner */
        struct list_head list;          /* Used for chaining in the thread's run-queue */
 
        int running;            /* if this changes to false, the test will stop */
@@ -349,6 +342,8 @@ struct pktgen_dev {
        unsigned cflows;        /* Concurrent flows (config) */
        unsigned lflow;         /* Flow length  (config) */
        unsigned nflows;        /* accumulated flows (stats) */
+
+       char result[512];
 };
 
 struct pktgen_hdr {
@@ -468,17 +463,6 @@ static inline __u64 pg_div64(__u64 n, __u64 base)
        return tmp;
 }
 
-static inline u32 pktgen_random(void)
-{
-#if 0
-       __u32 n;
-       get_random_bytes(&n, 4);
-       return n;
-#else
-       return net_random();
-#endif
-}
-
 static inline __u64 getCurMs(void)
 {
        struct timeval tv;
@@ -512,7 +496,7 @@ static void pktgen_stop_all_threads_ifs(void);
 static int pktgen_stop_device(struct pktgen_dev *pkt_dev);
 static void pktgen_stop(struct pktgen_thread *t);
 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
-static int pktgen_mark_device(const char *ifname);
+
 static unsigned int scan_ip6(const char *s, char ip[16]);
 static unsigned int fmt_ip6(char *s, const char ip[16]);
 
@@ -606,7 +590,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
                   "     frags: %d  delay: %u  clone_skb: %d  ifname: %s\n",
                   pkt_dev->nfrags,
                   1000 * pkt_dev->delay_us + pkt_dev->delay_ns,
-                  pkt_dev->clone_skb, pkt_dev->ifname);
+                  pkt_dev->clone_skb, pkt_dev->odev->name);
 
        seq_printf(seq, "     flows: %u flowlen: %u\n", pkt_dev->cflows,
                   pkt_dev->lflow);
@@ -661,7 +645,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
        if (pkt_dev->nr_labels) {
                unsigned i;
                seq_printf(seq, "     mpls: ");
-               for(i = 0; i < pkt_dev->nr_labels; i++)
+               for (i = 0; i < pkt_dev->nr_labels; i++)
                        seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
                                   i == pkt_dev->nr_labels-1 ? "\n" : ", ");
        }
@@ -766,7 +750,7 @@ static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, __u32
        int i = 0;
        *num = 0;
 
-       for(; i < maxlen; i++) {
+       for (; i < maxlen; i++) {
                char c;
                *num <<= 4;
                if (get_user(c, &user_buffer[i]))
@@ -802,7 +786,7 @@ static int count_trail_chars(const char __user * user_buffer,
                        break;
                default:
                        goto done;
-               };
+               }
        }
 done:
        return i;
@@ -845,7 +829,7 @@ static int strn_len(const char __user * user_buffer, unsigned int maxlen)
                        break;
                default:
                        break;
-               };
+               }
        }
 done_str:
        return i;
@@ -874,7 +858,7 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
                n++;
                if (n >= MAX_MPLS_LABELS)
                        return -E2BIG;
-       } while(c == ',');
+       } while (c == ',');
 
        pkt_dev->nr_labels = n;
        return i;
@@ -1503,7 +1487,7 @@ static ssize_t pktgen_if_write(struct file *file,
                if (len < 0) { return len; }
                i += len;
                offset = sprintf(pg_result, "OK: mpls=");
-               for(n = 0; n < pkt_dev->nr_labels; n++)
+               for (n = 0; n < pkt_dev->nr_labels; n++)
                        offset += sprintf(pg_result + offset,
                                          "%08x%s", ntohl(pkt_dev->labels[n]),
                                          n == pkt_dev->nr_labels-1 ? "" : ",");
@@ -1697,13 +1681,13 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
        if_lock(t);
        list_for_each_entry(pkt_dev, &t->if_list, list)
                if (pkt_dev->running)
-                       seq_printf(seq, "%s ", pkt_dev->ifname);
+                       seq_printf(seq, "%s ", pkt_dev->odev->name);
 
        seq_printf(seq, "\nStopped: ");
 
        list_for_each_entry(pkt_dev, &t->if_list, list)
                if (!pkt_dev->running)
-                       seq_printf(seq, "%s ", pkt_dev->ifname);
+                       seq_printf(seq, "%s ", pkt_dev->odev->name);
 
        if (t->result[0])
                seq_printf(seq, "\nResult: %s\n", t->result);
@@ -1849,16 +1833,14 @@ static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
 /*
  * mark a device for removal
  */
-static int pktgen_mark_device(const char *ifname)
+static void pktgen_mark_device(const char *ifname)
 {
        struct pktgen_dev *pkt_dev = NULL;
        const int max_tries = 10, msec_per_try = 125;
        int i = 0;
-       int ret = 0;
 
        mutex_lock(&pktgen_thread_lock);
-       PG_DEBUG(printk("pktgen: pktgen_mark_device marking %s for removal\n",
-                       ifname));
+       pr_debug("pktgen: pktgen_mark_device marking %s for removal\n", ifname);
 
        while (1) {
 
@@ -1867,8 +1849,8 @@ static int pktgen_mark_device(const char *ifname)
                        break;  /* success */
 
                mutex_unlock(&pktgen_thread_lock);
-               PG_DEBUG(printk("pktgen: pktgen_mark_device waiting for %s "
-                               "to disappear....\n", ifname));
+               pr_debug("pktgen: pktgen_mark_device waiting for %s "
+                               "to disappear....\n", ifname);
                schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
                mutex_lock(&pktgen_thread_lock);
 
@@ -1876,79 +1858,91 @@ static int pktgen_mark_device(const char *ifname)
                        printk("pktgen_mark_device: timed out after waiting "
                               "%d msec for device %s to be removed\n",
                               msec_per_try * i, ifname);
-                       ret = 1;
                        break;
                }
 
        }
 
        mutex_unlock(&pktgen_thread_lock);
+}
 
-       return ret;
+static void pktgen_change_name(struct net_device *dev)
+{
+       struct pktgen_thread *t;
+
+       list_for_each_entry(t, &pktgen_threads, th_list) {
+               struct pktgen_dev *pkt_dev;
+
+               list_for_each_entry(pkt_dev, &t->if_list, list) {
+                       if (pkt_dev->odev != dev)
+                               continue;
+
+                       remove_proc_entry(pkt_dev->entry->name, pg_proc_dir);
+
+                       pkt_dev->entry = create_proc_entry(dev->name, 0600,
+                                                          pg_proc_dir);
+                       if (!pkt_dev->entry)
+                               printk(KERN_ERR "pktgen: can't move proc "
+                                      "entry for '%s'\n", dev->name);
+                       break;
+               }
+       }
 }
 
 static int pktgen_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
 {
-       struct net_device *dev = (struct net_device *)(ptr);
+       struct net_device *dev = ptr;
 
        /* It is OK that we do not hold the group lock right now,
         * as we run under the RTNL lock.
         */
 
        switch (event) {
-       case NETDEV_CHANGEADDR:
-       case NETDEV_GOING_DOWN:
-       case NETDEV_DOWN:
-       case NETDEV_UP:
-               /* Ignore for now */
+       case NETDEV_CHANGENAME:
+               pktgen_change_name(dev);
                break;
 
        case NETDEV_UNREGISTER:
                pktgen_mark_device(dev->name);
                break;
-       };
+       }
 
        return NOTIFY_DONE;
 }
 
 /* Associate pktgen_dev with a device. */
 
-static struct net_device *pktgen_setup_dev(struct pktgen_dev *pkt_dev)
+static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
 {
        struct net_device *odev;
+       int err;
 
        /* Clean old setups */
-
        if (pkt_dev->odev) {
                dev_put(pkt_dev->odev);
                pkt_dev->odev = NULL;
        }
 
-       odev = dev_get_by_name(pkt_dev->ifname);
-
+       odev = dev_get_by_name(ifname);
        if (!odev) {
-               printk("pktgen: no such netdevice: \"%s\"\n", pkt_dev->ifname);
-               goto out;
+               printk("pktgen: no such netdevice: \"%s\"\n", ifname);
+               return -ENODEV;
        }
+
        if (odev->type != ARPHRD_ETHER) {
-               printk("pktgen: not an ethernet device: \"%s\"\n",
-                      pkt_dev->ifname);
-               goto out_put;
-       }
-       if (!netif_running(odev)) {
-               printk("pktgen: device is down: \"%s\"\n", pkt_dev->ifname);
-               goto out_put;
+               printk("pktgen: not an ethernet device: \"%s\"\n", ifname);
+               err = -EINVAL;
+       } else if (!netif_running(odev)) {
+               printk("pktgen: device is down: \"%s\"\n", ifname);
+               err = -ENETDOWN;
+       } else {
+               pkt_dev->odev = odev;
+               return 0;
        }
-       pkt_dev->odev = odev;
 
-       return pkt_dev->odev;
-
-out_put:
        dev_put(odev);
-out:
-       return NULL;
-
+       return err;
 }
 
 /* Read pkt_dev from the interface and set up internal pktgen_dev
@@ -1956,10 +1950,6 @@ out:
  */
 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 {
-       /* Try once more, just in case it works now. */
-       if (!pkt_dev->odev)
-               pktgen_setup_dev(pkt_dev);
-
        if (!pkt_dev->odev) {
                printk("pktgen: ERROR: pkt_dev->odev == NULL in setup_inject.\n");
                sprintf(pkt_dev->result,
@@ -2096,7 +2086,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
        int flow = 0;
 
        if (pkt_dev->cflows) {
-               flow = pktgen_random() % pkt_dev->cflows;
+               flow = random32() % pkt_dev->cflows;
 
                if (pkt_dev->flows[flow].count > pkt_dev->lflow)
                        pkt_dev->flows[flow].count = 0;
@@ -2108,7 +2098,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
                __u32 tmp;
 
                if (pkt_dev->flags & F_MACSRC_RND)
-                       mc = pktgen_random() % (pkt_dev->src_mac_count);
+                       mc = random32() % pkt_dev->src_mac_count;
                else {
                        mc = pkt_dev->cur_src_mac_offset++;
                        if (pkt_dev->cur_src_mac_offset >
@@ -2134,7 +2124,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
                __u32 tmp;
 
                if (pkt_dev->flags & F_MACDST_RND)
-                       mc = pktgen_random() % (pkt_dev->dst_mac_count);
+                       mc = random32() % pkt_dev->dst_mac_count;
 
                else {
                        mc = pkt_dev->cur_dst_mac_offset++;
@@ -2158,27 +2148,26 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 
        if (pkt_dev->flags & F_MPLS_RND) {
                unsigned i;
-               for(i = 0; i < pkt_dev->nr_labels; i++)
+               for (i = 0; i < pkt_dev->nr_labels; i++)
                        if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
                                pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
-                                            ((__force __be32)pktgen_random() &
+                                            ((__force __be32)random32() &
                                                      htonl(0x000fffff));
        }
 
        if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
-               pkt_dev->vlan_id = pktgen_random() % 4096;
+               pkt_dev->vlan_id = random32() & (4096-1);
        }
 
        if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
-               pkt_dev->svlan_id = pktgen_random() % 4096;
+               pkt_dev->svlan_id = random32() & (4096 - 1);
        }
 
        if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
                if (pkt_dev->flags & F_UDPSRC_RND)
-                       pkt_dev->cur_udp_src =
-                           ((pktgen_random() %
-                             (pkt_dev->udp_src_max - pkt_dev->udp_src_min)) +
-                            pkt_dev->udp_src_min);
+                       pkt_dev->cur_udp_src = random32() %
+                               (pkt_dev->udp_src_max - pkt_dev->udp_src_min)
+                               + pkt_dev->udp_src_min;
 
                else {
                        pkt_dev->cur_udp_src++;
@@ -2189,10 +2178,9 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 
        if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
                if (pkt_dev->flags & F_UDPDST_RND) {
-                       pkt_dev->cur_udp_dst =
-                           ((pktgen_random() %
-                             (pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)) +
-                            pkt_dev->udp_dst_min);
+                       pkt_dev->cur_udp_dst = random32() %
+                               (pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)
+                               + pkt_dev->udp_dst_min;
                } else {
                        pkt_dev->cur_udp_dst++;
                        if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
@@ -2207,7 +2195,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
                                                               saddr_max))) {
                        __u32 t;
                        if (pkt_dev->flags & F_IPSRC_RND)
-                               t = ((pktgen_random() % (imx - imn)) + imn);
+                               t = random32() % (imx - imn) + imn;
                        else {
                                t = ntohl(pkt_dev->cur_saddr);
                                t++;
@@ -2228,14 +2216,13 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
                                __be32 s;
                                if (pkt_dev->flags & F_IPDST_RND) {
 
-                                       t = pktgen_random() % (imx - imn) + imn;
+                                       t = random32() % (imx - imn) + imn;
                                        s = htonl(t);
 
                                        while (LOOPBACK(s) || MULTICAST(s)
                                               || BADCLASS(s) || ZERONET(s)
                                               || LOCAL_MCAST(s)) {
-                                               t = (pktgen_random() %
-                                                     (imx - imn)) + imn;
+                                               t = random32() % (imx - imn) + imn;
                                                s = htonl(t);
                                        }
                                        pkt_dev->cur_daddr = s;
@@ -2267,7 +2254,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 
                        for (i = 0; i < 4; i++) {
                                pkt_dev->cur_in6_daddr.s6_addr32[i] =
-                                   (((__force __be32)pktgen_random() |
+                                   (((__force __be32)random32() |
                                      pkt_dev->min_in6_daddr.s6_addr32[i]) &
                                     pkt_dev->max_in6_daddr.s6_addr32[i]);
                        }
@@ -2277,9 +2264,9 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
        if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
                __u32 t;
                if (pkt_dev->flags & F_TXSIZE_RND) {
-                       t = ((pktgen_random() %
-                             (pkt_dev->max_pkt_size - pkt_dev->min_pkt_size))
-                            + pkt_dev->min_pkt_size);
+                       t = random32() %
+                               (pkt_dev->max_pkt_size - pkt_dev->min_pkt_size)
+                               + pkt_dev->min_pkt_size;
                } else {
                        t = pkt_dev->cur_pkt_size + 1;
                        if (t > pkt_dev->max_pkt_size)
@@ -2294,7 +2281,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
 {
        unsigned i;
-       for(i = 0; i < pkt_dev->nr_labels; i++) {
+       for (i = 0; i < pkt_dev->nr_labels; i++) {
                *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
        }
        mpls--;
@@ -2316,7 +2303,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
        int datalen, iplen;
        struct iphdr *iph;
        struct pktgen_hdr *pgh = NULL;
-       __be16 protocol = __constant_htons(ETH_P_IP);
+       __be16 protocol = htons(ETH_P_IP);
        __be32 *mpls;
        __be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
        __be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
@@ -2325,10 +2312,10 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 
 
        if (pkt_dev->nr_labels)
-               protocol = __constant_htons(ETH_P_MPLS_UC);
+               protocol = htons(ETH_P_MPLS_UC);
 
        if (pkt_dev->vlan_id != 0xffff)
-               protocol = __constant_htons(ETH_P_8021Q);
+               protocol = htons(ETH_P_8021Q);
 
        /* Update any of the values, used when we're incrementing various
         * fields.
@@ -2354,24 +2341,28 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
                mpls_push(mpls, pkt_dev);
 
        if (pkt_dev->vlan_id != 0xffff) {
-               if(pkt_dev->svlan_id != 0xffff) {
+               if (pkt_dev->svlan_id != 0xffff) {
                        svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
                        *svlan_tci = build_tci(pkt_dev->svlan_id,
                                               pkt_dev->svlan_cfi,
                                               pkt_dev->svlan_p);
                        svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
-                       *svlan_encapsulated_proto = __constant_htons(ETH_P_8021Q);
+                       *svlan_encapsulated_proto = htons(ETH_P_8021Q);
                }
                vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
                *vlan_tci = build_tci(pkt_dev->vlan_id,
                                      pkt_dev->vlan_cfi,
                                      pkt_dev->vlan_p);
                vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
-               *vlan_encapsulated_proto = __constant_htons(ETH_P_IP);
+               *vlan_encapsulated_proto = htons(ETH_P_IP);
        }
 
-       iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr));
-       udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
+       skb->network_header = skb->tail;
+       skb->transport_header = skb->network_header + sizeof(struct iphdr);
+       skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
+
+       iph = ip_hdr(skb);
+       udph = udp_hdr(skb);
 
        memcpy(eth, pkt_dev->hh, 12);
        *(__be16 *) & eth[12] = protocol;
@@ -2400,12 +2391,11 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
        iph->check = 0;
        iph->check = ip_fast_csum((void *)iph, iph->ihl);
        skb->protocol = protocol;
-       skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32) -
-               VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev);
+       skb->mac_header = (skb->network_header - ETH_HLEN -
+                          pkt_dev->nr_labels * sizeof(u32) -
+                          VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev));
        skb->dev = odev;
        skb->pkt_type = PACKET_HOST;
-       skb->nh.iph = iph;
-       skb->h.uh = udph;
 
        if (pkt_dev->nfrags <= 0)
                pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
@@ -2654,7 +2644,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        int datalen;
        struct ipv6hdr *iph;
        struct pktgen_hdr *pgh = NULL;
-       __be16 protocol = __constant_htons(ETH_P_IPV6);
+       __be16 protocol = htons(ETH_P_IPV6);
        __be32 *mpls;
        __be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
        __be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
@@ -2662,10 +2652,10 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
 
        if (pkt_dev->nr_labels)
-               protocol = __constant_htons(ETH_P_MPLS_UC);
+               protocol = htons(ETH_P_MPLS_UC);
 
        if (pkt_dev->vlan_id != 0xffff)
-               protocol = __constant_htons(ETH_P_8021Q);
+               protocol = htons(ETH_P_8021Q);
 
        /* Update any of the values, used when we're incrementing various
         * fields.
@@ -2690,24 +2680,28 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
                mpls_push(mpls, pkt_dev);
 
        if (pkt_dev->vlan_id != 0xffff) {
-               if(pkt_dev->svlan_id != 0xffff) {
+               if (pkt_dev->svlan_id != 0xffff) {
                        svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
                        *svlan_tci = build_tci(pkt_dev->svlan_id,
                                               pkt_dev->svlan_cfi,
                                               pkt_dev->svlan_p);
                        svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
-                       *svlan_encapsulated_proto = __constant_htons(ETH_P_8021Q);
+                       *svlan_encapsulated_proto = htons(ETH_P_8021Q);
                }
                vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
                *vlan_tci = build_tci(pkt_dev->vlan_id,
                                      pkt_dev->vlan_cfi,
                                      pkt_dev->vlan_p);
                vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
-               *vlan_encapsulated_proto = __constant_htons(ETH_P_IPV6);
+               *vlan_encapsulated_proto = htons(ETH_P_IPV6);
        }
 
-       iph = (struct ipv6hdr *)skb_put(skb, sizeof(struct ipv6hdr));
-       udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
+       skb->network_header = skb->tail;
+       skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+       skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
+
+       iph = ipv6_hdr(skb);
+       udph = udp_hdr(skb);
 
        memcpy(eth, pkt_dev->hh, 12);
        *(__be16 *) & eth[12] = protocol;
@@ -2729,7 +2723,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        udph->len = htons(datalen + sizeof(struct udphdr));
        udph->check = 0;        /* No checksum */
 
-       *(__be32 *) iph = __constant_htonl(0x60000000); /* Version + flow */
+       *(__be32 *) iph = htonl(0x60000000);    /* Version + flow */
 
        if (pkt_dev->traffic_class) {
                /* Version + traffic class + flow (0) */
@@ -2744,13 +2738,12 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr);
        ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr);
 
-       skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32) -
-               VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev);
+       skb->mac_header = (skb->network_header - ETH_HLEN -
+                          pkt_dev->nr_labels * sizeof(u32) -
+                          VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev));
        skb->protocol = protocol;
        skb->dev = odev;
        skb->pkt_type = PACKET_HOST;
-       skb->nh.ipv6h = iph;
-       skb->h.uh = udph;
 
        if (pkt_dev->nfrags <= 0)
                pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
@@ -2848,7 +2841,7 @@ static void pktgen_run(struct pktgen_thread *t)
        struct pktgen_dev *pkt_dev;
        int started = 0;
 
-       PG_DEBUG(printk("pktgen: entering pktgen_run. %p\n", t));
+       pr_debug("pktgen: entering pktgen_run. %p\n", t);
 
        if_lock(t);
        list_for_each_entry(pkt_dev, &t->if_list, list) {
@@ -2880,7 +2873,7 @@ static void pktgen_stop_all_threads_ifs(void)
 {
        struct pktgen_thread *t;
 
-       PG_DEBUG(printk("pktgen: entering pktgen_stop_all_threads_ifs.\n"));
+       pr_debug("pktgen: entering pktgen_stop_all_threads_ifs.\n");
 
        mutex_lock(&pktgen_thread_lock);
 
@@ -2948,7 +2941,7 @@ static void pktgen_run_all_threads(void)
 {
        struct pktgen_thread *t;
 
-       PG_DEBUG(printk("pktgen: entering pktgen_run_all_threads.\n"));
+       pr_debug("pktgen: entering pktgen_run_all_threads.\n");
 
        mutex_lock(&pktgen_thread_lock);
 
@@ -3006,7 +2999,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
 
        if (!pkt_dev->running) {
                printk("pktgen: interface: %s is already stopped\n",
-                      pkt_dev->ifname);
+                      pkt_dev->odev->name);
                return -EINVAL;
        }
 
@@ -3040,7 +3033,7 @@ static void pktgen_stop(struct pktgen_thread *t)
 {
        struct pktgen_dev *pkt_dev;
 
-       PG_DEBUG(printk("pktgen: entering pktgen_stop\n"));
+       pr_debug("pktgen: entering pktgen_stop\n");
 
        if_lock(t);
 
@@ -3064,7 +3057,7 @@ static void pktgen_rem_one_if(struct pktgen_thread *t)
        struct list_head *q, *n;
        struct pktgen_dev *cur;
 
-       PG_DEBUG(printk("pktgen: entering pktgen_rem_one_if\n"));
+       pr_debug("pktgen: entering pktgen_rem_one_if\n");
 
        if_lock(t);
 
@@ -3093,7 +3086,7 @@ static void pktgen_rem_all_ifs(struct pktgen_thread *t)
 
        /* Remove all devices, free mem */
 
-       PG_DEBUG(printk("pktgen: entering pktgen_rem_all_ifs\n"));
+       pr_debug("pktgen: entering pktgen_rem_all_ifs\n");
        if_lock(t);
 
        list_for_each_safe(q, n, &t->if_list) {
@@ -3276,7 +3269,7 @@ static int pktgen_thread_worker(void *arg)
 
        t->pid = current->pid;
 
-       PG_DEBUG(printk("pktgen: starting pktgen/%d:  pid=%d\n", cpu, current->pid));
+       pr_debug("pktgen: starting pktgen/%d:  pid=%d\n", cpu, current->pid);
 
        max_before_softirq = t->max_before_softirq;
 
@@ -3339,13 +3332,13 @@ static int pktgen_thread_worker(void *arg)
                set_current_state(TASK_INTERRUPTIBLE);
        }
 
-       PG_DEBUG(printk("pktgen: %s stopping all device\n", t->tsk->comm));
+       pr_debug("pktgen: %s stopping all devices\n", t->tsk->comm);
        pktgen_stop(t);
 
-       PG_DEBUG(printk("pktgen: %s removing all device\n", t->tsk->comm));
+       pr_debug("pktgen: %s removing all devices\n", t->tsk->comm);
        pktgen_rem_all_ifs(t);
 
-       PG_DEBUG(printk("pktgen: %s removing thread.\n", t->tsk->comm));
+       pr_debug("pktgen: %s removing thread.\n", t->tsk->comm);
        pktgen_rem_thread(t);
 
        return 0;
@@ -3358,13 +3351,13 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
        if_lock(t);
 
        list_for_each_entry(p, &t->if_list, list)
-               if (strncmp(p->ifname, ifname, IFNAMSIZ) == 0) {
+               if (strncmp(p->odev->name, ifname, IFNAMSIZ) == 0) {
                        pkt_dev = p;
                        break;
                }
 
        if_unlock(t);
-       PG_DEBUG(printk("pktgen: find_dev(%s) returning %p\n", ifname, pkt_dev));
+       pr_debug("pktgen: find_dev(%s) returning %p\n", ifname, pkt_dev);
        return pkt_dev;
 }
 
@@ -3399,7 +3392,7 @@ out:
 static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
 {
        struct pktgen_dev *pkt_dev;
-       struct proc_dir_entry *pe;
+       int err;
 
        /* We don't allow a device to be on several threads */
 
@@ -3441,29 +3434,28 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
        pkt_dev->svlan_cfi = 0;
        pkt_dev->svlan_id = 0xffff;
 
-       strncpy(pkt_dev->ifname, ifname, IFNAMSIZ);
+       err = pktgen_setup_dev(pkt_dev, ifname);
+       if (err)
+               goto out1;
 
-       if (!pktgen_setup_dev(pkt_dev)) {
-               printk("pktgen: ERROR: pktgen_setup_dev failed.\n");
-               if (pkt_dev->flows)
-                       vfree(pkt_dev->flows);
-               kfree(pkt_dev);
-               return -ENODEV;
-       }
-
-       pe = create_proc_entry(ifname, 0600, pg_proc_dir);
-       if (!pe) {
+       pkt_dev->entry = create_proc_entry(ifname, 0600, pg_proc_dir);
+       if (!pkt_dev->entry) {
                printk("pktgen: cannot create %s/%s procfs entry.\n",
                       PG_PROC_DIR, ifname);
-               if (pkt_dev->flows)
-                       vfree(pkt_dev->flows);
-               kfree(pkt_dev);
-               return -EINVAL;
+               err = -EINVAL;
+               goto out2;
        }
-       pe->proc_fops = &pktgen_if_fops;
-       pe->data = pkt_dev;
+       pkt_dev->entry->proc_fops = &pktgen_if_fops;
+       pkt_dev->entry->data = pkt_dev;
 
        return add_dev_to_thread(t, pkt_dev);
+out2:
+       dev_put(pkt_dev->odev);
+out1:
+       if (pkt_dev->flows)
+               vfree(pkt_dev->flows);
+       kfree(pkt_dev);
+       return err;
 }
 
 static int __init pktgen_create_thread(int cpu)
@@ -3533,7 +3525,7 @@ static int pktgen_remove_device(struct pktgen_thread *t,
                                struct pktgen_dev *pkt_dev)
 {
 
-       PG_DEBUG(printk("pktgen: remove_device pkt_dev=%p\n", pkt_dev));
+       pr_debug("pktgen: remove_device pkt_dev=%p\n", pkt_dev);
 
        if (pkt_dev->running) {
                printk("pktgen:WARNING: trying to remove a running interface, stopping it now.\n");
@@ -3551,9 +3543,8 @@ static int pktgen_remove_device(struct pktgen_thread *t,
 
        _rem_dev_from_if_list(t, pkt_dev);
 
-       /* Clean up proc file system */
-
-       remove_proc_entry(pkt_dev->ifname, pg_proc_dir);
+       if (pkt_dev->entry)
+               remove_proc_entry(pkt_dev->entry->name, pg_proc_dir);
 
        if (pkt_dev->flows)
                vfree(pkt_dev->flows);
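With pktgen_random() gone, bounded values come straight from random32(): a modulo for arbitrary ranges, or a mask when the range is a power of two (the 4096 VLAN space above). A small sketch of the two forms; the helper names are illustrative only:

    #include <linux/random.h>
    #include <linux/types.h>

    /* Sketch: the two bounding styles used in mod_cur_headers(). */
    static u32 rnd_in_range(u32 min, u32 max)       /* hypothetical helper */
    {
            return random32() % (max - min) + min;  /* arbitrary range [min, max) */
    }

    static u16 rnd_vlan_id(void)                    /* hypothetical helper */
    {
            return random32() & (4096 - 1);         /* power-of-two range: cheap mask */
    }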
index 33ea8ea..cec1111 100644 (file)
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <net/fib_rules.h>
-#include <net/netlink.h>
-#ifdef CONFIG_NET_WIRELESS_RTNETLINK
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-#endif /* CONFIG_NET_WIRELESS_RTNETLINK */
+#include <net/rtnetlink.h>
+
+struct rtnl_link
+{
+       rtnl_doit_func          doit;
+       rtnl_dumpit_func        dumpit;
+};
 
 static DEFINE_MUTEX(rtnl_mutex);
 static struct sock *rtnl;
@@ -95,7 +97,151 @@ int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
        return 0;
 }
 
-struct rtnetlink_link * rtnetlink_links[NPROTO];
+static struct rtnl_link *rtnl_msg_handlers[NPROTO];
+
+static inline int rtm_msgindex(int msgtype)
+{
+       int msgindex = msgtype - RTM_BASE;
+
+       /*
+        * msgindex < 0 implies someone tried to register a netlink
+        * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
+        * the message type has not been added to linux/rtnetlink.h
+        */
+       BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
+
+       return msgindex;
+}
+
+static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
+{
+       struct rtnl_link *tab;
+
+       tab = rtnl_msg_handlers[protocol];
+       if (tab == NULL || tab[msgindex].doit == NULL)
+               tab = rtnl_msg_handlers[PF_UNSPEC];
+
+       return tab ? tab[msgindex].doit : NULL;
+}
+
+static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
+{
+       struct rtnl_link *tab;
+
+       tab = rtnl_msg_handlers[protocol];
+       if (tab == NULL || tab[msgindex].dumpit == NULL)
+               tab = rtnl_msg_handlers[PF_UNSPEC];
+
+       return tab ? tab[msgindex].dumpit : NULL;
+}
+
+/**
+ * __rtnl_register - Register a rtnetlink message type
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ *
+ * Registers the specified function pointers (at least one of them has
+ * to be non-NULL) to be called whenever a request message for the
+ * specified protocol family and message type is received.
+ *
+ * The special protocol family PF_UNSPEC may be used to define fallback
+ * function pointers for the case when no entry for the specific protocol
+ * family exists.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __rtnl_register(int protocol, int msgtype,
+                   rtnl_doit_func doit, rtnl_dumpit_func dumpit)
+{
+       struct rtnl_link *tab;
+       int msgindex;
+
+       BUG_ON(protocol < 0 || protocol >= NPROTO);
+       msgindex = rtm_msgindex(msgtype);
+
+       tab = rtnl_msg_handlers[protocol];
+       if (tab == NULL) {
+               tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
+               if (tab == NULL)
+                       return -ENOBUFS;
+
+               rtnl_msg_handlers[protocol] = tab;
+       }
+
+       if (doit)
+               tab[msgindex].doit = doit;
+
+       if (dumpit)
+               tab[msgindex].dumpit = dumpit;
+
+       return 0;
+}
+
+EXPORT_SYMBOL_GPL(__rtnl_register);
+
+/**
+ * rtnl_register - Register a rtnetlink message type
+ *
+ * Identical to __rtnl_register() but panics on failure. This is useful
+ * as failure of this function is very unlikely, it can only happen due
+ * to lack of memory when allocating the chain to store all message
+ * handlers for a protocol. Meant for use in init functions where lack
+ * of memory implies no sense in continuing.
+ */
+void rtnl_register(int protocol, int msgtype,
+                  rtnl_doit_func doit, rtnl_dumpit_func dumpit)
+{
+       if (__rtnl_register(protocol, msgtype, doit, dumpit) < 0)
+               panic("Unable to register rtnetlink message handler, "
+                     "protocol = %d, message type = %d\n",
+                     protocol, msgtype);
+}
+
+EXPORT_SYMBOL_GPL(rtnl_register);
+
+/**
+ * rtnl_unregister - Unregister a rtnetlink message type
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rtnl_unregister(int protocol, int msgtype)
+{
+       int msgindex;
+
+       BUG_ON(protocol < 0 || protocol >= NPROTO);
+       msgindex = rtm_msgindex(msgtype);
+
+       if (rtnl_msg_handlers[protocol] == NULL)
+               return -ENOENT;
+
+       rtnl_msg_handlers[protocol][msgindex].doit = NULL;
+       rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
+
+       return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtnl_unregister);
+
+/**
+ * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
+ * @protocol: Protocol family or PF_UNSPEC
+ *
+ * Identical to calling rtnl_unregister() for all registered message types
+ * of a certain protocol family.
+ */
+void rtnl_unregister_all(int protocol)
+{
+       BUG_ON(protocol < 0 || protocol >= NPROTO);
+
+       kfree(rtnl_msg_handlers[protocol]);
+       rtnl_msg_handlers[protocol] = NULL;
+}
+
+EXPORT_SYMBOL_GPL(rtnl_unregister_all);
 
 static const int rtm_min[RTM_NR_FAMILIES] =
 {
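The kernel-doc above defines the registration contract, and rtnl_get_doit()/rtnl_get_dumpit() fall back to the PF_UNSPEC table when a family has no entry of its own. A hedged sketch of how a modular protocol family would pair registration with rtnl_unregister_all() on exit; PF_EXAMPLE and the two handlers are hypothetical names, not part of this patch:

    #include <net/rtnetlink.h>
    #include <linux/init.h>

    extern int example_newfoo(struct sk_buff *, struct nlmsghdr *, void *);   /* hypothetical */
    extern int example_dumpfoo(struct sk_buff *, struct netlink_callback *);  /* hypothetical */

    /* Sketch: a module uses __rtnl_register() so allocation failure can be
     * reported instead of panicking, and tears everything down on exit.
     * PF_EXAMPLE stands in for a real protocol family constant. */
    static int __init example_init(void)
    {
            int err;

            err = __rtnl_register(PF_EXAMPLE, RTM_NEWROUTE, example_newfoo, NULL);
            if (err < 0)
                    return err;

            err = __rtnl_register(PF_EXAMPLE, RTM_GETROUTE, NULL, example_dumpfoo);
            if (err < 0)
                    rtnl_unregister_all(PF_EXAMPLE);
            return err;
    }

    static void __exit example_exit(void)
    {
            rtnl_unregister_all(PF_EXAMPLE);        /* frees the per-family table */
    }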
@@ -249,7 +395,7 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
                    operstate == IF_OPER_UNKNOWN)
                        operstate = IF_OPER_DORMANT;
                break;
-       };
+       }
 
        if (dev->operstate != operstate) {
                write_lock_bh(&dev_base_lock);
@@ -393,7 +539,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        int s_idx = cb->args[0];
        struct net_device *dev;
 
-       read_lock(&dev_base_lock);
        for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
                if (idx < s_idx)
                        continue;
@@ -402,7 +547,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                                     cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0)
                        break;
        }
-       read_unlock(&dev_base_lock);
        cb->args[0] = idx;
 
        return skb->len;
@@ -536,17 +680,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                modified = 1;
        }
 
-#ifdef CONFIG_NET_WIRELESS_RTNETLINK
-       if (tb[IFLA_WIRELESS]) {
-               /* Call Wireless Extensions.
-                * Various stuff checked in there... */
-               err = wireless_rtnetlink_set(dev, nla_data(tb[IFLA_WIRELESS]),
-                                            nla_len(tb[IFLA_WIRELESS]));
-               if (err < 0)
-                       goto errout_dev;
-       }
-#endif /* CONFIG_NET_WIRELESS_RTNETLINK */
-
        if (tb[IFLA_BROADCAST]) {
                nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
                send_addr_notify = 1;
@@ -610,22 +743,6 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
        } else
                return -EINVAL;
 
-
-#ifdef CONFIG_NET_WIRELESS_RTNETLINK
-       if (tb[IFLA_WIRELESS]) {
-               /* Call Wireless Extensions. We need to know the size before
-                * we can alloc. Various stuff checked in there... */
-               err = wireless_rtnetlink_get(dev, nla_data(tb[IFLA_WIRELESS]),
-                                            nla_len(tb[IFLA_WIRELESS]),
-                                            &iw_buf, &iw_buf_len);
-               if (err < 0)
-                       goto errout;
-
-               /* Payload is at an offset in buffer */
-               iw = iw_buf + IW_EV_POINT_OFF;
-       }
-#endif /* CONFIG_NET_WIRELESS_RTNETLINK */
-
        nskb = nlmsg_new(if_nlmsg_size(iw_buf_len), GFP_KERNEL);
        if (nskb == NULL) {
                err = -ENOBUFS;
@@ -659,12 +776,12 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
                int type = cb->nlh->nlmsg_type-RTM_BASE;
                if (idx < s_idx || idx == PF_PACKET)
                        continue;
-               if (rtnetlink_links[idx] == NULL ||
-                   rtnetlink_links[idx][type].dumpit == NULL)
+               if (rtnl_msg_handlers[idx] == NULL ||
+                   rtnl_msg_handlers[idx][type].dumpit == NULL)
                        continue;
                if (idx > s_idx)
                        memset(&cb->args[0], 0, sizeof(cb->args));
-               if (rtnetlink_links[idx][type].dumpit(skb, cb))
+               if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
                        break;
        }
        cb->family = idx;
@@ -700,30 +817,18 @@ static int rtattr_max;
 
 /* Process one rtnetlink message. */
 
-static __inline__ int
-rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
+static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-       struct rtnetlink_link *link;
-       struct rtnetlink_link *link_tab;
+       rtnl_doit_func doit;
        int sz_idx, kind;
        int min_len;
        int family;
        int type;
        int err;
 
-       /* Only requests are handled by kernel now */
-       if (!(nlh->nlmsg_flags&NLM_F_REQUEST))
-               return 0;
-
        type = nlh->nlmsg_type;
-
-       /* A control message: ignore them */
-       if (type < RTM_BASE)
-               return 0;
-
-       /* Unknown message: reply with EINVAL */
        if (type > RTM_MAX)
-               goto err_inval;
+               return -EOPNOTSUPP;
 
        type -= RTM_BASE;
 
@@ -732,45 +837,33 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
                return 0;
 
        family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family;
-       if (family >= NPROTO) {
-               *errp = -EAFNOSUPPORT;
-               return -1;
-       }
-
-       link_tab = rtnetlink_links[family];
-       if (link_tab == NULL)
-               link_tab = rtnetlink_links[PF_UNSPEC];
-       link = &link_tab[type];
+       if (family >= NPROTO)
+               return -EAFNOSUPPORT;
 
        sz_idx = type>>2;
        kind = type&3;
 
-       if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) {
-               *errp = -EPERM;
-               return -1;
-       }
+       if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
+               return -EPERM;
 
        if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
-               if (link->dumpit == NULL)
-                       link = &(rtnetlink_links[PF_UNSPEC][type]);
-
-               if (link->dumpit == NULL)
-                       goto err_inval;
+               rtnl_dumpit_func dumpit;
 
-               if ((*errp = netlink_dump_start(rtnl, skb, nlh,
-                                               link->dumpit, NULL)) != 0) {
-                       return -1;
-               }
+               dumpit = rtnl_get_dumpit(family, type);
+               if (dumpit == NULL)
+                       return -EOPNOTSUPP;
 
-               netlink_queue_skip(nlh, skb);
-               return -1;
+               __rtnl_unlock();
+               err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
+               rtnl_lock();
+               return err;
        }
 
        memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
 
        min_len = rtm_min[sz_idx];
        if (nlh->nlmsg_len < min_len)
-               goto err_inval;
+               return -EINVAL;
 
        if (nlh->nlmsg_len > min_len) {
                int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
@@ -780,25 +873,18 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
                        unsigned flavor = attr->rta_type;
                        if (flavor) {
                                if (flavor > rta_max[sz_idx])
-                                       goto err_inval;
+                                       return -EINVAL;
                                rta_buf[flavor-1] = attr;
                        }
                        attr = RTA_NEXT(attr, attrlen);
                }
        }
 
-       if (link->doit == NULL)
-               link = &(rtnetlink_links[PF_UNSPEC][type]);
-       if (link->doit == NULL)
-               goto err_inval;
-       err = link->doit(skb, nlh, (void *)&rta_buf[0]);
+       doit = rtnl_get_doit(family, type);
+       if (doit == NULL)
+               return -EOPNOTSUPP;
 
-       *errp = err;
-       return err;
-
-err_inval:
-       *errp = -EINVAL;
-       return -1;
+       return doit(skb, nlh, (void *)&rta_buf[0]);
 }
 
 static void rtnetlink_rcv(struct sock *sk, int len)
@@ -814,25 +900,6 @@ static void rtnetlink_rcv(struct sock *sk, int len)
        } while (qlen);
 }
 
-static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] =
-{
-       [RTM_GETLINK     - RTM_BASE] = { .doit   = rtnl_getlink,
-                                        .dumpit = rtnl_dump_ifinfo      },
-       [RTM_SETLINK     - RTM_BASE] = { .doit   = rtnl_setlink          },
-       [RTM_GETADDR     - RTM_BASE] = { .dumpit = rtnl_dump_all         },
-       [RTM_GETROUTE    - RTM_BASE] = { .dumpit = rtnl_dump_all         },
-       [RTM_NEWNEIGH    - RTM_BASE] = { .doit   = neigh_add             },
-       [RTM_DELNEIGH    - RTM_BASE] = { .doit   = neigh_delete          },
-       [RTM_GETNEIGH    - RTM_BASE] = { .dumpit = neigh_dump_info       },
-#ifdef CONFIG_FIB_RULES
-       [RTM_NEWRULE     - RTM_BASE] = { .doit   = fib_nl_newrule        },
-       [RTM_DELRULE     - RTM_BASE] = { .doit   = fib_nl_delrule        },
-#endif
-       [RTM_GETRULE     - RTM_BASE] = { .dumpit = rtnl_dump_all         },
-       [RTM_GETNEIGHTBL - RTM_BASE] = { .dumpit = neightbl_dump_info    },
-       [RTM_SETNEIGHTBL - RTM_BASE] = { .doit   = neightbl_set          },
-};
-
 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
        struct net_device *dev = ptr;
@@ -874,19 +941,22 @@ void __init rtnetlink_init(void)
                panic("rtnetlink_init: cannot allocate rta_buf\n");
 
        rtnl = netlink_kernel_create(NETLINK_ROUTE, RTNLGRP_MAX, rtnetlink_rcv,
-                                    THIS_MODULE);
+                                    &rtnl_mutex, THIS_MODULE);
        if (rtnl == NULL)
                panic("rtnetlink_init: cannot initialize rtnetlink\n");
        netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
        register_netdevice_notifier(&rtnetlink_dev_notifier);
-       rtnetlink_links[PF_UNSPEC] = link_rtnetlink_table;
-       rtnetlink_links[PF_PACKET] = link_rtnetlink_table;
+
+       rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo);
+       rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL);
+
+       rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all);
+       rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all);
 }
 
 EXPORT_SYMBOL(__rta_fill);
 EXPORT_SYMBOL(rtattr_strlcpy);
 EXPORT_SYMBOL(rtattr_parse);
-EXPORT_SYMBOL(rtnetlink_links);
 EXPORT_SYMBOL(rtnetlink_put_metrics);
 EXPORT_SYMBOL(rtnl_lock);
 EXPORT_SYMBOL(rtnl_trylock);
index 336958f..32f087b 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/cache.h>
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
+#include <linux/scatterlist.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
@@ -87,8 +88,9 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 {
        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
-                         "data:%p tail:%p end:%p dev:%s\n",
-              here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
+                         "data:%p tail:%#lx end:%#lx dev:%s\n",
+              here, skb->len, sz, skb->head, skb->data,
+              (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
@@ -105,8 +107,9 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 {
        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
-                         "data:%p tail:%p end:%p dev:%s\n",
-              here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
+                         "data:%p tail:%#lx end:%#lx dev:%s\n",
+              here, skb->len, sz, skb->head, skb->data,
+              (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
@@ -155,20 +158,22 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        if (!skb)
                goto out;
 
-       /* Get the DATA. Size must match skb_add_mtu(). */
        size = SKB_DATA_ALIGN(size);
        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
                        gfp_mask, node);
        if (!data)
                goto nodata;
 
-       memset(skb, 0, offsetof(struct sk_buff, truesize));
+       /*
+        * See comment in sk_buff definition, just before the 'tail' member
+        */
+       memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = size + sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
-       skb->tail = data;
-       skb->end  = data + size;
+       skb_reset_tail_pointer(skb);
+       skb->end = skb->tail + size;
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        atomic_set(&shinfo->dataref, 1);
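On configurations that define NET_SKBUFF_DATA_USES_OFFSET (64-bit builds in this series), skb->tail and skb->end hold offsets from skb->head instead of pointers, which is why the panic messages above cast them to unsigned long and why the allocator now goes through skb_reset_tail_pointer(). Roughly, the accessors this code relies on reduce to the following; this is a paraphrase for illustration, not a copy of skbuff.h:

    /* Sketch (offset case only): tail/end are stored relative to skb->head. */
    static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
    {
            return skb->head + skb->tail;           /* tail is an offset here */
    }

    static inline void skb_reset_tail_pointer(struct sk_buff *skb)
    {
            skb->tail = skb->data - skb->head;      /* make tail track data */
    }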
@@ -299,7 +304,7 @@ void kfree_skbmem(struct sk_buff *skb)
                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, other);
                break;
-       };
+       }
 }
 
 /**
@@ -321,15 +326,13 @@ void __kfree_skb(struct sk_buff *skb)
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
-#ifdef CONFIG_NETFILTER
-       nf_conntrack_put(skb->nfct);
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+       nf_conntrack_put(skb->nfct);
        nf_conntrack_put_reasm(skb->nfct_reasm);
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
 #endif
-#endif
 /* XXX: IS this still necessary? - JHS */
 #ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
@@ -396,9 +399,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
        n->sk = NULL;
        C(tstamp);
        C(dev);
-       C(h);
-       C(nh);
-       C(mac);
+       C(transport_header);
+       C(network_header);
+       C(mac_header);
        C(dst);
        dst_clone(skb->dst);
        C(sp);
@@ -422,19 +425,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
        C(protocol);
        n->destructor = NULL;
        C(mark);
-#ifdef CONFIG_NETFILTER
-       C(nfct);
-       nf_conntrack_get(skb->nfct);
-       C(nfctinfo);
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-       C(nfct_reasm);
-       nf_conntrack_get_reasm(skb->nfct_reasm);
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
-       C(nf_bridge);
-       nf_bridge_get(skb->nf_bridge);
-#endif
-#endif /*CONFIG_NETFILTER*/
+       __nf_copy(n, skb);
 #ifdef CONFIG_NET_SCHED
        C(tc_index);
 #ifdef CONFIG_NET_CLS_ACT
@@ -460,11 +451,12 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 
 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
+#ifndef NET_SKBUFF_DATA_USES_OFFSET
        /*
         *      Shift between the two data areas in bytes
         */
        unsigned long offset = new->data - old->data;
-
+#endif
        new->sk         = NULL;
        new->dev        = old->dev;
        new->priority   = old->priority;
@@ -473,9 +465,15 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #ifdef CONFIG_INET
        new->sp         = secpath_get(old->sp);
 #endif
-       new->h.raw      = old->h.raw + offset;
-       new->nh.raw     = old->nh.raw + offset;
-       new->mac.raw    = old->mac.raw + offset;
+       new->transport_header = old->transport_header;
+       new->network_header   = old->network_header;
+       new->mac_header       = old->mac_header;
+#ifndef NET_SKBUFF_DATA_USES_OFFSET
+       /* {transport,network,mac}_header are relative to skb->head */
+       new->transport_header += offset;
+       new->network_header   += offset;
+       new->mac_header       += offset;
+#endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->local_df   = old->local_df;
        new->fclone     = SKB_FCLONE_UNAVAILABLE;
@@ -483,22 +481,10 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->tstamp     = old->tstamp;
        new->destructor = NULL;
        new->mark       = old->mark;
-#ifdef CONFIG_NETFILTER
-       new->nfct       = old->nfct;
-       nf_conntrack_get(old->nfct);
-       new->nfctinfo   = old->nfctinfo;
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-       new->nfct_reasm = old->nfct_reasm;
-       nf_conntrack_get_reasm(old->nfct_reasm);
-#endif
+       __nf_copy(new, old);
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        new->ipvs_property = old->ipvs_property;
 #endif
-#ifdef CONFIG_BRIDGE_NETFILTER
-       new->nf_bridge  = old->nf_bridge;
-       nf_bridge_get(old->nf_bridge);
-#endif
-#endif
 #ifdef CONFIG_NET_SCHED
 #ifdef CONFIG_NET_CLS_ACT
        new->tc_verd = old->tc_verd;
@@ -535,8 +521,12 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
        /*
         *      Allocate the copy buffer
         */
-       struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
-                                     gfp_mask);
+       struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       n = alloc_skb(skb->end + skb->data_len, gfp_mask);
+#else
+       n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
+#endif
        if (!n)
                return NULL;
 
@@ -573,8 +563,12 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
        /*
         *      Allocate the copy buffer
         */
-       struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
-
+       struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       n = alloc_skb(skb->end, gfp_mask);
+#else
+       n = alloc_skb(skb->end - skb->head, gfp_mask);
+#endif
        if (!n)
                goto out;
 
@@ -583,7 +577,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
-       memcpy(n->data, skb->data, n->len);
+       skb_copy_from_linear_data(skb, n->data, n->len);
        n->csum      = skb->csum;
        n->ip_summed = skb->ip_summed;
 
@@ -632,7 +626,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 {
        int i;
        u8 *data;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       int size = nhead + skb->end + ntail;
+#else
        int size = nhead + (skb->end - skb->head) + ntail;
+#endif
        long off;
 
        if (skb_shared(skb))
@@ -646,8 +644,14 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 
        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void. */
-       memcpy(data + nhead, skb->head, skb->tail - skb->head);
-       memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
+       memcpy(data + nhead, skb->head,
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+               skb->tail);
+#else
+               skb->tail - skb->head);
+#endif
+       memcpy(data + size, skb_end_pointer(skb),
+              sizeof(struct skb_shared_info));
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                get_page(skb_shinfo(skb)->frags[i].page);
@@ -660,12 +664,18 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
        off = (data + nhead) - skb->head;
 
        skb->head     = data;
-       skb->end      = data + size;
        skb->data    += off;
-       skb->tail    += off;
-       skb->mac.raw += off;
-       skb->h.raw   += off;
-       skb->nh.raw  += off;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       skb->end      = size;
+       off           = nhead;
+#else
+       skb->end      = skb->head + size;
+#endif
+       /* {transport,network,mac}_header and tail are relative to skb->head */
+       skb->tail             += off;
+       skb->transport_header += off;
+       skb->network_header   += off;
+       skb->mac_header       += off;
        skb->cloned   = 0;
        skb->nohdr    = 0;
        atomic_set(&skb_shinfo(skb)->dataref, 1);
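The two meanings of 'off' in the hunk above are worth spelling out. A short worked example with illustrative numbers:

        /*
         * The old buffer starts at H with 16 bytes of headroom
         * (data = H + 16), and the caller asks for nhead = 32 extra bytes in
         * front.  The old contents starting at H are copied to D + 32 in the
         * new buffer, so the payload moves from H + 16 to D + 48.
         *
         * Pointer mode:  off = (D + 32) - H, and data, tail and the header
         *                pointers are all shifted by that byte delta.
         * Offset mode:   tail and {transport,network,mac}_header are offsets
         *                from skb->head, so only the added headroom matters
         *                and off is reassigned to nhead = 32.  skb->data is
         *                still a real pointer, which is why it was adjusted
         *                with the pointer delta before the reassignment.
         */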
@@ -726,7 +736,9 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
         */
        struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
                                      gfp_mask);
+       int oldheadroom = skb_headroom(skb);
        int head_copy_len, head_copy_off;
+       int off = 0;
 
        if (!n)
                return NULL;
@@ -736,7 +748,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
        /* Set the tail pointer and length */
        skb_put(n, skb->len);
 
-       head_copy_len = skb_headroom(skb);
+       head_copy_len = oldheadroom;
        head_copy_off = 0;
        if (newheadroom <= head_copy_len)
                head_copy_len = newheadroom;
@@ -750,6 +762,13 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 
        copy_skb_header(n, skb);
 
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       off                  = newheadroom - oldheadroom;
+#endif
+       n->transport_header += off;
+       n->network_header   += off;
+       n->mac_header       += off;
+
        return n;
 }
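For reference, a typical skb_copy_expand() caller looks roughly like the sketch below (ENCAP_HLEN and the error handling are illustrative, not taken from this patch); the header-offset fixup added at the end of the hunk is what keeps the copied skb's mac/network/transport offsets valid after the headroom change:

        #define ENCAP_HLEN 8    /* illustrative amount of extra header room */

        static struct sk_buff *copy_with_encap_room(struct sk_buff *skb)
        {
                struct sk_buff *nskb;

                nskb = skb_copy_expand(skb, skb_headroom(skb) + ENCAP_HLEN,
                                       skb_tailroom(skb), GFP_ATOMIC);
                if (!nskb)
                        return NULL;
                /* nskb's mac/network/transport offsets were shifted by
                 * (newheadroom - oldheadroom) = ENCAP_HLEN above */
                return nskb;
        }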
 
@@ -877,7 +896,7 @@ done:
        } else {
                skb->len       = len;
                skb->data_len  = 0;
-               skb->tail      = skb->data + len;
+               skb_set_tail_pointer(skb, len);
        }
 
        return 0;
@@ -922,7 +941,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
                        return NULL;
        }
 
-       if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
+       if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
                BUG();
 
        /* Optimization: no fragments, no reasons to preestimate
@@ -1018,7 +1037,7 @@ pull_pages:
        skb->tail     += delta;
        skb->data_len -= delta;
 
-       return skb->tail;
+       return skb_tail_pointer(skb);
 }
 
 /* Copy some data bits from skb to kernel buffer. */
@@ -1026,16 +1045,16 @@ pull_pages:
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
        int i, copy;
-       int start = skb_headlen(skb);
+       int end = skb_headlen(skb);
 
        if (offset > (int)skb->len - len)
                goto fault;
 
        /* Copy header. */
-       if ((copy = start - offset) > 0) {
+       if ((copy = end - offset) > 0) {
                if (copy > len)
                        copy = len;
-               memcpy(to, skb->data + offset, copy);
+               skb_copy_from_linear_data_offset(skb, offset, to, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
@@ -1043,11 +1062,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
+               BUG_TRAP(len >= 0);
 
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
+               end = offset + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        u8 *vaddr;
 
@@ -1056,8 +1073,8 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 
                        vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
                        memcpy(to,
-                              vaddr + skb_shinfo(skb)->frags[i].page_offset+
-                              offset - start, copy);
+                              vaddr + skb_shinfo(skb)->frags[i].page_offset,
+                              copy);
                        kunmap_skb_frag(vaddr);
 
                        if ((len -= copy) == 0)
@@ -1065,30 +1082,25 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
                        offset += copy;
                        to     += copy;
                }
-               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
+                       BUG_TRAP(len >= 0);
 
-                       end = start + list->len;
+                       end = offset + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
-                               if (skb_copy_bits(list, offset - start,
-                                                 to, copy))
+                               if (skb_copy_bits(list, 0, to, copy))
                                        goto fault;
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                                to     += copy;
                        }
-                       start = end;
                }
        }
        if (!len)
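skb_copy_bits() remains the standard way to read a byte range out of a possibly non-linear skb; a minimal caller sketch (the buffer size and error handling are illustrative):

        static int peek_bytes(const struct sk_buff *skb, int offset)
        {
                u8 buf[64];     /* illustrative size */

                /* copies across the linear head, page frags and frag_list */
                if (skb_copy_bits(skb, offset, buf, sizeof(buf)) < 0)
                        return -EINVAL; /* range ran past skb->len */
                /* ... parse buf ... */
                return 0;
        }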
@@ -1110,18 +1122,18 @@ fault:
  *     traversing fragment lists and such.
  */
 
-int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
        int i, copy;
-       int start = skb_headlen(skb);
+       int end = skb_headlen(skb);
 
        if (offset > (int)skb->len - len)
                goto fault;
 
-       if ((copy = start - offset) > 0) {
+       if ((copy = end - offset) > 0) {
                if (copy > len)
                        copy = len;
-               memcpy(skb->data + offset, from, copy);
+               skb_copy_to_linear_data_offset(skb, offset, from, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
@@ -1130,11 +1142,9 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               int end;
+               BUG_TRAP(len >= 0);
 
-               BUG_TRAP(start <= offset + len);
-
-               end = start + frag->size;
+               end = offset + frag->size;
                if ((copy = end - offset) > 0) {
                        u8 *vaddr;
 
@@ -1142,8 +1152,7 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
                                copy = len;
 
                        vaddr = kmap_skb_frag(frag);
-                       memcpy(vaddr + frag->page_offset + offset - start,
-                              from, copy);
+                       memcpy(vaddr + frag->page_offset, from, copy);
                        kunmap_skb_frag(vaddr);
 
                        if ((len -= copy) == 0)
@@ -1151,30 +1160,25 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
                        offset += copy;
                        from += copy;
                }
-               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
+                       BUG_TRAP(len >= 0);
 
-                       end = start + list->len;
+                       end = offset + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
-                               if (skb_store_bits(list, offset - start,
-                                                  from, copy))
+                               if (skb_store_bits(list, 0, from, copy))
                                        goto fault;
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                                from += copy;
                        }
-                       start = end;
                }
        }
        if (!len)
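skb_store_bits() is the write-side counterpart; the prototype change in this hunk (non-const skb, const source) matches how it is used. A caller sketch (the zero-padding pattern is illustrative):

        static int zero_pad_tail(struct sk_buff *skb, int pad)
        {
                static const u8 zeroes[16];     /* illustrative upper bound */

                if (pad > (int)sizeof(zeroes))
                        return -EINVAL;
                /* write 'pad' zero bytes over the last bytes of the packet */
                return skb_store_bits(skb, skb->len - pad, zeroes, pad);
        }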
@@ -1191,8 +1195,8 @@ EXPORT_SYMBOL(skb_store_bits);
 __wsum skb_checksum(const struct sk_buff *skb, int offset,
                          int len, __wsum csum)
 {
-       int start = skb_headlen(skb);
-       int i, copy = start - offset;
+       int end = skb_headlen(skb);
+       int i, copy = end - offset;
        int pos = 0;
 
        /* Checksum header. */
@@ -1207,11 +1211,9 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
+               BUG_TRAP(len >= 0);
 
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
+               end = offset + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        __wsum csum2;
                        u8 *vaddr;
@@ -1220,8 +1222,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
                        if (copy > len)
                                copy = len;
                        vaddr = kmap_skb_frag(frag);
-                       csum2 = csum_partial(vaddr + frag->page_offset +
-                                            offset - start, copy, 0);
+                       csum2 = csum_partial(vaddr + frag->page_offset,
+                                            copy, 0);
                        kunmap_skb_frag(vaddr);
                        csum = csum_block_add(csum, csum2, pos);
                        if (!(len -= copy))
@@ -1229,31 +1231,26 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
                        offset += copy;
                        pos    += copy;
                }
-               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
+                       BUG_TRAP(len >= 0);
 
-                       end = start + list->len;
+                       end = offset + list->len;
                        if ((copy = end - offset) > 0) {
                                __wsum csum2;
                                if (copy > len)
                                        copy = len;
-                               csum2 = skb_checksum(list, offset - start,
-                                                    copy, 0);
+                               csum2 = skb_checksum(list, 0, copy, 0);
                                csum = csum_block_add(csum, csum2, pos);
                                if ((len -= copy) == 0)
                                        return csum;
                                offset += copy;
                                pos    += copy;
                        }
-                       start = end;
                }
        }
        BUG_ON(len);
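A typical skb_checksum() caller accumulates over the whole packet and folds the running sum at the end; a sketch:

        static __sum16 checksum_whole_skb(const struct sk_buff *skb)
        {
                /* accumulate over head, frags and frag_list, then fold */
                __wsum csum = skb_checksum(skb, 0, skb->len, 0);

                return csum_fold(csum);
        }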
@@ -1266,8 +1263,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                                    u8 *to, int len, __wsum csum)
 {
-       int start = skb_headlen(skb);
-       int i, copy = start - offset;
+       int end = skb_headlen(skb);
+       int i, copy = end - offset;
        int pos = 0;
 
        /* Copy header. */
@@ -1284,11 +1281,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
+               BUG_TRAP(len >= 0);
 
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
+               end = offset + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        __wsum csum2;
                        u8 *vaddr;
@@ -1298,9 +1293,8 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                                copy = len;
                        vaddr = kmap_skb_frag(frag);
                        csum2 = csum_partial_copy_nocheck(vaddr +
-                                                         frag->page_offset +
-                                                         offset - start, to,
-                                                         copy, 0);
+                                                         frag->page_offset,
+                                                         to, copy, 0);
                        kunmap_skb_frag(vaddr);
                        csum = csum_block_add(csum, csum2, pos);
                        if (!(len -= copy))
@@ -1309,7 +1303,6 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                        to     += copy;
                        pos    += copy;
                }
-               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
@@ -1317,16 +1310,13 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
                for (; list; list = list->next) {
                        __wsum csum2;
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
+                       BUG_TRAP(len >= 0);
 
-                       end = start + list->len;
+                       end = offset + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
-                               csum2 = skb_copy_and_csum_bits(list,
-                                                              offset - start,
+                               csum2 = skb_copy_and_csum_bits(list, 0,
                                                               to, copy, 0);
                                csum = csum_block_add(csum, csum2, pos);
                                if ((len -= copy) == 0)
@@ -1335,7 +1325,6 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                                to     += copy;
                                pos    += copy;
                        }
-                       start = end;
                }
        }
        BUG_ON(len);
@@ -1348,13 +1337,13 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
        long csstart;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL)
-               csstart = skb->h.raw - skb->data;
+               csstart = skb->csum_start - skb_headroom(skb);
        else
                csstart = skb_headlen(skb);
 
        BUG_ON(csstart > skb_headlen(skb));
 
-       memcpy(to, skb->data, csstart);
+       skb_copy_from_linear_data(skb, to, csstart);
 
        csum = 0;
        if (csstart != skb->len)
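The csstart conversion above is an identity rewrite: csum_start is an offset from skb->head, so the old "checksum start minus data" distance can be expressed without a header pointer:

        /*
         *   checksum start pointer  =  skb->head + skb->csum_start
         *   skb_headroom(skb)       =  skb->data - skb->head
         *
         *   csstart = (skb->head + skb->csum_start) - skb->data
         *           =  skb->csum_start - skb_headroom(skb)
         */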
@@ -1522,27 +1511,14 @@ void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
        spin_unlock_irqrestore(&list->lock, flags);
 }
 
-#if 0
-/*
- *     Tune the memory allocator for a new MTU size.
- */
-void skb_add_mtu(int mtu)
-{
-       /* Must match allocation in alloc_skb */
-       mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);
-
-       kmem_add_cache_size(mtu);
-}
-#endif
-
 static inline void skb_split_inside_header(struct sk_buff *skb,
                                           struct sk_buff* skb1,
                                           const u32 len, const int pos)
 {
        int i;
 
-       memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);
-
+       skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
+                                        pos - len);
        /* And move data appendix as is. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -1553,7 +1529,7 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
        skb1->len                  += skb1->data_len;
        skb->data_len              = 0;
        skb->len                   = len;
-       skb->tail                  = skb->data + len;
+       skb_set_tail_pointer(skb, len);
 }
 
 static inline void skb_split_no_header(struct sk_buff *skb,
@@ -1878,7 +1854,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
        struct sk_buff *segs = NULL;
        struct sk_buff *tail = NULL;
        unsigned int mss = skb_shinfo(skb)->gso_size;
-       unsigned int doffset = skb->data - skb->mac.raw;
+       unsigned int doffset = skb->data - skb_mac_header(skb);
        unsigned int offset = doffset;
        unsigned int headroom;
        unsigned int len;
@@ -1928,11 +1904,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                nskb->mac_len = skb->mac_len;
 
                skb_reserve(nskb, headroom);
-               nskb->mac.raw = nskb->data;
-               nskb->nh.raw = nskb->data + skb->mac_len;
-               nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
-               memcpy(skb_put(nskb, doffset), skb->data, doffset);
-
+               skb_reset_mac_header(nskb);
+               skb_set_network_header(nskb, skb->mac_len);
+               nskb->transport_header = (nskb->network_header +
+                                         skb_network_header_len(skb));
+               skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
+                                         doffset);
                if (!sg) {
                        nskb->csum = skb_copy_and_csum_bits(skb, offset,
                                                            skb_put(nskb, len),
@@ -1945,7 +1922,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 
                nskb->ip_summed = CHECKSUM_PARTIAL;
                nskb->csum = skb->csum;
-               memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
+               skb_copy_from_linear_data_offset(skb, offset,
+                                                skb_put(nskb, hsize), hsize);
 
                while (pos < offset + len) {
                        BUG_ON(i >= nfrags);
@@ -2005,6 +1983,184 @@ void __init skb_init(void)
                                                NULL, NULL);
 }
 
+/**
+ *     skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+ *     @skb: Socket buffer containing the buffers to be mapped
+ *     @sg: The scatter-gather list to map into
+ *     @offset: The offset into the buffer's contents to start mapping
+ *     @len: Length of buffer space to be mapped
+ *
+ *     Fill the specified scatter-gather list with mappings/pointers into a
+ *     region of the buffer space attached to a socket buffer.
+ */
+int
+skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+       int end = skb_headlen(skb);
+       int i, copy = end - offset;
+       int elt = 0;
+
+       if (copy > 0) {
+               if (copy > len)
+                       copy = len;
+               sg[elt].page = virt_to_page(skb->data + offset);
+               sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
+               sg[elt].length = copy;
+               elt++;
+               if ((len -= copy) == 0)
+                       return elt;
+               offset += copy;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               BUG_TRAP(len >= 0);
+
+               end = offset + skb_shinfo(skb)->frags[i].size;
+               if ((copy = end - offset) > 0) {
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+                       if (copy > len)
+                               copy = len;
+                       sg[elt].page = frag->page;
+                       sg[elt].offset = frag->page_offset;
+                       sg[elt].length = copy;
+                       elt++;
+                       if (!(len -= copy))
+                               return elt;
+                       offset += copy;
+               }
+       }
+
+       if (skb_shinfo(skb)->frag_list) {
+               struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+               for (; list; list = list->next) {
+                       BUG_TRAP(len >= 0);
+
+                       end = offset + list->len;
+                       if ((copy = end - offset) > 0) {
+                               if (copy > len)
+                                       copy = len;
+                               elt += skb_to_sgvec(list, sg+elt, 0, copy);
+                               if ((len -= copy) == 0)
+                                       return elt;
+                               offset += copy;
+                       }
+               }
+       }
+       BUG_ON(len);
+       return elt;
+}
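A caller sketch for skb_to_sgvec(), loosely modelled on the IPsec-style users it was added for (the overflow check and the assumption that the skb has no frag_list are illustrative simplifications):

        static int map_whole_skb(struct sk_buff *skb, struct scatterlist *sg,
                                 int max_elts)
        {
                int elts;

                if (skb_shinfo(skb)->nr_frags + 1 > max_elts)
                        return -EMSGSIZE;       /* illustrative overflow check */

                elts = skb_to_sgvec(skb, sg, 0, skb->len);
                /* 'elts' entries now describe the head and each fragment and
                 * can be handed to the crypto or DMA layer */
                return elts;
        }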
+
+/**
+ *     skb_cow_data - Check that a socket buffer's data buffers are writable
+ *     @skb: The socket buffer to check.
+ *     @tailbits: Amount of trailing space to be added
+ *     @trailer: Returned pointer to the skb where the @tailbits space begins
+ *
+ *     Make sure that the data buffers attached to a socket buffer are
+ *     writable. If they are not, private copies are made of the data buffers
+ *     and the socket buffer is set to use these instead.
+ *
+ *     If @tailbits is given, make sure that there is space to write @tailbits
+ *     bytes of data beyond current end of socket buffer.  @trailer will be
+ *     set to point to the skb in which this space begins.
+ *
+ *     The number of scatterlist elements required to completely map the
+ *     COW'd and extended socket buffer will be returned.
+ */
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
+{
+       int copyflag;
+       int elt;
+       struct sk_buff *skb1, **skb_p;
+
+       /* If the skb is cloned or its head is paged, reallocate the head,
+        * pulling out all the pages (pages are considered not writable
+        * at the moment even if they are anonymous).
+        */
+       if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
+           __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
+               return -ENOMEM;
+
+       /* Easy case: most packets will go this way. */
+       if (!skb_shinfo(skb)->frag_list) {
+               /* A little bit of trouble: not enough space for the trailer.
+                * This should not happen when the stack is tuned to generate
+                * good frames.  On a miss we reallocate and reserve even more
+                * space; 128 bytes is fair. */
+
+               if (skb_tailroom(skb) < tailbits &&
+                   pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
+                       return -ENOMEM;
+
+               /* Voila! */
+               *trailer = skb;
+               return 1;
+       }
+
+       /* The hard case: we are in trouble and have to mince the fragments... */
+
+       elt = 1;
+       skb_p = &skb_shinfo(skb)->frag_list;
+       copyflag = 0;
+
+       while ((skb1 = *skb_p) != NULL) {
+               int ntail = 0;
+
+               /* The fragment may have been partially pulled by someone;
+                * this can happen on input.  Copy it and everything
+                * after it. */
+
+               if (skb_shared(skb1))
+                       copyflag = 1;
+
+               /* If the skb is the last, worry about trailer. */
+
+               if (skb1->next == NULL && tailbits) {
+                       if (skb_shinfo(skb1)->nr_frags ||
+                           skb_shinfo(skb1)->frag_list ||
+                           skb_tailroom(skb1) < tailbits)
+                               ntail = tailbits + 128;
+               }
+
+               if (copyflag ||
+                   skb_cloned(skb1) ||
+                   ntail ||
+                   skb_shinfo(skb1)->nr_frags ||
+                   skb_shinfo(skb1)->frag_list) {
+                       struct sk_buff *skb2;
+
+                       /* No way around it: this fragment has to be copied. */
+                       if (ntail == 0)
+                               skb2 = skb_copy(skb1, GFP_ATOMIC);
+                       else
+                               skb2 = skb_copy_expand(skb1,
+                                                      skb_headroom(skb1),
+                                                      ntail,
+                                                      GFP_ATOMIC);
+                       if (unlikely(skb2 == NULL))
+                               return -ENOMEM;
+
+                       if (skb1->sk)
+                               skb_set_owner_w(skb2, skb1->sk);
+
+                       /* Link the new skb in and drop the old one. */
+
+                       skb2->next = skb1->next;
+                       *skb_p = skb2;
+                       kfree_skb(skb1);
+                       skb1 = skb2;
+               }
+               elt++;
+               *trailer = skb1;
+               skb_p = &skb1->next;
+       }
+
+       return elt;
+}
+
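And a sketch of how skb_cow_data() and skb_to_sgvec() combine in an ESP-like transform (the trailer length and error handling are illustrative):

        static int make_writable_with_trailer(struct sk_buff *skb, int tailbits)
        {
                struct sk_buff *trailer;
                int nsg;

                nsg = skb_cow_data(skb, tailbits, &trailer);
                if (nsg < 0)
                        return nsg;     /* -ENOMEM */
                /* the skb (and its frag_list members) are now private and
                 * writable; 'trailer' has at least 'tailbits' bytes of
                 * tailroom, and 'nsg' is the scatterlist element count a
                 * subsequent skb_to_sgvec() of the whole buffer will need */
                return nsg;
        }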
 EXPORT_SYMBOL(___pskb_trim);
 EXPORT_SYMBOL(__kfree_skb);
 EXPORT_SYMBOL(kfree_skb);
@@ -2039,3 +2195,6 @@ EXPORT_SYMBOL(skb_seq_read);
 EXPORT_SYMBOL(skb_abort_seq_read);
 EXPORT_SYMBOL(skb_find_text);
 EXPORT_SYMBOL(skb_append_datato_frags);
+
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
+EXPORT_SYMBOL_GPL(skb_cow_data);
index 27c4f62..22183c2 100644 (file)
@@ -154,7 +154,8 @@ static const char *af_family_key_strings[AF_MAX+1] = {
   "sk_lock-21"       , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
   "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-29"          ,
-  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_MAX"
+  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
+  "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX"
 };
 static const char *af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -167,7 +168,8 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = {
   "slock-21"       , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
   "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
   "slock-27"       , "slock-28"          , "slock-29"          ,
-  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_MAX"
+  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
+  "slock-AF_RXRPC" , "slock-AF_MAX"
 };
 #endif
 
@@ -361,8 +363,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
        }
 #endif
 
-       if(optlen<sizeof(int))
-               return(-EINVAL);
+       if (optlen < sizeof(int))
+               return -EINVAL;
 
        if (get_user(val, (int __user *)optval))
                return -EFAULT;
@@ -371,265 +373,270 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 
        lock_sock(sk);
 
-       switch(optname)
-       {
-               case SO_DEBUG:
-                       if(val && !capable(CAP_NET_ADMIN))
-                       {
-                               ret = -EACCES;
-                       }
-                       else if (valbool)
-                               sock_set_flag(sk, SOCK_DBG);
-                       else
-                               sock_reset_flag(sk, SOCK_DBG);
-                       break;
-               case SO_REUSEADDR:
-                       sk->sk_reuse = valbool;
-                       break;
-               case SO_TYPE:
-               case SO_ERROR:
-                       ret = -ENOPROTOOPT;
-                       break;
-               case SO_DONTROUTE:
-                       if (valbool)
-                               sock_set_flag(sk, SOCK_LOCALROUTE);
-                       else
-                               sock_reset_flag(sk, SOCK_LOCALROUTE);
-                       break;
-               case SO_BROADCAST:
-                       sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
-                       break;
-               case SO_SNDBUF:
-                       /* Don't error on this BSD doesn't and if you think
-                          about it this is right. Otherwise apps have to
-                          play 'guess the biggest size' games. RCVBUF/SNDBUF
-                          are treated in BSD as hints */
-
-                       if (val > sysctl_wmem_max)
-                               val = sysctl_wmem_max;
+       switch (optname) {
+       case SO_DEBUG:
+               if (val && !capable(CAP_NET_ADMIN)) {
+                       ret = -EACCES;
+               }
+               else if (valbool)
+                       sock_set_flag(sk, SOCK_DBG);
+               else
+                       sock_reset_flag(sk, SOCK_DBG);
+               break;
+       case SO_REUSEADDR:
+               sk->sk_reuse = valbool;
+               break;
+       case SO_TYPE:
+       case SO_ERROR:
+               ret = -ENOPROTOOPT;
+               break;
+       case SO_DONTROUTE:
+               if (valbool)
+                       sock_set_flag(sk, SOCK_LOCALROUTE);
+               else
+                       sock_reset_flag(sk, SOCK_LOCALROUTE);
+               break;
+       case SO_BROADCAST:
+               sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
+               break;
+       case SO_SNDBUF:
+               /* Don't return an error here; BSD doesn't, and if you think
+                  about it this is right.  Otherwise apps have to play
+                  'guess the biggest size' games.  RCVBUF/SNDBUF are
+                  treated in BSD as hints. */
+
+               if (val > sysctl_wmem_max)
+                       val = sysctl_wmem_max;
 set_sndbuf:
-                       sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-                       if ((val * 2) < SOCK_MIN_SNDBUF)
-                               sk->sk_sndbuf = SOCK_MIN_SNDBUF;
-                       else
-                               sk->sk_sndbuf = val * 2;
+               sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+               if ((val * 2) < SOCK_MIN_SNDBUF)
+                       sk->sk_sndbuf = SOCK_MIN_SNDBUF;
+               else
+                       sk->sk_sndbuf = val * 2;
 
-                       /*
-                        *      Wake up sending tasks if we
-                        *      upped the value.
-                        */
-                       sk->sk_write_space(sk);
-                       break;
+               /*
+                *      Wake up sending tasks if we
+                *      upped the value.
+                */
+               sk->sk_write_space(sk);
+               break;
 
-               case SO_SNDBUFFORCE:
-                       if (!capable(CAP_NET_ADMIN)) {
-                               ret = -EPERM;
-                               break;
-                       }
-                       goto set_sndbuf;
+       case SO_SNDBUFFORCE:
+               if (!capable(CAP_NET_ADMIN)) {
+                       ret = -EPERM;
+                       break;
+               }
+               goto set_sndbuf;
 
-               case SO_RCVBUF:
-                       /* Don't error on this BSD doesn't and if you think
-                          about it this is right. Otherwise apps have to
-                          play 'guess the biggest size' games. RCVBUF/SNDBUF
-                          are treated in BSD as hints */
+       case SO_RCVBUF:
+               /* Don't return an error here; BSD doesn't, and if you think
+                  about it this is right.  Otherwise apps have to play
+                  'guess the biggest size' games.  RCVBUF/SNDBUF are
+                  treated in BSD as hints. */
 
-                       if (val > sysctl_rmem_max)
-                               val = sysctl_rmem_max;
+               if (val > sysctl_rmem_max)
+                       val = sysctl_rmem_max;
 set_rcvbuf:
-                       sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-                       /*
-                        * We double it on the way in to account for
-                        * "struct sk_buff" etc. overhead.   Applications
-                        * assume that the SO_RCVBUF setting they make will
-                        * allow that much actual data to be received on that
-                        * socket.
-                        *
-                        * Applications are unaware that "struct sk_buff" and
-                        * other overheads allocate from the receive buffer
-                        * during socket buffer allocation.
-                        *
-                        * And after considering the possible alternatives,
-                        * returning the value we actually used in getsockopt
-                        * is the most desirable behavior.
-                        */
-                       if ((val * 2) < SOCK_MIN_RCVBUF)
-                               sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
-                       else
-                               sk->sk_rcvbuf = val * 2;
+               sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+               /*
+                * We double it on the way in to account for
+                * "struct sk_buff" etc. overhead.   Applications
+                * assume that the SO_RCVBUF setting they make will
+                * allow that much actual data to be received on that
+                * socket.
+                *
+                * Applications are unaware that "struct sk_buff" and
+                * other overheads allocate from the receive buffer
+                * during socket buffer allocation.
+                *
+                * And after considering the possible alternatives,
+                * returning the value we actually used in getsockopt
+                * is the most desirable behavior.
+                */
+               if ((val * 2) < SOCK_MIN_RCVBUF)
+                       sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
+               else
+                       sk->sk_rcvbuf = val * 2;
+               break;
+
+       case SO_RCVBUFFORCE:
+               if (!capable(CAP_NET_ADMIN)) {
+                       ret = -EPERM;
                        break;
+               }
+               goto set_rcvbuf;
 
-               case SO_RCVBUFFORCE:
-                       if (!capable(CAP_NET_ADMIN)) {
-                               ret = -EPERM;
-                               break;
-                       }
-                       goto set_rcvbuf;
-
-               case SO_KEEPALIVE:
+       case SO_KEEPALIVE:
 #ifdef CONFIG_INET
-                       if (sk->sk_protocol == IPPROTO_TCP)
-                               tcp_set_keepalive(sk, valbool);
+               if (sk->sk_protocol == IPPROTO_TCP)
+                       tcp_set_keepalive(sk, valbool);
 #endif
-                       sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
-                       break;
-
-               case SO_OOBINLINE:
-                       sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
+               sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
+               break;
+
+       case SO_OOBINLINE:
+               sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
+               break;
+
+       case SO_NO_CHECK:
+               sk->sk_no_check = valbool;
+               break;
+
+       case SO_PRIORITY:
+               if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
+                       sk->sk_priority = val;
+               else
+                       ret = -EPERM;
+               break;
+
+       case SO_LINGER:
+               if (optlen < sizeof(ling)) {
+                       ret = -EINVAL;  /* 1003.1g */
                        break;
-
-               case SO_NO_CHECK:
-                       sk->sk_no_check = valbool;
-                       break;
-
-               case SO_PRIORITY:
-                       if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
-                               sk->sk_priority = val;
-                       else
-                               ret = -EPERM;
+               }
+               if (copy_from_user(&ling, optval, sizeof(ling))) {
+                       ret = -EFAULT;
                        break;
-
-               case SO_LINGER:
-                       if(optlen<sizeof(ling)) {
-                               ret = -EINVAL;  /* 1003.1g */
-                               break;
-                       }
-                       if (copy_from_user(&ling,optval,sizeof(ling))) {
-                               ret = -EFAULT;
-                               break;
-                       }
-                       if (!ling.l_onoff)
-                               sock_reset_flag(sk, SOCK_LINGER);
-                       else {
+               }
+               if (!ling.l_onoff)
+                       sock_reset_flag(sk, SOCK_LINGER);
+               else {
 #if (BITS_PER_LONG == 32)
-                               if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
-                                       sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
-                               else
+                       if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
+                               sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
+                       else
 #endif
-                                       sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
-                               sock_set_flag(sk, SOCK_LINGER);
-                       }
-                       break;
-
-               case SO_BSDCOMPAT:
-                       sock_warn_obsolete_bsdism("setsockopt");
-                       break;
-
-               case SO_PASSCRED:
-                       if (valbool)
-                               set_bit(SOCK_PASSCRED, &sock->flags);
+                               sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
+                       sock_set_flag(sk, SOCK_LINGER);
+               }
+               break;
+
+       case SO_BSDCOMPAT:
+               sock_warn_obsolete_bsdism("setsockopt");
+               break;
+
+       case SO_PASSCRED:
+               if (valbool)
+                       set_bit(SOCK_PASSCRED, &sock->flags);
+               else
+                       clear_bit(SOCK_PASSCRED, &sock->flags);
+               break;
+
+       case SO_TIMESTAMP:
+       case SO_TIMESTAMPNS:
+               if (valbool)  {
+                       if (optname == SO_TIMESTAMP)
+                               sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
                        else
-                               clear_bit(SOCK_PASSCRED, &sock->flags);
-                       break;
+                               sock_set_flag(sk, SOCK_RCVTSTAMPNS);
+                       sock_set_flag(sk, SOCK_RCVTSTAMP);
+                       sock_enable_timestamp(sk);
+               } else {
+                       sock_reset_flag(sk, SOCK_RCVTSTAMP);
+                       sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
+               }
+               break;
 
-               case SO_TIMESTAMP:
-                       if (valbool)  {
-                               sock_set_flag(sk, SOCK_RCVTSTAMP);
-                               sock_enable_timestamp(sk);
-                       } else
-                               sock_reset_flag(sk, SOCK_RCVTSTAMP);
-                       break;
+       case SO_RCVLOWAT:
+               if (val < 0)
+                       val = INT_MAX;
+               sk->sk_rcvlowat = val ? : 1;
+               break;
 
-               case SO_RCVLOWAT:
-                       if (val < 0)
-                               val = INT_MAX;
-                       sk->sk_rcvlowat = val ? : 1;
-                       break;
+       case SO_RCVTIMEO:
+               ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
+               break;
 
-               case SO_RCVTIMEO:
-                       ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
-                       break;
+       case SO_SNDTIMEO:
+               ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
+               break;
 
-               case SO_SNDTIMEO:
-                       ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
+#ifdef CONFIG_NETDEVICES
+       case SO_BINDTODEVICE:
+       {
+               char devname[IFNAMSIZ];
+
+               /* Sorry... */
+               if (!capable(CAP_NET_RAW)) {
+                       ret = -EPERM;
                        break;
+               }
 
-#ifdef CONFIG_NETDEVICES
-               case SO_BINDTODEVICE:
-               {
-                       char devname[IFNAMSIZ];
+               /* Bind this socket to a particular device like "eth0",
+                * as specified in the passed interface name. If the
+                * name is "" or the option length is zero the socket
+                * is not bound.
+                */
 
-                       /* Sorry... */
-                       if (!capable(CAP_NET_RAW)) {
-                               ret = -EPERM;
+               if (!valbool) {
+                       sk->sk_bound_dev_if = 0;
+               } else {
+                       if (optlen > IFNAMSIZ - 1)
+                               optlen = IFNAMSIZ - 1;
+                       memset(devname, 0, sizeof(devname));
+                       if (copy_from_user(devname, optval, optlen)) {
+                               ret = -EFAULT;
                                break;
                        }
 
-                       /* Bind this socket to a particular device like "eth0",
-                        * as specified in the passed interface name. If the
-                        * name is "" or the option length is zero the socket
-                        * is not bound.
-                        */
+                       /* Remove any cached route for this socket. */
+                       sk_dst_reset(sk);
 
-                       if (!valbool) {
+                       if (devname[0] == '\0') {
                                sk->sk_bound_dev_if = 0;
                        } else {
-                               if (optlen > IFNAMSIZ - 1)
-                                       optlen = IFNAMSIZ - 1;
-                               memset(devname, 0, sizeof(devname));
-                               if (copy_from_user(devname, optval, optlen)) {
-                                       ret = -EFAULT;
+                               struct net_device *dev = dev_get_by_name(devname);
+                               if (!dev) {
+                                       ret = -ENODEV;
                                        break;
                                }
-
-                               /* Remove any cached route for this socket. */
-                               sk_dst_reset(sk);
-
-                               if (devname[0] == '\0') {
-                                       sk->sk_bound_dev_if = 0;
-                               } else {
-                                       struct net_device *dev = dev_get_by_name(devname);
-                                       if (!dev) {
-                                               ret = -ENODEV;
-                                               break;
-                                       }
-                                       sk->sk_bound_dev_if = dev->ifindex;
-                                       dev_put(dev);
-                               }
+                               sk->sk_bound_dev_if = dev->ifindex;
+                               dev_put(dev);
                        }
-                       break;
                }
+               break;
+       }
 #endif
 
 
-               case SO_ATTACH_FILTER:
-                       ret = -EINVAL;
-                       if (optlen == sizeof(struct sock_fprog)) {
-                               struct sock_fprog fprog;
+       case SO_ATTACH_FILTER:
+               ret = -EINVAL;
+               if (optlen == sizeof(struct sock_fprog)) {
+                       struct sock_fprog fprog;
 
-                               ret = -EFAULT;
-                               if (copy_from_user(&fprog, optval, sizeof(fprog)))
-                                       break;
-
-                               ret = sk_attach_filter(&fprog, sk);
-                       }
-                       break;
-
-               case SO_DETACH_FILTER:
-                       rcu_read_lock_bh();
-                       filter = rcu_dereference(sk->sk_filter);
-                       if (filter) {
-                               rcu_assign_pointer(sk->sk_filter, NULL);
-                               sk_filter_release(sk, filter);
-                               rcu_read_unlock_bh();
+                       ret = -EFAULT;
+                       if (copy_from_user(&fprog, optval, sizeof(fprog)))
                                break;
-                       }
+
+                       ret = sk_attach_filter(&fprog, sk);
+               }
+               break;
+
+       case SO_DETACH_FILTER:
+               rcu_read_lock_bh();
+               filter = rcu_dereference(sk->sk_filter);
+               if (filter) {
+                       rcu_assign_pointer(sk->sk_filter, NULL);
+                       sk_filter_release(sk, filter);
                        rcu_read_unlock_bh();
-                       ret = -ENONET;
                        break;
+               }
+               rcu_read_unlock_bh();
+               ret = -ENONET;
+               break;
 
-               case SO_PASSSEC:
-                       if (valbool)
-                               set_bit(SOCK_PASSSEC, &sock->flags);
-                       else
-                               clear_bit(SOCK_PASSSEC, &sock->flags);
-                       break;
+       case SO_PASSSEC:
+               if (valbool)
+                       set_bit(SOCK_PASSSEC, &sock->flags);
+               else
+                       clear_bit(SOCK_PASSSEC, &sock->flags);
+               break;
 
                /* We implement the SO_SNDLOWAT etc to
                   not be settable (1003.1g 5.3) */
-               default:
-                       ret = -ENOPROTOOPT;
-                       break;
+       default:
+               ret = -ENOPROTOOPT;
+               break;
        }
        release_sock(sk);
        return ret;
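For completeness, the userspace side of the new SO_TIMESTAMPNS handling above looks roughly like the sketch below (glibc wrappers assumed; SO_TIMESTAMPNS and SCM_TIMESTAMPNS come from the kernel headers added by this series). Setting SO_TIMESTAMP instead yields the old struct timeval control message, and the switch above keeps the two flags mutually exclusive.

        #include <string.h>
        #include <sys/socket.h>
        #include <time.h>

        /* enable nanosecond RX timestamps, then read one back via cmsg */
        static ssize_t recv_with_ns_timestamp(int fd, void *buf, size_t len,
                                              struct timespec *ts)
        {
                int on = 1;
                char ctrl[CMSG_SPACE(sizeof(struct timespec))];
                struct iovec iov = { .iov_base = buf, .iov_len = len };
                struct msghdr msg = {
                        .msg_iov = &iov, .msg_iovlen = 1,
                        .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
                };
                struct cmsghdr *cmsg;
                ssize_t n;

                if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &on, sizeof(on)) < 0)
                        return -1;
                n = recvmsg(fd, &msg, 0);
                if (n < 0)
                        return n;
                for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_TIMESTAMPNS)
                                memcpy(ts, CMSG_DATA(cmsg), sizeof(*ts));
                return n;
        }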
@@ -641,8 +648,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 {
        struct sock *sk = sock->sk;
 
-       union
-       {
+       union {
                int val;
                struct linger ling;
                struct timeval tm;
@@ -651,148 +657,153 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
        unsigned int lv = sizeof(int);
        int len;
 
-       if(get_user(len,optlen))
+       if (get_user(len, optlen))
                return -EFAULT;
-       if(len < 0)
+       if (len < 0)
                return -EINVAL;
 
-       switch(optname)
-       {
-               case SO_DEBUG:
-                       v.val = sock_flag(sk, SOCK_DBG);
-                       break;
-
-               case SO_DONTROUTE:
-                       v.val = sock_flag(sk, SOCK_LOCALROUTE);
-                       break;
-
-               case SO_BROADCAST:
-                       v.val = !!sock_flag(sk, SOCK_BROADCAST);
-                       break;
-
-               case SO_SNDBUF:
-                       v.val = sk->sk_sndbuf;
-                       break;
-
-               case SO_RCVBUF:
-                       v.val = sk->sk_rcvbuf;
-                       break;
-
-               case SO_REUSEADDR:
-                       v.val = sk->sk_reuse;
-                       break;
-
-               case SO_KEEPALIVE:
-                       v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
-                       break;
-
-               case SO_TYPE:
-                       v.val = sk->sk_type;
-                       break;
-
-               case SO_ERROR:
-                       v.val = -sock_error(sk);
-                       if(v.val==0)
-                               v.val = xchg(&sk->sk_err_soft, 0);
-                       break;
-
-               case SO_OOBINLINE:
-                       v.val = !!sock_flag(sk, SOCK_URGINLINE);
-                       break;
-
-               case SO_NO_CHECK:
-                       v.val = sk->sk_no_check;
-                       break;
-
-               case SO_PRIORITY:
-                       v.val = sk->sk_priority;
-                       break;
-
-               case SO_LINGER:
-                       lv              = sizeof(v.ling);
-                       v.ling.l_onoff  = !!sock_flag(sk, SOCK_LINGER);
-                       v.ling.l_linger = sk->sk_lingertime / HZ;
-                       break;
-
-               case SO_BSDCOMPAT:
-                       sock_warn_obsolete_bsdism("getsockopt");
-                       break;
-
-               case SO_TIMESTAMP:
-                       v.val = sock_flag(sk, SOCK_RCVTSTAMP);
-                       break;
+       switch (optname) {
+       case SO_DEBUG:
+               v.val = sock_flag(sk, SOCK_DBG);
+               break;
+
+       case SO_DONTROUTE:
+               v.val = sock_flag(sk, SOCK_LOCALROUTE);
+               break;
+
+       case SO_BROADCAST:
+               v.val = !!sock_flag(sk, SOCK_BROADCAST);
+               break;
+
+       case SO_SNDBUF:
+               v.val = sk->sk_sndbuf;
+               break;
+
+       case SO_RCVBUF:
+               v.val = sk->sk_rcvbuf;
+               break;
+
+       case SO_REUSEADDR:
+               v.val = sk->sk_reuse;
+               break;
+
+       case SO_KEEPALIVE:
+               v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
+               break;
+
+       case SO_TYPE:
+               v.val = sk->sk_type;
+               break;
+
+       case SO_ERROR:
+               v.val = -sock_error(sk);
+               if (v.val == 0)
+                       v.val = xchg(&sk->sk_err_soft, 0);
+               break;
+
+       case SO_OOBINLINE:
+               v.val = !!sock_flag(sk, SOCK_URGINLINE);
+               break;
+
+       case SO_NO_CHECK:
+               v.val = sk->sk_no_check;
+               break;
+
+       case SO_PRIORITY:
+               v.val = sk->sk_priority;
+               break;
+
+       case SO_LINGER:
+               lv              = sizeof(v.ling);
+               v.ling.l_onoff  = !!sock_flag(sk, SOCK_LINGER);
+               v.ling.l_linger = sk->sk_lingertime / HZ;
+               break;
+
+       case SO_BSDCOMPAT:
+               sock_warn_obsolete_bsdism("getsockopt");
+               break;
+
+       case SO_TIMESTAMP:
+               v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
+                               !sock_flag(sk, SOCK_RCVTSTAMPNS);
+               break;
+
+       case SO_TIMESTAMPNS:
+               v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
+               break;
+
+       case SO_RCVTIMEO:
+               lv = sizeof(struct timeval);
+               if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
+                       v.tm.tv_sec = 0;
+                       v.tm.tv_usec = 0;
+               } else {
+                       v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
+                       v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
+               }
+               break;
+
+       case SO_SNDTIMEO:
+               lv = sizeof(struct timeval);
+               if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
+                       v.tm.tv_sec = 0;
+                       v.tm.tv_usec = 0;
+               } else {
+                       v.tm.tv_sec = sk->sk_sndtimeo / HZ;
+                       v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
+               }
+               break;
 
-               case SO_RCVTIMEO:
-                       lv=sizeof(struct timeval);
-                       if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
-                               v.tm.tv_sec = 0;
-                               v.tm.tv_usec = 0;
-                       } else {
-                               v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
-                               v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
-                       }
-                       break;
+       case SO_RCVLOWAT:
+               v.val = sk->sk_rcvlowat;
+               break;
 
-               case SO_SNDTIMEO:
-                       lv=sizeof(struct timeval);
-                       if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
-                               v.tm.tv_sec = 0;
-                               v.tm.tv_usec = 0;
-                       } else {
-                               v.tm.tv_sec = sk->sk_sndtimeo / HZ;
-                               v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
-                       }
-                       break;
+       case SO_SNDLOWAT:
+               v.val=1;
+               break;
 
-               case SO_RCVLOWAT:
-                       v.val = sk->sk_rcvlowat;
-                       break;
+       case SO_PASSCRED:
+               v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
+               break;
 
-               case SO_SNDLOWAT:
-                       v.val=1;
-                       break;
+       case SO_PEERCRED:
+               if (len > sizeof(sk->sk_peercred))
+                       len = sizeof(sk->sk_peercred);
+               if (copy_to_user(optval, &sk->sk_peercred, len))
+                       return -EFAULT;
+               goto lenout;
 
-               case SO_PASSCRED:
-                       v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
-                       break;
-
-               case SO_PEERCRED:
-                       if (len > sizeof(sk->sk_peercred))
-                               len = sizeof(sk->sk_peercred);
-                       if (copy_to_user(optval, &sk->sk_peercred, len))
-                               return -EFAULT;
-                       goto lenout;
-
-               case SO_PEERNAME:
-               {
-                       char address[128];
-
-                       if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
-                               return -ENOTCONN;
-                       if (lv < len)
-                               return -EINVAL;
-                       if (copy_to_user(optval, address, len))
-                               return -EFAULT;
-                       goto lenout;
-               }
+       case SO_PEERNAME:
+       {
+               char address[128];
+
+               if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
+                       return -ENOTCONN;
+               if (lv < len)
+                       return -EINVAL;
+               if (copy_to_user(optval, address, len))
+                       return -EFAULT;
+               goto lenout;
+       }
 
-               /* Dubious BSD thing... Probably nobody even uses it, but
-                * the UNIX standard wants it for whatever reason... -DaveM
-                */
-               case SO_ACCEPTCONN:
-                       v.val = sk->sk_state == TCP_LISTEN;
-                       break;
+       /* Dubious BSD thing... Probably nobody even uses it, but
+        * the UNIX standard wants it for whatever reason... -DaveM
+        */
+       case SO_ACCEPTCONN:
+               v.val = sk->sk_state == TCP_LISTEN;
+               break;
 
-               case SO_PASSSEC:
-                       v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
-                       break;
+       case SO_PASSSEC:
+               v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
+               break;
 
-               case SO_PEERSEC:
-                       return security_socket_getpeersec_stream(sock, optval, optlen, len);
+       case SO_PEERSEC:
+               return security_socket_getpeersec_stream(sock, optval, optlen, len);
 
-               default:
-                       return(-ENOPROTOOPT);
+       default:
+               return -ENOPROTOOPT;
        }
+
        if (len > lv)
                len = lv;
        if (copy_to_user(optval, &v, len))
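The SO_RCVTIMEO/SO_SNDTIMEO cases in the reindented switch above convert the socket's jiffies-based timeout into a struct timeval, with {0, 0} standing in for "no timeout"; note also that SO_TIMESTAMP now reads true only while nanosecond timestamps are off, so it and the new SO_TIMESTAMPNS never both report as set. A minimal sketch of the timeout conversion (the helper name is illustrative, not part of the patch):

static void sock_timeo_to_timeval(long timeo, struct timeval *tv)
{
        if (timeo == MAX_SCHEDULE_TIMEOUT) {
                /* "never time out" is reported to userspace as 0/0 */
                tv->tv_sec  = 0;
                tv->tv_usec = 0;
        } else {
                tv->tv_sec  = timeo / HZ;
                tv->tv_usec = ((timeo % HZ) * 1000000) / HZ;
        }
}

/* e.g. HZ = 250, timeo = 751 jiffies  ->  { .tv_sec = 3, .tv_usec = 4000 } */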
@@ -904,6 +915,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
                sk_node_init(&newsk->sk_node);
                sock_lock_init(newsk);
                bh_lock_sock(newsk);
+               newsk->sk_backlog.head  = newsk->sk_backlog.tail = NULL;
 
                atomic_set(&newsk->sk_rmem_alloc, 0);
                atomic_set(&newsk->sk_wmem_alloc, 0);
@@ -923,7 +935,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
                newsk->sk_wmem_queued   = 0;
                newsk->sk_forward_alloc = 0;
                newsk->sk_send_head     = NULL;
-               newsk->sk_backlog.head  = newsk->sk_backlog.tail = NULL;
                newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 
                sock_reset_flag(newsk, SOCK_DONE);
@@ -970,6 +981,21 @@ out:
 
 EXPORT_SYMBOL_GPL(sk_clone);
 
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+{
+       __sk_dst_set(sk, dst);
+       sk->sk_route_caps = dst->dev->features;
+       if (sk->sk_route_caps & NETIF_F_GSO)
+               sk->sk_route_caps |= NETIF_F_GSO_MASK;
+       if (sk_can_gso(sk)) {
+               if (dst->header_len)
+                       sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+               else
+                       sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+       }
+}
+EXPORT_SYMBOL_GPL(sk_setup_caps);
+
 void __init sk_init(void)
 {
        if (num_physpages <= 4096) {
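sk_setup_caps() gives callers one place to cache a route and derive sk_route_caps from the output device: NETIF_F_GSO is expanded into the full GSO mask, GSO is dropped again when the destination needs extra header room, and otherwise scatter-gather plus hardware checksumming are added, since GSO depends on both. A self-contained toy model of that flag logic, using made-up feature bits rather than the kernel's NETIF_F_* values:

#include <stdio.h>

/* stand-ins for NETIF_F_* bits, for illustration only */
#define F_SG       0x01
#define F_HW_CSUM  0x02
#define F_GSO      0x04
#define F_GSO_MASK 0xf0    /* pretend per-protocol GSO types */

static unsigned int setup_caps(unsigned int dev_features, int dst_header_len)
{
        unsigned int caps = dev_features;

        if (caps & F_GSO)
                caps |= F_GSO_MASK;              /* generic GSO implies every GSO type */
        if (caps & F_GSO_MASK) {                 /* the "sk_can_gso()" test, roughly */
                if (dst_header_len)
                        caps &= ~F_GSO_MASK;     /* encapsulation header defeats GSO */
                else
                        caps |= F_SG | F_HW_CSUM; /* GSO needs SG + csum offload */
        }
        return caps;
}

int main(void)
{
        printf("%#x\n", setup_caps(F_GSO, 0));   /* 0xf7: GSO kept, SG/csum added */
        printf("%#x\n", setup_caps(F_GSO, 8));   /* 0x04: GSO mask stripped again */
        return 0;
}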
@@ -1220,13 +1246,13 @@ static void __lock_sock(struct sock *sk)
 {
        DEFINE_WAIT(wait);
 
-       for(;;) {
+       for (;;) {
                prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
                                        TASK_UNINTERRUPTIBLE);
                spin_unlock_bh(&sk->sk_lock.slock);
                schedule();
                spin_lock_bh(&sk->sk_lock.slock);
-               if(!sock_owned_by_user(sk))
+               if (!sock_owned_by_user(sk))
                        break;
        }
        finish_wait(&sk->sk_lock.wq, &wait);
@@ -1258,7 +1284,7 @@ static void __release_sock(struct sock *sk)
                } while (skb != NULL);
 
                bh_lock_sock(sk);
-       } while((skb = sk->sk_backlog.head) != NULL);
+       } while ((skb = sk->sk_backlog.head) != NULL);
 }
 
 /**
@@ -1420,7 +1446,7 @@ static void sock_def_write_space(struct sock *sk)
        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
-       if((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+       if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                        wake_up_interruptible(sk->sk_sleep);
 
@@ -1482,8 +1508,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
        sock_set_flag(sk, SOCK_ZAPPED);
 
-       if(sock)
-       {
+       if (sock) {
                sk->sk_type     =       sock->type;
                sk->sk_sleep    =       &sock->wait;
                sock->sk        =       sk;
@@ -1512,8 +1537,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        sk->sk_rcvtimeo         =       MAX_SCHEDULE_TIMEOUT;
        sk->sk_sndtimeo         =       MAX_SCHEDULE_TIMEOUT;
 
-       sk->sk_stamp.tv_sec     = -1L;
-       sk->sk_stamp.tv_usec    = -1L;
+       sk->sk_stamp = ktime_set(-1L, -1L);
 
        atomic_set(&sk->sk_refcnt, 1);
 }
@@ -1554,17 +1578,36 @@ EXPORT_SYMBOL(release_sock);
 
 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
 {
+       struct timeval tv;
        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk);
-       if (sk->sk_stamp.tv_sec == -1)
+       tv = ktime_to_timeval(sk->sk_stamp);
+       if (tv.tv_sec == -1)
                return -ENOENT;
-       if (sk->sk_stamp.tv_sec == 0)
-               do_gettimeofday(&sk->sk_stamp);
-       return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ?
-               -EFAULT : 0;
+       if (tv.tv_sec == 0) {
+               sk->sk_stamp = ktime_get_real();
+               tv = ktime_to_timeval(sk->sk_stamp);
+       }
+       return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
 }
 EXPORT_SYMBOL(sock_get_timestamp);
 
+int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
+{
+       struct timespec ts;
+       if (!sock_flag(sk, SOCK_TIMESTAMP))
+               sock_enable_timestamp(sk);
+       ts = ktime_to_timespec(sk->sk_stamp);
+       if (ts.tv_sec == -1)
+               return -ENOENT;
+       if (ts.tv_sec == 0) {
+               sk->sk_stamp = ktime_get_real();
+               ts = ktime_to_timespec(sk->sk_stamp);
+       }
+       return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(sock_get_timestampns);
+
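From userspace the new helper is the nanosecond sibling of sock_get_timestamp(): it backs a nanosecond-resolution "time of last received packet" query, presumably reached through the SIOCGSTAMPNS ioctl added alongside SO_TIMESTAMPNS in this series (treat that entry point as an assumption and check your headers). A hedged sketch:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>
#include <time.h>

static void print_last_rx_stamp(int fd)
{
        struct timespec ts;

        if (ioctl(fd, SIOCGSTAMPNS, &ts) == 0)
                printf("last rx: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        else
                perror("SIOCGSTAMPNS");  /* ENOENT until a packet has been timestamped */
}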
 void sock_enable_timestamp(struct sock *sk)
 {
        if (!sock_flag(sk, SOCK_TIMESTAMP)) {
@@ -1899,7 +1942,7 @@ static int proto_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations proto_seq_ops = {
+static const struct seq_operations proto_seq_ops = {
        .start  = proto_seq_start,
        .next   = proto_seq_next,
        .stop   = proto_seq_stop,
index 1e75b15..b297120 100644 (file)
@@ -136,6 +136,14 @@ ctl_table core_table[] = {
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
+       {
+               .ctl_name       = NET_CORE_WARNINGS,
+               .procname       = "warnings",
+               .data           = &net_msg_warn,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec
+       },
        { .ctl_name = 0 }
 };
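This sysctl entry exposes the net_msg_warn flag (exported in the companion hunk a little further down, net/core/utils.c in the tree) as /proc/sys/net/core/warnings; with the default of 1 the guarded warnings behave as before, and writing 0 silences them. A rough sketch of the kind of guard the flag is meant to feed -- the macro name and exact shape here are an approximation, not quoted from the patch:

/* approximation of a net_msg_warn-gated, ratelimited warning */
#define NET_WARN_RATELIMIT(fmt, args...)                        \
        do {                                                    \
                if (net_msg_warn && net_ratelimit())            \
                        printk(KERN_WARNING fmt, ##args);       \
        } while (0)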
 
index 0ad1cd5..89241cd 100644 (file)
@@ -49,8 +49,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
                        struct sk_buff *skb, int offset, struct iovec *to,
                        size_t len, struct dma_pinned_list *pinned_list)
 {
-       int start = skb_headlen(skb);
-       int i, copy = start - offset;
+       int end = skb_headlen(skb);
+       int i, copy = end - offset;
        dma_cookie_t cookie = 0;
 
        /* Copy header. */
@@ -69,11 +69,9 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 
        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
+               BUG_TRAP(len >= 0);
 
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
+               end = offset + skb_shinfo(skb)->frags[i].size;
                copy = end - offset;
                if ((copy = end - offset) > 0) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -82,8 +80,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
                        if (copy > len)
                                copy = len;
 
-                       cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
-                                       frag->page_offset + offset - start, copy);
+                       cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list,
+                                       page, frag->page_offset, copy);
                        if (cookie < 0)
                                goto fault;
                        len -= copy;
@@ -91,25 +89,21 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
                                goto end;
                        offset += copy;
                }
-               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
+                       BUG_TRAP(len >= 0);
 
-                       end = start + list->len;
+                       end = offset + list->len;
                        copy = end - offset;
                        if (copy > 0) {
                                if (copy > len)
                                        copy = len;
                                cookie = dma_skb_copy_datagram_iovec(chan, list,
-                                               offset - start, to, copy,
-                                               pinned_list);
+                                               0, to, copy, pinned_list);
                                if (cookie < 0)
                                        goto fault;
                                len -= copy;
@@ -117,7 +111,6 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
                                        goto end;
                                offset += copy;
                        }
-                       start = end;
                }
        }
 
index 07236c1..adecfd2 100644 (file)
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
-int net_msg_cost = 5*HZ;
-int net_msg_burst = 10;
+int net_msg_cost __read_mostly = 5*HZ;
+int net_msg_burst __read_mostly = 10;
+int net_msg_warn __read_mostly = 1;
+EXPORT_SYMBOL(net_msg_warn);
 
 /*
  * All net warning printk()s should be guarded by this function.
index a086c63..01030f3 100644 (file)
@@ -157,7 +157,7 @@ struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
 
        if (av != NULL) {
                av->dccpav_buf_head     = DCCP_MAX_ACKVEC_LEN - 1;
-               av->dccpav_buf_ackno    = DCCP_MAX_SEQNO + 1;
+               av->dccpav_buf_ackno    = UINT48_MAX + 1;
                av->dccpav_buf_nonce = av->dccpav_buf_nonce = 0;
                av->dccpav_time.tv_sec  = 0;
                av->dccpav_time.tv_usec = 0;
index 746f79d..d7d9ce7 100644 (file)
@@ -33,7 +33,6 @@
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
-
 #include "../ccid.h"
 #include "../dccp.h"
 #include "lib/packet_history.h"
@@ -52,6 +51,9 @@ static struct dccp_tx_hist *ccid3_tx_hist;
 static struct dccp_rx_hist *ccid3_rx_hist;
 static struct dccp_li_hist *ccid3_li_hist;
 
+/*
+ *     Transmitter Half-Connection Routines
+ */
 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
 static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
 {
@@ -80,23 +82,37 @@ static void ccid3_hc_tx_set_state(struct sock *sk,
 }
 
 /*
- * Recalculate scheduled nominal send time t_nom, inter-packet interval
- * t_ipi, and delta value. Should be called after each change to X.
+ * Compute the initial sending rate X_init according to RFC 3390:
+ *     w_init   =    min(4 * MSS, max(2 * MSS, 4380 bytes))
+ *     X_init   =    w_init / RTT
+ * For consistency with other parts of the code, X_init is scaled by 2^6.
  */
-static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
+static inline u64 rfc3390_initial_rate(struct sock *sk)
 {
-       timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
+       const struct dccp_sock *dp = dccp_sk(sk);
+       const __u32 w_init = min(4 * dp->dccps_mss_cache,
+                                max(2 * dp->dccps_mss_cache, 4380U));
 
-       /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */
-       hctx->ccid3hctx_t_ipi = scaled_div(hctx->ccid3hctx_s,
-                                          hctx->ccid3hctx_x >> 6);
+       return scaled_div(w_init << 6, ccid3_hc_tx_sk(sk)->ccid3hctx_rtt);
+}
 
-       /* Update nominal send time with regard to the new t_ipi */
-       timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
+/*
+ * Recalculate t_ipi and delta (should be called whenever X changes)
+ */
+static inline void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hctx)
+{
+       /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */
+       hctx->ccid3hctx_t_ipi = scaled_div32(((u64)hctx->ccid3hctx_s) << 6,
+                                            hctx->ccid3hctx_x);
 
        /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
        hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2,
                                           TFRC_OPSYS_HALF_TIME_GRAN);
+
+       ccid3_pr_debug("t_ipi=%u, delta=%u, s=%u, X=%u\n",
+                      hctx->ccid3hctx_t_ipi, hctx->ccid3hctx_delta,
+                      hctx->ccid3hctx_s, (unsigned)(hctx->ccid3hctx_x >> 6));
+
 }
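Worked numbers for the two helpers introduced above, using illustrative values (MSS = 1460 bytes, initial RTT = 100 ms), not figures from the patch:

/*
 *  w_init = min(4 * 1460, max(2 * 1460, 4380)) = 4380 bytes
 *  X_init = 4380 bytes / 0.1 s = 43800 bytes/s     (stored as 43800 << 6)
 *  t_ipi  = s / X = 1460 / 43800 s ~= 33333 us
 *  delta  = min(t_ipi / 2, t_gran / 2) = min(16666 us, TFRC_OPSYS_HALF_TIME_GRAN)
 */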
 /*
  * Update X by
@@ -112,19 +128,28 @@ static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
  *       fine-grained resolution of sending rates. This requires scaling by 2^6
  *       throughout the code. Only X_calc is unscaled (in bytes/second).
  *
- * If X has changed, we also update the scheduled send time t_now,
- * the inter-packet interval t_ipi, and the delta value.
  */
 static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
 
 {
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
+       __u64 min_rate = 2 * hctx->ccid3hctx_x_recv;
        const  __u64 old_x = hctx->ccid3hctx_x;
 
+       /*
+        * Handle IDLE periods: do not reduce below RFC3390 initial sending rate
+        * when idling [RFC 4342, 5.1]. See also draft-ietf-dccp-rfc3448bis.
+        * For consistency with X and X_recv, min_rate is also scaled by 2^6.
+        */
+       if (unlikely(hctx->ccid3hctx_idle)) {
+               min_rate = rfc3390_initial_rate(sk);
+               min_rate = max(min_rate, 2 * hctx->ccid3hctx_x_recv);
+       }
+
        if (hctx->ccid3hctx_p > 0) {
 
                hctx->ccid3hctx_x = min(((__u64)hctx->ccid3hctx_x_calc) << 6,
-                                       hctx->ccid3hctx_x_recv * 2);
+                                       min_rate);
                hctx->ccid3hctx_x = max(hctx->ccid3hctx_x,
                                        (((__u64)hctx->ccid3hctx_s) << 6) /
                                                                TFRC_T_MBI);
@@ -133,14 +158,21 @@ static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
                        (suseconds_t)hctx->ccid3hctx_rtt >= 0) {
 
                hctx->ccid3hctx_x =
-                       max(2 * min(hctx->ccid3hctx_x, hctx->ccid3hctx_x_recv),
+                       max(min(2 * hctx->ccid3hctx_x, min_rate),
                            scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
                                       hctx->ccid3hctx_rtt));
                hctx->ccid3hctx_t_ld = *now;
        }
 
-       if (hctx->ccid3hctx_x != old_x)
-               ccid3_update_send_time(hctx);
+       if (hctx->ccid3hctx_x != old_x) {
+               ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, "
+                              "X_recv=%u\n", (unsigned)(old_x >> 6),
+                              (unsigned)(hctx->ccid3hctx_x >> 6),
+                              hctx->ccid3hctx_x_calc,
+                              (unsigned)(hctx->ccid3hctx_x_recv >> 6));
+
+               ccid3_update_send_interval(hctx);
+       }
 }
 
 /*
@@ -149,17 +181,12 @@ static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
  */
 static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
 {
-       if (unlikely(len == 0))
-               ccid3_pr_debug("Packet payload length is 0 - not updating\n");
-       else
-               hctx->ccid3hctx_s = hctx->ccid3hctx_s == 0 ? len :
-                                   (9 * hctx->ccid3hctx_s + len) / 10;
-       /*
-        * Note: We could do a potential optimisation here - when `s' changes,
-        *       recalculate sending rate and consequently t_ipi, t_delta, and
-        *       t_now. This is however non-standard, and the benefits are not
-        *       clear, so it is currently left out.
-        */
+       const u16 old_s = hctx->ccid3hctx_s;
+
+       hctx->ccid3hctx_s = old_s == 0 ? len : (9 * old_s + len) / 10;
+
+       if (hctx->ccid3hctx_s != old_s)
+               ccid3_update_send_interval(hctx);
 }
 
 /*
@@ -193,6 +220,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
 {
        struct sock *sk = (struct sock *)data;
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
+       struct timeval now;
        unsigned long t_nfb = USEC_PER_SEC / 5;
 
        bh_lock_sock(sk);
@@ -205,6 +233,8 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
        ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk,
                       ccid3_tx_state_name(hctx->ccid3hctx_state));
 
+       hctx->ccid3hctx_idle = 1;
+
        switch (hctx->ccid3hctx_state) {
        case TFRC_SSTATE_NO_FBACK:
                /* RFC 3448, 4.4: Halve send rate directly */
@@ -219,53 +249,37 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
                /* The value of R is still undefined and so we can not recompute
                 * the timout value. Keep initial value as per [RFC 4342, 5]. */
                t_nfb = TFRC_INITIAL_TIMEOUT;
-               ccid3_update_send_time(hctx);
+               ccid3_update_send_interval(hctx);
                break;
        case TFRC_SSTATE_FBACK:
                /*
-                * Check if IDLE since last timeout and recv rate is less than
-                * 4 packets (in units of 64*bytes/sec) per RTT
+                *  Modify the cached value of X_recv [RFC 3448, 4.4]
+                *
+                *  If (p == 0 || X_calc > 2 * X_recv)
+                *    X_recv = max(X_recv / 2, s / (2 * t_mbi));
+                *  Else
+                *    X_recv = X_calc / 4;
+                *
+                *  Note that X_recv is scaled by 2^6 while X_calc is not
                 */
-               if (!hctx->ccid3hctx_idle ||
-                   (hctx->ccid3hctx_x_recv >= 4 *
-                    scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
-                               hctx->ccid3hctx_rtt))) {
-                       struct timeval now;
+               BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc);
 
-                       ccid3_pr_debug("%s(%p, state=%s), not idle\n",
-                                      dccp_role(sk), sk,
-                                  ccid3_tx_state_name(hctx->ccid3hctx_state));
+               if (hctx->ccid3hctx_p == 0 ||
+                   (hctx->ccid3hctx_x_calc > (hctx->ccid3hctx_x_recv >> 5))) {
 
-                       /*
-                        *  Modify the cached value of X_recv [RFC 3448, 4.4]
-                        *
-                        *  If (p == 0 || X_calc > 2 * X_recv)
-                        *    X_recv = max(X_recv / 2, s / (2 * t_mbi));
-                        *  Else
-                        *    X_recv = X_calc / 4;
-                        *
-                        *  Note that X_recv is scaled by 2^6 while X_calc is not
-                        */
-                       BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc);
-
-                       if (hctx->ccid3hctx_p  == 0 ||
-                           (hctx->ccid3hctx_x_calc >
-                            (hctx->ccid3hctx_x_recv >> 5))) {
-
-                               hctx->ccid3hctx_x_recv =
-                                       max(hctx->ccid3hctx_x_recv / 2,
-                                           (((__u64)hctx->ccid3hctx_s) << 6) /
-                                                         (2 * TFRC_T_MBI));
-
-                               if (hctx->ccid3hctx_p == 0)
-                                       dccp_timestamp(sk, &now);
-                       } else {
-                               hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc;
-                               hctx->ccid3hctx_x_recv <<= 4;
-                       }
-                       /* Now recalculate X [RFC 3448, 4.3, step (4)] */
-                       ccid3_hc_tx_update_x(sk, &now);
+                       hctx->ccid3hctx_x_recv =
+                               max(hctx->ccid3hctx_x_recv / 2,
+                                   (((__u64)hctx->ccid3hctx_s) << 6) /
+                                                             (2 * TFRC_T_MBI));
+
+                       if (hctx->ccid3hctx_p == 0)
+                               dccp_timestamp(sk, &now);
+               } else {
+                       hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc;
+                       hctx->ccid3hctx_x_recv <<= 4;
                }
+               /* Now recalculate X [RFC 3448, 4.3, step (4)] */
+               ccid3_hc_tx_update_x(sk, &now);
                /*
                 * Schedule no feedback timer to expire in
                 * max(t_RTO, 2 * s/X)  =  max(t_RTO, 2 * t_ipi)
@@ -280,8 +294,6 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
                goto out;
        }
 
-       hctx->ccid3hctx_idle = 1;
-
 restart_timer:
        sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
                           jiffies + usecs_to_jiffies(t_nfb));
@@ -322,24 +334,35 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
                                usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
                hctx->ccid3hctx_last_win_count   = 0;
                hctx->ccid3hctx_t_last_win_count = now;
-               ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
-
-               /* Set initial sending rate X/s to 1pps (X is scaled by 2^6) */
-               ccid3_hc_tx_update_s(hctx, skb->len);
-               hctx->ccid3hctx_x = hctx->ccid3hctx_s;
-               hctx->ccid3hctx_x <<= 6;
-
-               /* First timeout, according to [RFC 3448, 4.2], is 1 second */
-               hctx->ccid3hctx_t_ipi = USEC_PER_SEC;
-               /* Initial delta: minimum of 0.5 sec and t_gran/2 */
-               hctx->ccid3hctx_delta = TFRC_OPSYS_HALF_TIME_GRAN;
 
                /* Set t_0 for initial packet */
                hctx->ccid3hctx_t_nom = now;
+
+               hctx->ccid3hctx_s = skb->len;
+
+               /*
+                * Use initial RTT sample when available: recommended by erratum
+                * to RFC 4342. This implements the initialisation procedure of
+                * draft rfc3448bis, section 4.2. Remember, X is scaled by 2^6.
+                */
+               if (dp->dccps_syn_rtt) {
+                       ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt);
+                       hctx->ccid3hctx_rtt  = dp->dccps_syn_rtt;
+                       hctx->ccid3hctx_x    = rfc3390_initial_rate(sk);
+                       hctx->ccid3hctx_t_ld = now;
+               } else {
+                       /* Sender does not have RTT sample: X = MSS/second */
+                       hctx->ccid3hctx_x = dp->dccps_mss_cache;
+                       hctx->ccid3hctx_x <<= 6;
+               }
+               ccid3_update_send_interval(hctx);
+
+               ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
                break;
        case TFRC_SSTATE_NO_FBACK:
        case TFRC_SSTATE_FBACK:
                delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now);
+               ccid3_pr_debug("delay=%ld\n", (long)delay);
                /*
                 *      Scheduling of packet transmissions [RFC 3448, 4.6]
                 *
@@ -361,6 +384,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
        /* prepare to send now (add options etc.) */
        dp->dccps_hc_tx_insert_options = 1;
        DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
+       hctx->ccid3hctx_idle = 0;
 
        /* set the nominal send time for the next following packet */
        timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
@@ -391,7 +415,6 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
        packet->dccphtx_seqno  = dccp_sk(sk)->dccps_gss;
        packet->dccphtx_rtt    = hctx->ccid3hctx_rtt;
        packet->dccphtx_sent   = 1;
-       hctx->ccid3hctx_idle   = 0;
 }
 
 static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -402,8 +425,7 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
        struct dccp_tx_hist_entry *packet;
        struct timeval now;
        unsigned long t_nfb;
-       u32 pinv;
-       suseconds_t r_sample, t_elapsed;
+       u32 pinv, r_sample;
 
        BUG_ON(hctx == NULL);
 
@@ -445,18 +467,10 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                 * Calculate new round trip sample as per [RFC 3448, 4.3] by
                 *      R_sample  =  (now - t_recvdata) - t_elapsed
                 */
-               r_sample  = timeval_delta(&now, &packet->dccphtx_tstamp);
-               t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
-
-               DCCP_BUG_ON(r_sample < 0);
-               if (unlikely(r_sample <= t_elapsed))
-                       DCCP_WARN("WARNING: r_sample=%dus <= t_elapsed=%dus\n",
-                                 (int)r_sample, (int)t_elapsed);
-               else
-                       r_sample -= t_elapsed;
-               CCID3_RTT_SANITY_CHECK(r_sample);
+               r_sample = dccp_sample_rtt(sk, &now, &packet->dccphtx_tstamp);
 
-               /* Update RTT estimate by
+               /*
+                * Update RTT estimate by
                 * If (No feedback recv)
                 *    R = R_sample;
                 * Else
@@ -467,27 +481,23 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
                        /*
                         * Larger Initial Windows [RFC 4342, sec. 5]
-                        * We deviate in that we use `s' instead of `MSS'.
                         */
-                       __u64 w_init = min(4 * hctx->ccid3hctx_s,
-                                          max(2 * hctx->ccid3hctx_s, 4380));
                        hctx->ccid3hctx_rtt  = r_sample;
-                       hctx->ccid3hctx_x    = scaled_div(w_init << 6, r_sample);
+                       hctx->ccid3hctx_x    = rfc3390_initial_rate(sk);
                        hctx->ccid3hctx_t_ld = now;
 
-                       ccid3_update_send_time(hctx);
+                       ccid3_update_send_interval(hctx);
 
-                       ccid3_pr_debug("%s(%p), s=%u, w_init=%llu, "
-                                      "R_sample=%dus, X=%u\n", dccp_role(sk),
+                       ccid3_pr_debug("%s(%p), s=%u, MSS=%u, "
+                                      "R_sample=%uus, X=%u\n", dccp_role(sk),
                                       sk, hctx->ccid3hctx_s,
-                                      (unsigned long long)w_init,
-                                      (int)r_sample,
+                                      dp->dccps_mss_cache, r_sample,
                                       (unsigned)(hctx->ccid3hctx_x >> 6));
 
                        ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
                } else {
                        hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt +
-                                                  (u32)r_sample) / 10;
+                                                  r_sample) / 10;
 
                        /* Update sending rate (step 4 of [RFC 3448, 4.3]) */
                        if (hctx->ccid3hctx_p > 0)
@@ -497,10 +507,10 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                                                    hctx->ccid3hctx_p);
                        ccid3_hc_tx_update_x(sk, &now);
 
-                       ccid3_pr_debug("%s(%p), RTT=%uus (sample=%dus), s=%u, "
+                       ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, "
                                       "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
                                       dccp_role(sk),
-                                      sk, hctx->ccid3hctx_rtt, (int)r_sample,
+                                      sk, hctx->ccid3hctx_rtt, r_sample,
                                       hctx->ccid3hctx_s, hctx->ccid3hctx_p,
                                       hctx->ccid3hctx_x_calc,
                                       (unsigned)(hctx->ccid3hctx_x_recv >> 6),
@@ -644,10 +654,50 @@ static void ccid3_hc_tx_exit(struct sock *sk)
        dccp_tx_hist_purge(ccid3_tx_hist, &hctx->ccid3hctx_hist);
 }
 
+static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
+{
+       const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
+
+       /* Listen socks doesn't have a private CCID block */
+       if (sk->sk_state == DCCP_LISTEN)
+               return;
+
+       BUG_ON(hctx == NULL);
+
+       info->tcpi_rto = hctx->ccid3hctx_t_rto;
+       info->tcpi_rtt = hctx->ccid3hctx_rtt;
+}
+
+static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
+                                 u32 __user *optval, int __user *optlen)
+{
+       const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
+       const void *val;
+
+       /* Listen socks doesn't have a private CCID block */
+       if (sk->sk_state == DCCP_LISTEN)
+               return -EINVAL;
+
+       switch (optname) {
+       case DCCP_SOCKOPT_CCID_TX_INFO:
+               if (len < sizeof(hctx->ccid3hctx_tfrc))
+                       return -EINVAL;
+               len = sizeof(hctx->ccid3hctx_tfrc);
+               val = &hctx->ccid3hctx_tfrc;
+               break;
+       default:
+               return -ENOPROTOOPT;
+       }
+
+       if (put_user(len, optlen) || copy_to_user(optval, val, len))
+               return -EFAULT;
+
+       return 0;
+}
+
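The relocated TX_INFO handler can be exercised from userspace with a plain getsockopt(). The sketch below assumes the exported struct tfrc_tx_info layout from <linux/tfrc.h>, its field names, and the SOL_DCCP level value, so verify them against your tree before relying on it:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/dccp.h>
#include <linux/tfrc.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269             /* assumption: not every libc defines it */
#endif

static void dump_ccid3_tx_info(int fd)
{
        struct tfrc_tx_info info;
        socklen_t len = sizeof(info);

        if (getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID_TX_INFO, &info, &len) == 0)
                printf("rtt=%u rto=%u ipi=%u\n",
                       info.tfrctx_rtt, info.tfrctx_rto, info.tfrctx_ipi);
        else
                perror("DCCP_SOCKOPT_CCID_TX_INFO");
}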
 /*
- * RX Half Connection methods
+ *     Receiver Half-Connection Routines
  */
-
 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
 static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
 {
@@ -977,8 +1027,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
        const struct dccp_options_received *opt_recv;
        struct dccp_rx_hist_entry *packet;
        struct timeval now;
-       u32 p_prev, rtt_prev;
-       suseconds_t r_sample, t_elapsed;
+       u32 p_prev, r_sample, rtt_prev;
        int loss, payload_size;
 
        BUG_ON(hcrx == NULL);
@@ -994,17 +1043,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
                        break;
                rtt_prev = hcrx->ccid3hcrx_rtt;
                dccp_timestamp(sk, &now);
-               timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10);
-               r_sample = timeval_usecs(&now);
-               t_elapsed = opt_recv->dccpor_elapsed_time * 10;
-
-               DCCP_BUG_ON(r_sample < 0);
-               if (unlikely(r_sample <= t_elapsed))
-                       DCCP_WARN("r_sample=%ldus, t_elapsed=%ldus\n",
-                                 (long)r_sample, (long)t_elapsed);
-               else
-                       r_sample -= t_elapsed;
-               CCID3_RTT_SANITY_CHECK(r_sample);
+               r_sample = dccp_sample_rtt(sk, &now, NULL);
 
                if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
                        hcrx->ccid3hcrx_rtt = r_sample;
@@ -1132,20 +1171,6 @@ static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
        info->tcpi_rcv_rtt  = hcrx->ccid3hcrx_rtt;
 }
 
-static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
-{
-       const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
-
-       /* Listen socks doesn't have a private CCID block */
-       if (sk->sk_state == DCCP_LISTEN)
-               return;
-
-       BUG_ON(hctx == NULL);
-
-       info->tcpi_rto = hctx->ccid3hctx_t_rto;
-       info->tcpi_rtt = hctx->ccid3hctx_rtt;
-}
-
 static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
                                  u32 __user *optval, int __user *optlen)
 {
@@ -1173,33 +1198,6 @@ static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
        return 0;
 }
 
-static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
-                                 u32 __user *optval, int __user *optlen)
-{
-       const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
-       const void *val;
-
-       /* Listen socks doesn't have a private CCID block */
-       if (sk->sk_state == DCCP_LISTEN)
-               return -EINVAL;
-
-       switch (optname) {
-       case DCCP_SOCKOPT_CCID_TX_INFO:
-               if (len < sizeof(hctx->ccid3hctx_tfrc))
-                       return -EINVAL;
-               len = sizeof(hctx->ccid3hctx_tfrc);
-               val = &hctx->ccid3hctx_tfrc;
-               break;
-       default:
-               return -ENOPROTOOPT;
-       }
-
-       if (put_user(len, optlen) || copy_to_user(optval, val, len))
-               return -EFAULT;
-
-       return 0;
-}
-
 static struct ccid_operations ccid3 = {
        .ccid_id                   = DCCPC_CCID3,
        .ccid_name                 = "ccid3",
index 15776a8..8d31b38 100644 (file)
 /* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */
 #define TFRC_T_MBI                64
 
-/* What we think is a reasonable upper limit on RTT values */
-#define CCID3_SANE_RTT_MAX        ((suseconds_t)(4 * USEC_PER_SEC))
-
-#define CCID3_RTT_SANITY_CHECK(rtt)                    do {               \
-               if (rtt > CCID3_SANE_RTT_MAX) {                            \
-                       DCCP_CRIT("RTT (%d) too large, substituting %d",   \
-                                 (int)rtt, (int)CCID3_SANE_RTT_MAX);      \
-                       rtt = CCID3_SANE_RTT_MAX;                          \
-               }                                       } while (0)
-
 enum ccid3_options {
        TFRC_OPT_LOSS_EVENT_RATE = 192,
        TFRC_OPT_LOSS_INTERVALS  = 193,
index 0a0baef..372d7e7 100644 (file)
@@ -91,7 +91,7 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list)
        u32 w_tot  = 0;
 
        list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) {
-               if (li_entry->dccplih_interval != ~0) {
+               if (li_entry->dccplih_interval != ~0U) {
                        i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i];
                        w_tot  += dccp_li_hist_w[i];
                        if (i != 0)
index e33a9ed..d8ad27b 100644 (file)
                                              __stringify(cond));          \
                             } while (0)
 
-#ifdef MODULE
 #define DCCP_PRINTK(enable, fmt, args...)      do { if (enable)             \
                                                        printk(fmt, ##args); \
                                                } while(0)
-#else
-#define DCCP_PRINTK(enable, fmt, args...)      printk(fmt, ##args)
-#endif
 #define DCCP_PR_DEBUG(enable, fmt, a...)       DCCP_PRINTK(enable, KERN_DEBUG \
                                                  "%s: " fmt, __FUNCTION__, ##a)
 
@@ -75,11 +71,15 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
 /* RFC 1122, 4.2.3.1 initial RTO value */
 #define DCCP_TIMEOUT_INIT ((unsigned)(3 * HZ))
 
+#define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */
+
+/* bounds for sampled RTT values from packet exchanges (in usec) */
+#define DCCP_SANE_RTT_MIN      100
+#define DCCP_SANE_RTT_MAX      (4 * USEC_PER_SEC)
+
 /* Maximal interval between probes for local resources.  */
 #define DCCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ / 2U))
 
-#define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */
-
 /* sysctl variables for DCCP */
 extern int  sysctl_dccp_request_retries;
 extern int  sysctl_dccp_retries1;
@@ -92,17 +92,43 @@ extern int  sysctl_dccp_feat_send_ack_vector;
 extern int  sysctl_dccp_feat_send_ndp_count;
 extern int  sysctl_dccp_tx_qlen;
 
+/*
+ *     48-bit sequence number arithmetic (signed and unsigned)
+ */
+#define INT48_MIN        0x800000000000LL              /* 2^47     */
+#define UINT48_MAX       0xFFFFFFFFFFFFLL              /* 2^48 - 1 */
+#define COMPLEMENT48(x)         (0x1000000000000LL - (x))      /* 2^48 - x */
+#define TO_SIGNED48(x)  (((x) < INT48_MIN)? (x) : -COMPLEMENT48( (x)))
+#define TO_UNSIGNED48(x) (((x) >= 0)?       (x) :  COMPLEMENT48(-(x)))
+#define ADD48(a, b)     (((a) + (b)) & UINT48_MAX)
+#define SUB48(a, b)     ADD48((a), COMPLEMENT48(b))
+
+static inline void dccp_set_seqno(u64 *seqno, u64 value)
+{
+       *seqno = value & UINT48_MAX;
+}
+
+static inline void dccp_inc_seqno(u64 *seqno)
+{
+       *seqno = ADD48(*seqno, 1);
+}
+
+/* signed mod-2^48 distance: pos. if seqno1 < seqno2, neg. if seqno1 > seqno2 */
+static inline s64 dccp_delta_seqno(const u64 seqno1, const u64 seqno2)
+{
+       u64 delta = SUB48(seqno2, seqno1);
+
+       return TO_SIGNED48(delta);
+}
+
 /* is seq1 < seq2 ? */
 static inline int before48(const u64 seq1, const u64 seq2)
 {
-       return (s64)((seq1 << 16) - (seq2 << 16)) < 0;
+       return (s64)((seq2 << 16) - (seq1 << 16)) > 0;
 }
 
 /* is seq1 > seq2 ? */
-static inline int after48(const u64 seq1, const u64 seq2)
-{
-       return (s64)((seq2 << 16) - (seq1 << 16)) < 0;
-}
+#define after48(seq1, seq2)    before48(seq2, seq1)
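A wraparound example for the new 48-bit helpers, with values chosen for illustration:

/*
 *  s1 = UINT48_MAX (0xFFFFFFFFFFFF),  s2 = 1 (just wrapped)
 *
 *  SUB48(s2, s1)            = ADD48(1, COMPLEMENT48(UINT48_MAX)) = ADD48(1, 1) = 2
 *  dccp_delta_seqno(s1, s2) = TO_SIGNED48(2) = 2  >  0,  so s1 precedes s2
 *  before48(s1, s2)         : (s64)((s2 << 16) - (s1 << 16)) wraps to 2 << 16 > 0 -> true
 *
 *  i.e. a sequence number that has just wrapped still orders after the pre-wrap
 *  maximum; the rewritten follows48() below builds on the same signed delta.
 */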
 
 /* is seq2 <= seq1 <= seq3 ? */
 static inline int between48(const u64 seq1, const u64 seq2, const u64 seq3)
@@ -118,9 +144,7 @@ static inline u64 max48(const u64 seq1, const u64 seq2)
 /* is seq1 next seqno after seq2 */
 static inline int follows48(const u64 seq1, const u64 seq2)
 {
-       int diff = (seq1 & 0xFFFF) - (seq2 & 0xFFFF);
-
-       return diff==1;
+       return dccp_delta_seqno(seq2, seq1) == 1;
 }
 
 enum {
@@ -272,6 +296,8 @@ extern int     dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
 extern int        dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
 extern void       dccp_send_close(struct sock *sk, const int active);
 extern int        dccp_invalid_packet(struct sk_buff *skb);
+extern u32        dccp_sample_rtt(struct sock *sk, struct timeval *t_recv,
+                                                   struct timeval *t_history);
 
 static inline int dccp_bad_service_code(const struct sock *sk,
                                        const __be32 service)
@@ -313,26 +339,7 @@ static inline int dccp_packet_without_ack(const struct sk_buff *skb)
        return type == DCCP_PKT_DATA || type == DCCP_PKT_REQUEST;
 }
 
-#define DCCP_MAX_SEQNO ((((u64)1) << 48) - 1)
-#define DCCP_PKT_WITHOUT_ACK_SEQ (DCCP_MAX_SEQNO << 2)
-
-static inline void dccp_set_seqno(u64 *seqno, u64 value)
-{
-       if (value > DCCP_MAX_SEQNO)
-               value -= DCCP_MAX_SEQNO + 1;
-       *seqno = value;
-}
-
-static inline u64 dccp_delta_seqno(u64 seqno1, u64 seqno2)
-{
-       return ((seqno2 << 16) - (seqno1 << 16)) >> 16;
-}
-
-static inline void dccp_inc_seqno(u64 *seqno)
-{
-       if (++*seqno > DCCP_MAX_SEQNO)
-               *seqno = 0;
-}
+#define DCCP_PKT_WITHOUT_ACK_SEQ (UINT48_MAX << 2)
 
 static inline void dccp_hdr_set_seq(struct dccp_hdr *dh, const u64 gss)
 {
index 78b043c..da6ec18 100644 (file)
@@ -86,7 +86,8 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
            dh->dccph_type == DCCP_PKT_SYNCACK) {
                if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                              dp->dccps_awl, dp->dccps_awh) &&
-                   !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
+                   dccp_delta_seqno(dp->dccps_swl,
+                                    DCCP_SKB_CB(skb)->dccpd_seq) >= 0)
                        dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
                else
                        return -1;
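With the corrected comparison helpers, the explicit delta test above reads as an inclusive lower bound on the sequence window:

/*
 *  dccp_delta_seqno(swl, seq) >= 0   <=>   seq >= swl (mod 2^48)
 *                                    <=>   !before48(seq, swl)
 *
 *  e.g. swl = UINT48_MAX, seq = 2 (post-wrap): delta = 3 >= 0, so seq is accepted.
 */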
@@ -203,7 +204,8 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
                if (dp->dccps_role != DCCP_ROLE_CLIENT)
                        goto send_sync;
 check_seq:
-               if (!before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_osr)) {
+               if (dccp_delta_seqno(dp->dccps_osr,
+                                    DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
 send_sync:
                        dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
                                       DCCP_PKT_SYNC);
@@ -298,6 +300,14 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
                if (dccp_parse_options(sk, skb))
                        goto out_invalid_packet;
 
+               /* Obtain RTT sample from SYN exchange (used by CCID 3) */
+               if (dp->dccps_options_received.dccpor_timestamp_echo) {
+                       struct timeval now;
+
+                       dccp_timestamp(sk, &now);
+                       dp->dccps_syn_rtt = dccp_sample_rtt(sk, &now, NULL);
+               }
+
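This sample is what feeds the RFC 3390 initialisation added to CCID3 earlier in the series; the call flow, as wired up by these hunks:

/*
 *  dccp_rcv_request_sent_state_process()
 *      dp->dccps_syn_rtt = dccp_sample_rtt(sk, &now, NULL);
 *
 *  ccid3_hc_tx_send_packet()                    (first packet sent)
 *      hctx->ccid3hctx_rtt = dp->dccps_syn_rtt;
 *      hctx->ccid3hctx_x   = rfc3390_initial_rate(sk);
 */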
                if (dccp_msk(sk)->dccpms_send_ack_vector &&
                    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
                                    DCCP_SKB_CB(skb)->dccpd_seq,
@@ -575,3 +585,43 @@ discard:
 }
 
 EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
+
+/**
+ * dccp_sample_rtt  -  Sample RTT from packet exchange
+ *
+ * @sk:     connected dccp_sock
+ * @t_recv: receive timestamp of packet with timestamp echo
+ * @t_hist: packet history timestamp or NULL
+ */
+u32 dccp_sample_rtt(struct sock *sk, struct timeval *t_recv,
+                                    struct timeval *t_hist)
+{
+       struct dccp_sock *dp = dccp_sk(sk);
+       struct dccp_options_received *or = &dp->dccps_options_received;
+       suseconds_t delta;
+
+       if (t_hist == NULL) {
+               if (!or->dccpor_timestamp_echo) {
+                       DCCP_WARN("packet without timestamp echo\n");
+                       return DCCP_SANE_RTT_MAX;
+               }
+               timeval_sub_usecs(t_recv, or->dccpor_timestamp_echo * 10);
+               delta = timeval_usecs(t_recv);
+       } else
+               delta = timeval_delta(t_recv, t_hist);
+
+       delta -= or->dccpor_elapsed_time * 10;          /* either set or 0 */
+
+       if (unlikely(delta <= 0)) {
+               DCCP_WARN("unusable RTT sample %ld, using min\n", (long)delta);
+               return DCCP_SANE_RTT_MIN;
+       }
+       if (unlikely(delta - (suseconds_t)DCCP_SANE_RTT_MAX > 0)) {
+               DCCP_WARN("RTT sample %ld too large, using max\n", (long)delta);
+               return DCCP_SANE_RTT_MAX;
+       }
+
+       return delta;
+}
+
+EXPORT_SYMBOL_GPL(dccp_sample_rtt);
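DCCP carries its timestamp-echo and elapsed-time options in units of 10 microseconds, hence the two "* 10" scalings in the helper. A worked pass with illustrative numbers:

/*
 *  receive time - echoed timestamp = 48000 us
 *  elapsed time option * 10        =  3000 us
 *  returned RTT sample             = 45000 us
 *
 *  Samples outside [DCCP_SANE_RTT_MIN, DCCP_SANE_RTT_MAX] = [100 us, 4 s]
 *  are clamped to the nearest bound rather than discarded.
 */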
index 4a83978..718f2fa 100644 (file)
@@ -207,8 +207,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
                                                        (iph->ihl << 2));
        struct dccp_sock *dp;
        struct inet_sock *inet;
-       const int type = skb->h.icmph->type;
-       const int code = skb->h.icmph->code;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        struct sock *sk;
        __u64 seq;
        int err;
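The skb->h.icmph and skb->nh.iph dereferences replaced here (and via ipv6_hdr() in the IPv6 hunks below) follow the tree-wide switch to header accessor helpers; their shape is roughly the following thin wrappers over the cached header offsets:

static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
{
        return (struct iphdr *)skb_network_header(skb);
}

static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
        return (struct icmphdr *)skb_transport_header(skb);
}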
@@ -363,8 +363,8 @@ EXPORT_SYMBOL_GPL(dccp_v4_send_check);
 
 static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
 {
-       return secure_dccp_sequence_number(skb->nh.iph->daddr,
-                                          skb->nh.iph->saddr,
+       return secure_dccp_sequence_number(ip_hdr(skb)->daddr,
+                                          ip_hdr(skb)->saddr,
                                           dccp_hdr(skb)->dccph_dport,
                                           dccp_hdr(skb)->dccph_sport);
 }
@@ -405,7 +405,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
        newinet->opt       = ireq->opt;
        ireq->opt          = NULL;
        newinet->mc_index  = inet_iif(skb);
-       newinet->mc_ttl    = skb->nh.iph->ttl;
+       newinet->mc_ttl    = ip_hdr(skb)->ttl;
        newinet->id        = jiffies;
 
        dccp_sync_mss(newsk, dst_mtu(dst));
@@ -428,7 +428,7 @@ EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
 static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
        const struct dccp_hdr *dh = dccp_hdr(skb);
-       const struct iphdr *iph = skb->nh.iph;
+       const struct iphdr *iph = ip_hdr(skb);
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
@@ -460,8 +460,8 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
        struct rtable *rt;
        struct flowi fl = { .oif = ((struct rtable *)skb->dst)->rt_iif,
                            .nl_u = { .ip4_u =
-                                     { .daddr = skb->nh.iph->saddr,
-                                       .saddr = skb->nh.iph->daddr,
+                                     { .daddr = ip_hdr(skb)->saddr,
+                                       .saddr = ip_hdr(skb)->daddr,
                                        .tos = RT_CONN_FLAGS(sk) } },
                            .proto = sk->sk_protocol,
                            .uli_u = { .ports =
@@ -513,6 +513,7 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 {
        int err;
        struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
+       const struct iphdr *rxiph;
        const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
                                       sizeof(struct dccp_hdr_ext) +
                                       sizeof(struct dccp_hdr_reset);
@@ -559,13 +560,13 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq);
 
        dccp_csum_outgoing(skb);
-       dh->dccph_checksum = dccp_v4_csum_finish(skb, rxskb->nh.iph->saddr,
-                                                     rxskb->nh.iph->daddr);
+       rxiph = ip_hdr(rxskb);
+       dh->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr,
+                                                rxiph->daddr);
 
        bh_lock_sock(dccp_v4_ctl_socket->sk);
        err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk,
-                                   rxskb->nh.iph->daddr,
-                                   rxskb->nh.iph->saddr, NULL);
+                                   rxiph->daddr, rxiph->saddr, NULL);
        bh_unlock_sock(dccp_v4_ctl_socket->sk);
 
        if (net_xmit_eval(err) == 0) {
@@ -640,8 +641,8 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop_and_free;
 
        ireq = inet_rsk(req);
-       ireq->loc_addr = skb->nh.iph->daddr;
-       ireq->rmt_addr = skb->nh.iph->saddr;
+       ireq->loc_addr = ip_hdr(skb)->daddr;
+       ireq->rmt_addr = ip_hdr(skb)->saddr;
        ireq->opt       = NULL;
 
        /*
@@ -809,6 +810,7 @@ EXPORT_SYMBOL_GPL(dccp_invalid_packet);
 static int dccp_v4_rcv(struct sk_buff *skb)
 {
        const struct dccp_hdr *dh;
+       const struct iphdr *iph;
        struct sock *sk;
        int min_cov;
 
@@ -817,8 +819,9 @@ static int dccp_v4_rcv(struct sk_buff *skb)
        if (dccp_invalid_packet(skb))
                goto discard_it;
 
+       iph = ip_hdr(skb);
        /* Step 1: If header checksum is incorrect, drop packet and return */
-       if (dccp_v4_csum_finish(skb, skb->nh.iph->saddr, skb->nh.iph->daddr)) {
+       if (dccp_v4_csum_finish(skb, iph->saddr, iph->daddr)) {
                DCCP_WARN("dropped packet with invalid checksum\n");
                goto discard_it;
        }
@@ -832,8 +835,8 @@ static int dccp_v4_rcv(struct sk_buff *skb)
                      "src=%u.%u.%u.%u@%-5d "
                      "dst=%u.%u.%u.%u@%-5d seq=%llu",
                      dccp_packet_name(dh->dccph_type),
-                     NIPQUAD(skb->nh.iph->saddr), ntohs(dh->dccph_sport),
-                     NIPQUAD(skb->nh.iph->daddr), ntohs(dh->dccph_dport),
+                     NIPQUAD(iph->saddr), ntohs(dh->dccph_sport),
+                     NIPQUAD(iph->daddr), ntohs(dh->dccph_dport),
                      (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
 
        if (dccp_packet_without_ack(skb)) {
@@ -848,10 +851,8 @@ static int dccp_v4_rcv(struct sk_buff *skb)
        /* Step 2:
         *      Look up flow ID in table and get corresponding socket */
        sk = __inet_lookup(&dccp_hashinfo,
-                          skb->nh.iph->saddr, dh->dccph_sport,
-                          skb->nh.iph->daddr, dh->dccph_dport,
-                          inet_iif(skb));
-
+                          iph->saddr, dh->dccph_sport,
+                          iph->daddr, dh->dccph_dport, inet_iif(skb));
        /*
         * Step 2:
         *      If no socket ...
index 7f51e8d..64eac25 100644 (file)
@@ -84,8 +84,8 @@ static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
 
 static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
 {
-       return secure_dccpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
-                                            skb->nh.ipv6h->saddr.s6_addr32,
+       return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
+                                            ipv6_hdr(skb)->saddr.s6_addr32,
                                             dccp_hdr(skb)->dccph_dport,
                                             dccp_hdr(skb)->dccph_sport     );
 
@@ -261,8 +261,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 
                        if (rxopt->srcrt)
                                opt = ipv6_invert_rthdr(sk,
-                                       (struct ipv6_rt_hdr *)(pktopts->nh.raw +
-                                                              rxopt->srcrt));
+                         (struct ipv6_rt_hdr *)(skb_network_header(pktopts) +
+                                                rxopt->srcrt));
                }
 
                if (opt != NULL && opt->srcrt != NULL) {
@@ -313,6 +313,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req)
 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 {
        struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
+       struct ipv6hdr *rxip6h;
        const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
                                       sizeof(struct dccp_hdr_ext) +
                                       sizeof(struct dccp_hdr_reset);
@@ -352,12 +353,13 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq);
 
        dccp_csum_outgoing(skb);
-       dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr,
-                                                     &rxskb->nh.ipv6h->daddr);
+       rxip6h = ipv6_hdr(rxskb);
+       dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
+                                                     &rxip6h->daddr);
 
        memset(&fl, 0, sizeof(fl));
-       ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
-       ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
+       ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr);
+       ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr);
 
        fl.proto = IPPROTO_DCCP;
        fl.oif = inet6_iif(rxskb);
@@ -390,7 +392,7 @@ static struct request_sock_ops dccp6_request_sock_ops = {
 static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
        const struct dccp_hdr *dh = dccp_hdr(skb);
-       const struct ipv6hdr *iph = skb->nh.ipv6h;
+       const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
@@ -460,8 +462,8 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop_and_free;
 
        ireq6 = inet6_rsk(req);
-       ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
-       ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
+       ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
+       ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
        ireq6->pktopts  = NULL;
 
        if (ipv6_opt_accepted(sk, skb) ||
@@ -546,7 +548,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = inet6_iif(skb);
-               newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;
+               newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
 
                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
@@ -573,8 +575,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
                if (rxopt->srcrt)
                        opt = ipv6_invert_rthdr(sk,
-                               (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
-                                                      rxopt->srcrt));
+                  (struct ipv6_rt_hdr *)(skb_network_header(ireq6->pktopts) +
+                                         rxopt->srcrt));
        }
 
        if (dst == NULL) {
@@ -653,7 +655,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = inet6_iif(skb);
-       newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
+       newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
 
        /*
         * Clone native IPv6 options from listening socket (if any)
@@ -826,8 +828,8 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
                goto discard_it;
 
        /* Step 1: If header checksum is incorrect, drop packet and return. */
-       if (dccp_v6_csum_finish(skb, &skb->nh.ipv6h->saddr,
-                                    &skb->nh.ipv6h->daddr)) {
+       if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
+                                    &ipv6_hdr(skb)->daddr)) {
                DCCP_WARN("dropped packet with invalid checksum\n");
                goto discard_it;
        }
@@ -844,9 +846,9 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
 
        /* Step 2:
         *      Look up flow ID in table and get corresponding socket */
-       sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
+       sk = __inet6_lookup(&dccp_hashinfo, &ipv6_hdr(skb)->saddr,
                            dh->dccph_sport,
-                           &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
+                           &ipv6_hdr(skb)->daddr, ntohs(dh->dccph_dport),
                            inet6_iif(skb));
        /*
         * Step 2:
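
The hunks above mechanically swap the old skb->nh.ipv6h union access for the ipv6_hdr() helper (and skb->nh.raw for skb_network_header()). A minimal sketch of the pattern, using a hypothetical helper function:

    #include <linux/skbuff.h>
    #include <net/ipv6.h>

    /* hypothetical helper: copy the peer address out of a received skb */
    static void get_peer_addr(const struct sk_buff *skb, struct in6_addr *peer)
    {
            /* was: ipv6_addr_copy(peer, &skb->nh.ipv6h->saddr); */
            ipv6_addr_copy(peer, &ipv6_hdr(skb)->saddr);
    }

The helper resolves the header via skb_network_header(), so callers no longer depend on the layout of the removed nh union.
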
index 6d235b3..e18e249 100644 (file)
@@ -27,7 +27,7 @@
 struct inet_timewait_death_row dccp_death_row = {
        .sysctl_max_tw_buckets = NR_FILE * 2,
        .period         = DCCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
-       .death_lock     = SPIN_LOCK_UNLOCKED,
+       .death_lock     = __SPIN_LOCK_UNLOCKED(dccp_death_row.death_lock),
        .hashinfo       = &dccp_hashinfo,
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&dccp_death_row),
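
The initialiser change above is driven by lockdep rather than by behaviour: the legacy SPIN_LOCK_UNLOCKED gives every statically initialised lock the same lock class, while __SPIN_LOCK_UNLOCKED(name) keys the class to the named lock. A minimal sketch with a made-up structure:

    #include <linux/spinlock.h>

    struct my_death_row {
            spinlock_t lock;
            /* ... */
    };

    static struct my_death_row my_row = {
            /* name the lock after its field so lockdep gets a unique class */
            .lock = __SPIN_LOCK_UNLOCKED(my_row.lock),
    };
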
index ca13f77..34d536d 100644 (file)
@@ -29,8 +29,6 @@ int sysctl_dccp_feat_ack_ratio              = DCCPF_INITIAL_ACK_RATIO;
 int sysctl_dccp_feat_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR;
 int sysctl_dccp_feat_send_ndp_count  = DCCPF_INITIAL_SEND_NDP_COUNT;
 
-EXPORT_SYMBOL_GPL(sysctl_dccp_feat_sequence_window);
-
 void dccp_minisock_init(struct dccp_minisock *dmsk)
 {
        dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window;
@@ -174,21 +172,25 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
                        opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value);
 
                        dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, "
-                                     "ackno=%llu, ",  dccp_role(sk),
+                                     "ackno=%llu", dccp_role(sk),
                                      opt_recv->dccpor_timestamp_echo,
                                      len + 2,
                                      (unsigned long long)
                                      DCCP_SKB_CB(skb)->dccpd_ack_seq);
 
 
-                       if (len == 4)
+                       if (len == 4) {
+                               dccp_pr_debug_cat("\n");
                                break;
+                       }
 
                        if (len == 6)
                                elapsed_time = ntohs(*(__be16 *)(value + 4));
                        else
                                elapsed_time = ntohl(*(__be32 *)(value + 4));
 
+                       dccp_pr_debug_cat(", ELAPSED_TIME=%d\n", elapsed_time);
+
                        /* Give precedence to the biggest ELAPSED_TIME */
                        if (elapsed_time > opt_recv->dccpor_elapsed_time)
                                opt_recv->dccpor_elapsed_time = elapsed_time;
@@ -565,6 +567,14 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
            dccp_insert_options_feat(sk, skb))
                return -1;
 
+       /*
+        * Obtain RTT sample from Request/Response exchange.
+        * This is currently used in CCID 3 initialisation.
+        */
+       if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST &&
+           dccp_insert_option_timestamp(sk, skb))
+               return -1;
+
        /* XXX: insert other options when appropriate */
 
        if (DCCP_SKB_CB(skb)->dccpd_opt_len != 0) {
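
The Request-time timestamp inserted above gives a connection an RTT estimate before any data flows: the peer echoes the value back in TIMESTAMP_ECHO together with its ELAPSED_TIME (parsed in the options hunk further up), so on receipt of the Response the sender can compute, roughly,

    rtt_sample ~= t_now - t_echoed - elapsed_time

with the values nominally in DCCP's 10-microsecond timestamp units. This is the sample that CCID 3 initialisation consumes; the exact bookkeeping lives in the CCID code and is only sketched here.
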
index aa21cc4..c8d843e 100644 (file)
@@ -194,6 +194,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb)
                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
                if (rc <= 0)
                        break;
+               dccp_pr_debug("delayed send by %d msec\n", rc);
                delay = msecs_to_jiffies(rc);
                sk->sk_write_pending++;
                release_sock(sk);
@@ -255,7 +256,7 @@ void dccp_write_xmit(struct sock *sk, int block)
                                DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
                                         err);
                } else {
-                       dccp_pr_debug("packet discarded\n");
+                       dccp_pr_debug("packet discarded due to err=%d\n", err);
                        kfree_skb(skb);
                }
        }
index 3b1f509..1f5e3ba 100644 (file)
@@ -90,15 +90,18 @@ static int jdccp_sendmsg(struct kiocb *iocb, struct sock *sk,
        if (port == 0 || ntohs(inet->dport) == port ||
            ntohs(inet->sport) == port) {
                if (hctx)
-                       printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d %d %d %d %d\n",
-                          NIPQUAD(inet->saddr), ntohs(inet->sport),
-                          NIPQUAD(inet->daddr), ntohs(inet->dport), size,
-                          hctx->ccid3hctx_s, hctx->ccid3hctx_rtt,
-                          hctx->ccid3hctx_p, hctx->ccid3hctx_t_ipi);
+                       printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d %d %d %d %u "
+                              "%llu %llu %d\n",
+                              NIPQUAD(inet->saddr), ntohs(inet->sport),
+                              NIPQUAD(inet->daddr), ntohs(inet->dport), size,
+                              hctx->ccid3hctx_s, hctx->ccid3hctx_rtt,
+                              hctx->ccid3hctx_p, hctx->ccid3hctx_x_calc,
+                              hctx->ccid3hctx_x_recv >> 6,
+                              hctx->ccid3hctx_x >> 6, hctx->ccid3hctx_t_ipi);
                else
                        printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d\n",
-                          NIPQUAD(inet->saddr), ntohs(inet->sport),
-                          NIPQUAD(inet->daddr), ntohs(inet->dport), size);
+                              NIPQUAD(inet->saddr), ntohs(inet->sport),
+                              NIPQUAD(inet->daddr), ntohs(inet->dport), size);
        }
 
        jprobe_return();
index c6568d6..a205eaa 100644 (file)
@@ -2413,6 +2413,7 @@ module_init(decnet_init);
 static void __exit decnet_exit(void)
 {
        sock_unregister(AF_DECnet);
+       rtnl_unregister_all(PF_DECnet);
        dev_remove_pack(&dn_dix_packet_type);
 
        dn_unregister_sysctl();
index 060d725..5c2a995 100644 (file)
@@ -799,7 +799,6 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
        skip_ndevs = cb->args[0];
        skip_naddr = cb->args[1];
 
-       read_lock(&dev_base_lock);
        for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
                if (idx < skip_ndevs)
                        continue;
@@ -824,8 +823,6 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                }
        }
 done:
-       read_unlock(&dev_base_lock);
-
        cb->args[0] = idx;
        cb->args[1] = dn_idx;
 
@@ -913,7 +910,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
        pktlen = (__le16 *)skb_push(skb,2);
        *pktlen = dn_htons(skb->len - 2);
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id);
 }
@@ -1005,7 +1002,7 @@ static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
        pktlen = (__le16 *)skb_push(skb, 2);
        *pktlen = dn_htons(skb->len - 2);
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        if (dn_am_i_a_router(dn, dn_db, ifa)) {
                struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
@@ -1447,24 +1444,6 @@ static const struct file_operations dn_dev_seq_fops = {
 
 #endif /* CONFIG_PROC_FS */
 
-static struct rtnetlink_link dnet_rtnetlink_table[RTM_NR_MSGTYPES] =
-{
-       [RTM_NEWADDR  - RTM_BASE] = { .doit     = dn_nl_newaddr,        },
-       [RTM_DELADDR  - RTM_BASE] = { .doit     = dn_nl_deladdr,        },
-       [RTM_GETADDR  - RTM_BASE] = { .dumpit   = dn_nl_dump_ifaddr,    },
-#ifdef CONFIG_DECNET_ROUTER
-       [RTM_NEWROUTE - RTM_BASE] = { .doit     = dn_fib_rtm_newroute,  },
-       [RTM_DELROUTE - RTM_BASE] = { .doit     = dn_fib_rtm_delroute,  },
-       [RTM_GETROUTE - RTM_BASE] = { .doit     = dn_cache_getroute,
-                                     .dumpit   = dn_fib_dump,          },
-       [RTM_GETRULE  - RTM_BASE] = { .dumpit   = dn_fib_dump_rules,    },
-#else
-       [RTM_GETROUTE - RTM_BASE] = { .doit     = dn_cache_getroute,
-                                     .dumpit   = dn_cache_dump,        },
-#endif
-
-};
-
 static int __initdata addr[2];
 module_param_array(addr, int, NULL, 0444);
 MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node");
@@ -1485,7 +1464,9 @@ void __init dn_dev_init(void)
 
        dn_dev_devices_on();
 
-       rtnetlink_links[PF_DECnet] = dnet_rtnetlink_table;
+       rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL);
+       rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL);
+       rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr);
 
        proc_net_fops_create("decnet_dev", S_IRUGO, &dn_dev_seq_fops);
 
@@ -1500,8 +1481,6 @@ void __init dn_dev_init(void)
 
 void __exit dn_dev_cleanup(void)
 {
-       rtnetlink_links[PF_DECnet] = NULL;
-
 #ifdef CONFIG_SYSCTL
        {
                int i;
index 82d58a9..310a862 100644 (file)
@@ -504,7 +504,7 @@ static int dn_fib_check_attr(struct rtmsg *r, struct rtattr **rta)
        return 0;
 }
 
-int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
        struct dn_fib_table *tb;
        struct rtattr **rta = arg;
@@ -520,7 +520,7 @@ int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        return -ESRCH;
 }
 
-int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
        struct dn_fib_table *tb;
        struct rtattr **rta = arg;
@@ -748,11 +748,13 @@ void __exit dn_fib_cleanup(void)
 
 void __init dn_fib_init(void)
 {
-
        dn_fib_table_init();
        dn_fib_rules_init();
 
        register_dnaddr_notifier(&dn_fib_dnaddr_notifier);
+
+       rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL);
+       rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL);
 }
 
 
index bf701cf..4bf066c 100644 (file)
@@ -261,7 +261,7 @@ static int dn_long_output(struct sk_buff *skb)
        lp->s_class  = 0;
        lp->pt       = 0;
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
 }
@@ -300,7 +300,7 @@ static int dn_short_output(struct sk_buff *skb)
        sp->srcnode    = cb->src;
        sp->forward    = cb->hops & 0x3f;
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
 }
@@ -342,7 +342,7 @@ static int dn_phase3_output(struct sk_buff *skb)
        sp->srcnode  = cb->src & dn_htons(0x03ff);
        sp->forward  = cb->hops & 0x3f;
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
 }
index 9d20904..4074a6e 100644 (file)
@@ -362,7 +362,8 @@ static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
                        u16 dlen = *skb->data;
                        if ((dlen <= 16) && (dlen <= skb->len)) {
                                scp->conndata_in.opt_optl = dn_htons(dlen);
-                               memcpy(scp->conndata_in.opt_data, skb->data + 1, dlen);
+                               skb_copy_from_linear_data_offset(skb, 1,
+                                             scp->conndata_in.opt_data, dlen);
                        }
                }
                dn_nsp_send_link(sk, DN_NOCHANGE, 0);
@@ -406,7 +407,7 @@ static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
                u16 dlen = *skb->data;
                if ((dlen <= 16) && (dlen <= skb->len)) {
                        scp->discdata_in.opt_optl = dn_htons(dlen);
-                       memcpy(scp->discdata_in.opt_data, skb->data + 1, dlen);
+                       skb_copy_from_linear_data_offset(skb, 1, scp->discdata_in.opt_data, dlen);
                }
        }
 
@@ -725,7 +726,7 @@ static int dn_nsp_rx_packet(struct sk_buff *skb)
        if (!pskb_may_pull(skb, 2))
                goto free_out;
 
-       skb->h.raw    = skb->data;
+       skb_reset_transport_header(skb);
        cb->nsp_flags = *ptr++;
 
        if (decnet_debug_level & 2)
index 2d2cda8..7404653 100644 (file)
@@ -79,7 +79,7 @@ static void dn_nsp_send(struct sk_buff *skb)
        struct dst_entry *dst;
        struct flowi fl;
 
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        scp->stamp = jiffies;
 
        dst = sk_dst_check(sk, 0);
@@ -681,8 +681,10 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
        if (scp->peer.sdn_objnum)
                type = 0;
 
-       skb_put(skb, dn_sockaddr2username(&scp->peer, skb->tail, type));
-       skb_put(skb, dn_sockaddr2username(&scp->addr, skb->tail, 2));
+       skb_put(skb, dn_sockaddr2username(&scp->peer,
+                                         skb_tail_pointer(skb), type));
+       skb_put(skb, dn_sockaddr2username(&scp->addr,
+                                         skb_tail_pointer(skb), 2));
 
        menuver = DN_MENUVER_ACC | DN_MENUVER_USR;
        if (scp->peer.sdn_flags & SDF_PROXY)
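
These DECnet hunks, like most of this merge, replace direct pokes at skb->nh.raw, skb->h.raw and skb->tail with the new accessor family. A small sketch of the output-path idiom, with a made-up payload:

    #include <linux/skbuff.h>
    #include <linux/string.h>

    static void build_frame(struct sk_buff *skb, const void *payload, int len)
    {
            skb_reset_network_header(skb);            /* was: skb->nh.raw = skb->data */
            skb_reset_transport_header(skb);          /* was: skb->h.raw  = skb->data */
            memcpy(skb_put(skb, len), payload, len);  /* skb_put() still advances the tail */
            /* direct skb->tail arithmetic becomes skb_tail_pointer(skb) */
    }
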
index c1b5502..5d7337b 100644 (file)
@@ -77,6 +77,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <asm/errno.h>
+#include <net/netlink.h>
 #include <net/neighbour.h>
 #include <net/dst.h>
 #include <net/flow.h>
@@ -386,7 +387,7 @@ static int dn_return_short(struct sk_buff *skb)
        __le16 tmp;
 
        /* Add back headers */
-       skb_push(skb, skb->data - skb->nh.raw);
+       skb_push(skb, skb->data - skb_network_header(skb));
 
        if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
                return NET_RX_DROP;
@@ -425,7 +426,7 @@ static int dn_return_long(struct sk_buff *skb)
        unsigned char tmp[ETH_ALEN];
 
        /* Add back all headers */
-       skb_push(skb, skb->data - skb->nh.raw);
+       skb_push(skb, skb->data - skb_network_header(skb));
 
        if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
                return NET_RX_DROP;
@@ -504,7 +505,7 @@ static int dn_route_rx_long(struct sk_buff *skb)
                goto drop_it;
 
        skb_pull(skb, 20);
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
 
        /* Destination info */
        ptr += 2;
@@ -542,7 +543,7 @@ static int dn_route_rx_short(struct sk_buff *skb)
                goto drop_it;
 
        skb_pull(skb, 5);
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
 
        cb->dst = *(__le16 *)ptr;
        ptr += 2;
@@ -615,7 +616,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
                flags = *skb->data;
        }
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        /*
         * Weed out future version DECnet
@@ -1468,7 +1469,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
        struct dn_route *rt = (struct dn_route *)skb->dst;
        struct rtmsg *r;
        struct nlmsghdr *nlh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        long expires;
 
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
@@ -1509,19 +1510,19 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
        if (rt->fl.iif)
                RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
 /*
  * This is called by both endnodes and routers now.
  */
-int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
+static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
 {
        struct rtattr **rta = arg;
        struct rtmsg *rtm = NLMSG_DATA(nlh);
@@ -1537,7 +1538,7 @@ int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (skb == NULL)
                return -ENOBUFS;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        cb = DN_SKB_CB(skb);
 
        if (rta[RTA_SRC-1])
@@ -1812,6 +1813,13 @@ void __init dn_route_init(void)
        dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
 
        proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
+
+#ifdef CONFIG_DECNET_ROUTER
+       rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, dn_fib_dump);
+#else
+       rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
+                     dn_cache_dump);
+#endif
 }
 
 void __exit dn_route_cleanup(void)
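
dn_dev_init(), dn_fib_init() and dn_route_init() above (and devinet_init()/ip_fib_init() later in this merge) all move from filling a static rtnetlink_links[] table to per-message-type rtnl_register() calls. The pattern, with hypothetical handlers:

    #include <net/rtnetlink.h>

    static int my_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
    static int my_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb);

    static void __init my_rtnl_init(void)
    {
            rtnl_register(PF_DECnet, RTM_NEWADDR, my_newaddr, NULL);     /* doit only   */
            rtnl_register(PF_DECnet, RTM_GETADDR, NULL, my_dump_ifaddr); /* dumpit only */
    }

Either slot may be left NULL, as the address registrations above do; this also lets dn_fib_rtm_newroute() and friends become static, since nothing outside their file needs the symbols any more.
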
index 5e86dd5..17a1932 100644 (file)
@@ -31,6 +31,7 @@
 #include <net/dn_fib.h>
 #include <net/dn_neigh.h>
 #include <net/dn_dev.h>
+#include <net/dn_route.h>
 
 static struct fib_rules_ops dn_fib_rules_ops;
 
@@ -239,9 +240,9 @@ static u32 dn_fib_rule_default_pref(void)
        return 0;
 }
 
-int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
+static void dn_fib_rule_flush_cache(void)
 {
-       return fib_rules_dump(skb, cb, AF_DECnet);
+       dn_rt_cache_flush(-1);
 }
 
 static struct fib_rules_ops dn_fib_rules_ops = {
@@ -254,6 +255,7 @@ static struct fib_rules_ops dn_fib_rules_ops = {
        .compare        = dn_fib_rule_compare,
        .fill           = dn_fib_rule_fill,
        .default_pref   = dn_fib_rule_default_pref,
+       .flush_cache    = dn_fib_rule_flush_cache,
        .nlgroup        = RTNLGRP_DECnet_RULE,
        .policy         = dn_fib_rule_policy,
        .rules_list     = &dn_fib_rules,
index 780a141..d6615c9 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/uaccess.h>
 #include <linux/route.h> /* RTF_xxx */
 #include <net/neighbour.h>
+#include <net/netlink.h>
 #include <net/dst.h>
 #include <net/flow.h>
 #include <net/fib_rules.h>
@@ -295,7 +296,7 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 {
        struct rtmsg *rtm;
        struct nlmsghdr *nlh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
        rtm = NLMSG_DATA(nlh);
@@ -337,19 +338,19 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
                        nhp->rtnh_ifindex = nh->nh_oif;
                        if (nh->nh_gw)
                                RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
-                       nhp->rtnh_len = skb->tail - (unsigned char *)nhp;
+                       nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
                } endfor_nexthops(fi);
                mp_head->rta_type = RTA_MULTIPATH;
-               mp_head->rta_len = skb->tail - (u8*)mp_head;
+               mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
        }
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 
 nlmsg_failure:
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -EMSGSIZE;
 }
 
index 0e62def..6962346 100644 (file)
@@ -33,7 +33,7 @@ static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp)
 {
        struct sk_buff *skb = NULL;
        size_t size;
-       unsigned char *old_tail;
+       sk_buff_data_t old_tail;
        struct nlmsghdr *nlh;
        unsigned char *ptr;
        struct nf_dn_rtmsg *rtm;
@@ -48,7 +48,7 @@ static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp)
        rtm = (struct nf_dn_rtmsg *)NLMSG_DATA(nlh);
        rtm->nfdn_ifindex = rt_skb->dev->ifindex;
        ptr = NFDN_RTMSG(rtm);
-       memcpy(ptr, rt_skb->data, rt_skb->len);
+       skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len);
        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;
 
@@ -102,7 +102,7 @@ static unsigned int dnrmg_hook(unsigned int hook,
 
 static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
 {
-       struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+       struct nlmsghdr *nlh = nlmsg_hdr(skb);
 
        if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
                return;
@@ -138,7 +138,7 @@ static int __init dn_rtmsg_init(void)
        int rv = 0;
 
        dnrmg = netlink_kernel_create(NETLINK_DNRTMSG, DNRNG_NLGRP_MAX,
-                                     dnrmg_receive_user_sk, THIS_MODULE);
+                                     dnrmg_receive_user_sk, NULL, THIS_MODULE);
        if (dnrmg == NULL) {
                printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket");
                return -ENOMEM;
index bc12e36..b5524f3 100644 (file)
@@ -162,7 +162,7 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
        err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
        if (err)
                goto out_free;
-       skb_get_timestamp(skb, &sk->sk_stamp);
+       sk->sk_stamp = skb->tstamp;
 
        if (msg->msg_name)
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
@@ -345,7 +345,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                        goto out_unlock;
 
                skb_reserve(skb, LL_RESERVED_SPACE(dev));
-               skb->nh.raw = skb->data;
+               skb_reset_network_header(skb);
 
                eb = (struct ec_cb *)&skb->cb;
 
@@ -366,7 +366,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                        fh->cb = cb;
                        fh->port = port;
                        if (sock->type != SOCK_DGRAM) {
-                               skb->tail = skb->data;
+                               skb_reset_tail_pointer(skb);
                                skb->len = 0;
                        } else if (res < 0)
                                goto out_free;
@@ -727,6 +727,9 @@ static int econet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg
                case SIOCGSTAMP:
                        return sock_get_timestamp(sk, argp);
 
+               case SIOCGSTAMPNS:
+                       return sock_get_timestampns(sk, argp);
+
                case SIOCSIFADDR:
                case SIOCGIFADDR:
                        return ec_dev_ioctl(sock, cmd, argp);
@@ -845,7 +848,7 @@ static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
 
 static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
 {
-       struct iphdr *ip = skb->nh.iph;
+       struct iphdr *ip = ip_hdr(skb);
        unsigned char stn = ntohl(ip->saddr) & 0xff;
        struct sock *sk;
        struct sk_buff *newskb;
@@ -940,10 +943,10 @@ static void aun_data_available(struct sock *sk, int slen)
                printk(KERN_DEBUG "AUN: recvfrom() error %d\n", -err);
        }
 
-       data = skb->h.raw + sizeof(struct udphdr);
+       data = skb_transport_header(skb) + sizeof(struct udphdr);
        ah = (struct aunhdr *)data;
        len = skb->len - sizeof(struct udphdr);
-       ip = skb->nh.iph;
+       ip = ip_hdr(skb);
 
        switch (ah->code)
        {
index 7391f55..0ac2524 100644 (file)
@@ -156,7 +156,8 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
        struct ethhdr *eth;
        unsigned char *rawp;
 
-       skb->mac.raw = skb->data;
+       skb->dev = dev;
+       skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);
        eth = eth_hdr(skb);
 
@@ -228,7 +229,7 @@ int eth_header_cache(struct neighbour *neigh, struct hh_cache *hh)
        eth = (struct ethhdr *)
            (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth))));
 
-       if (type == __constant_htons(ETH_P_802_3))
+       if (type == htons(ETH_P_802_3))
                return -1;
 
        eth->h_proto = type;
index 6ef766e..1438ade 100644 (file)
@@ -56,7 +56,8 @@ config IEEE80211_CRYPT_CCMP
 
 config IEEE80211_CRYPT_TKIP
        tristate "IEEE 802.11i TKIP encryption"
-       depends on IEEE80211 && NET_RADIO
+       depends on IEEE80211
+       select WIRELESS_EXT
        select CRYPTO
        select CRYPTO_MICHAEL_MIC
        select CRYPTO_ECB
index ec6d885..4eb3507 100644 (file)
@@ -152,7 +152,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
                return -1;
 
        /* Copy the IV into the first 3 bytes of the key */
-       memcpy(key, skb->data + hdr_len, 3);
+       skb_copy_from_linear_data_offset(skb, hdr_len, key, 3);
 
        /* Copy rest of the WEP key (the secret part) */
        memcpy(key + 3, wep->key, wep->key_len);
index 4084909..6ae036b 100644 (file)
@@ -42,7 +42,7 @@ static void ieee80211_monitor_rx(struct ieee80211_device *ieee,
        u16 fc = le16_to_cpu(hdr->frame_ctl);
 
        skb->dev = ieee->dev;
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_pull(skb, ieee80211_get_hdrlen(fc));
        skb->pkt_type = PACKET_OTHERHOST;
        skb->protocol = __constant_htons(ETH_P_80211_RAW);
@@ -606,12 +606,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
                if (frag == 0) {
                        /* copy first fragment (including full headers) into
                         * beginning of the fragment cache skb */
-                       memcpy(skb_put(frag_skb, flen), skb->data, flen);
+                       skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), flen);
                } else {
                        /* append frame payload to the end of the fragment
                         * cache skb */
-                       memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
-                              flen);
+                       skb_copy_from_linear_data_offset(skb, hdrlen,
+                                     skb_put(frag_skb, flen), flen);
                }
                dev_kfree_skb_any(skb);
                skb = NULL;
@@ -759,8 +759,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
                    IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) {
                /* Non-standard frame: get addr4 from its bogus location after
                 * the payload */
-               memcpy(skb->data + ETH_ALEN,
-                      skb->data + skb->len - ETH_ALEN, ETH_ALEN);
+               skb_copy_to_linear_data_offset(skb, ETH_ALEN,
+                                              skb->data + skb->len - ETH_ALEN,
+                                              ETH_ALEN);
                skb_trim(skb, skb->len - ETH_ALEN);
        }
 #endif
@@ -789,10 +790,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
        if (skb2 != NULL) {
                /* send to wireless media */
-               skb2->protocol = __constant_htons(ETH_P_802_3);
-               skb2->mac.raw = skb2->nh.raw = skb2->data;
-               /* skb2->nh.raw = skb2->data + ETH_HLEN; */
                skb2->dev = dev;
+               skb2->protocol = __constant_htons(ETH_P_802_3);
+               skb_reset_mac_header(skb2);
+               skb_reset_network_header(skb2);
+               /* skb2->network_header += ETH_HLEN; */
                dev_queue_xmit(skb2);
        }
 #endif
@@ -800,7 +802,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
        if (skb) {
                skb->protocol = eth_type_trans(skb, dev);
                memset(skb->cb, 0, sizeof(skb->cb));
-               skb->dev = dev;
                skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
                if (netif_rx(skb) == NET_RX_DROP) {
                        /* netif_rx always succeeds, but it might drop
index 0292d63..a4c3c51 100644 (file)
@@ -225,10 +225,10 @@ static int ieee80211_classify(struct sk_buff *skb)
        struct iphdr *ip;
 
        eth = (struct ethhdr *)skb->data;
-       if (eth->h_proto != __constant_htons(ETH_P_IP))
+       if (eth->h_proto != htons(ETH_P_IP))
                return 0;
 
-       ip = skb->nh.iph;
+       ip = ip_hdr(skb);
        switch (ip->tos & 0xfc) {
        case 0x20:
                return 2;
@@ -309,8 +309,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        /* Save source and destination addresses */
-       memcpy(dest, skb->data, ETH_ALEN);
-       memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);
+       skb_copy_from_linear_data(skb, dest, ETH_ALEN);
+       skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);
 
        if (host_encrypt || host_build_iv)
                fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
@@ -363,7 +363,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
                snapped = 1;
                ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
                                    ether_type);
-               memcpy(skb_put(skb_new, skb->len), skb->data, skb->len);
+               skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
                res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
                if (res < 0) {
                        IEEE80211_ERROR("msdu encryption failed\n");
@@ -492,7 +492,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
                        bytes -= SNAP_SIZE + sizeof(u16);
                }
 
-               memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
+               skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes);
 
                /* Advance the SKB... */
                skb_pull(skb, bytes);
index 9e8ef50..e62aee0 100644 (file)
@@ -574,6 +574,33 @@ config TCP_CONG_VENO
        loss packets.
        See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf
 
+config TCP_CONG_YEAH
+       tristate "YeAH TCP"
+       depends on EXPERIMENTAL
+       default n
+       ---help---
+       YeAH-TCP is a sender-side high-speed enabled TCP congestion control
+       algorithm, which uses a mixed loss/delay approach to compute the
+       congestion window. Its design goals are high efficiency; internal,
+       RTT and Reno fairness; and resilience to link loss, while keeping
+       the load on network elements as low as possible.
+
+       For further details look here:
+         http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
+
+config TCP_CONG_ILLINOIS
+       tristate "TCP Illinois"
+       depends on EXPERIMENTAL
+       default n
+       ---help---
+       TCP-Illinois is a sender-side modification of TCP Reno for
+       high-speed, long-delay links. It uses the round-trip time to
+       adjust the alpha and beta parameters to achieve a higher average
+       throughput and maintain fairness.
+
+       For further details see:
+         http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
+
 choice
        prompt "Default TCP congestion control"
        default DEFAULT_CUBIC
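
Both new entries hook into the pluggable congestion-control framework rather than the core TCP code. A hedged sketch of how such a module registers itself; only the administrative fields are shown, a real module also supplies its algorithm hooks (.ssthresh, .cong_avoid, ...), and the exact set of ops fields varies between kernel versions:

    #include <linux/module.h>
    #include <net/tcp.h>

    static struct tcp_congestion_ops dummy_cong_ops = {
            /* the algorithm's hooks would be filled in here */
            .name  = "dummy",
            .owner = THIS_MODULE,
    };

    static int __init dummy_cong_init(void)
    {
            return tcp_register_congestion_control(&dummy_cong_ops);
    }

    static void __exit dummy_cong_exit(void)
    {
            tcp_unregister_congestion_control(&dummy_cong_ops);
    }

    module_init(dummy_cong_init);
    module_exit(dummy_cong_exit);

The .name string is what net.ipv4.tcp_congestion_control and the TCP_CONGESTION socket option select at run time.
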
index 7a06862..4ff6c15 100644 (file)
@@ -49,6 +49,8 @@ obj-$(CONFIG_TCP_CONG_VEGAS) += tcp_vegas.o
 obj-$(CONFIG_TCP_CONG_VENO) += tcp_veno.o
 obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
+obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
+obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
index cf358c8..16aae8e 100644 (file)
@@ -87,6 +87,7 @@
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/netfilter_ipv4.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -217,6 +218,26 @@ out:
        return err;
 }
 
+u32 inet_ehash_secret __read_mostly;
+EXPORT_SYMBOL(inet_ehash_secret);
+
+/*
+ * inet_ehash_secret must be set exactly once
+ * Instead of using a dedicated spinlock, we (ab)use inetsw_lock
+ */
+void build_ehash_secret(void)
+{
+       u32 rnd;
+       do {
+               get_random_bytes(&rnd, sizeof(rnd));
+       } while (rnd == 0);
+       spin_lock_bh(&inetsw_lock);
+       if (!inet_ehash_secret)
+               inet_ehash_secret = rnd;
+       spin_unlock_bh(&inetsw_lock);
+}
+EXPORT_SYMBOL(build_ehash_secret);
+
 /*
  *     Create an inet socket.
  */
@@ -233,6 +254,11 @@ static int inet_create(struct socket *sock, int protocol)
        int try_loading_module = 0;
        int err;
 
+       if (sock->type != SOCK_RAW &&
+           sock->type != SOCK_DGRAM &&
+           !inet_ehash_secret)
+               build_ehash_secret();
+
        sock->state = SS_UNCONNECTED;
 
        /* Look for the requested type/protocol pair. */
@@ -755,6 +781,9 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                case SIOCGSTAMP:
                        err = sock_get_timestamp(sk, (struct timeval __user *)arg);
                        break;
+               case SIOCGSTAMPNS:
+                       err = sock_get_timestampns(sk, (struct timespec __user *)arg);
+                       break;
                case SIOCADDRT:
                case SIOCDELRT:
                case SIOCRTMSG:
@@ -1109,7 +1138,7 @@ static int inet_gso_send_check(struct sk_buff *skb)
        if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
                goto out;
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        ihl = iph->ihl * 4;
        if (ihl < sizeof(*iph))
                goto out;
@@ -1117,8 +1146,9 @@ static int inet_gso_send_check(struct sk_buff *skb)
        if (unlikely(!pskb_may_pull(skb, ihl)))
                goto out;
 
-       skb->h.raw = __skb_pull(skb, ihl);
-       iph = skb->nh.iph;
+       __skb_pull(skb, ihl);
+       skb_reset_transport_header(skb);
+       iph = ip_hdr(skb);
        proto = iph->protocol & (MAX_INET_PROTOS - 1);
        err = -EPROTONOSUPPORT;
 
@@ -1152,7 +1182,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
        if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
                goto out;
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        ihl = iph->ihl * 4;
        if (ihl < sizeof(*iph))
                goto out;
@@ -1160,8 +1190,9 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
        if (unlikely(!pskb_may_pull(skb, ihl)))
                goto out;
 
-       skb->h.raw = __skb_pull(skb, ihl);
-       iph = skb->nh.iph;
+       __skb_pull(skb, ihl);
+       skb_reset_transport_header(skb);
+       iph = ip_hdr(skb);
        id = ntohs(iph->id);
        proto = iph->protocol & (MAX_INET_PROTOS - 1);
        segs = ERR_PTR(-EPROTONOSUPPORT);
@@ -1177,17 +1208,57 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
 
        skb = segs;
        do {
-               iph = skb->nh.iph;
+               iph = ip_hdr(skb);
                iph->id = htons(id++);
                iph->tot_len = htons(skb->len - skb->mac_len);
                iph->check = 0;
-               iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
+               iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
        } while ((skb = skb->next));
 
 out:
        return segs;
 }
 
+unsigned long snmp_fold_field(void *mib[], int offt)
+{
+       unsigned long res = 0;
+       int i;
+
+       for_each_possible_cpu(i) {
+               res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
+               res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
+       }
+       return res;
+}
+EXPORT_SYMBOL_GPL(snmp_fold_field);
+
+int snmp_mib_init(void *ptr[2], size_t mibsize, size_t mibalign)
+{
+       BUG_ON(ptr == NULL);
+       ptr[0] = __alloc_percpu(mibsize);
+       if (!ptr[0])
+               goto err0;
+       ptr[1] = __alloc_percpu(mibsize);
+       if (!ptr[1])
+               goto err1;
+       return 0;
+err1:
+       free_percpu(ptr[0]);
+       ptr[0] = NULL;
+err0:
+       return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(snmp_mib_init);
+
+void snmp_mib_free(void *ptr[2])
+{
+       BUG_ON(ptr == NULL);
+       free_percpu(ptr[0]);
+       free_percpu(ptr[1]);
+       ptr[0] = ptr[1] = NULL;
+}
+EXPORT_SYMBOL_GPL(snmp_mib_free);
+
 #ifdef CONFIG_IP_MULTICAST
 static struct net_protocol igmp_protocol = {
        .handler =      igmp_rcv,
@@ -1214,28 +1285,47 @@ static struct net_protocol icmp_protocol = {
 
 static int __init init_ipv4_mibs(void)
 {
-       net_statistics[0] = alloc_percpu(struct linux_mib);
-       net_statistics[1] = alloc_percpu(struct linux_mib);
-       ip_statistics[0] = alloc_percpu(struct ipstats_mib);
-       ip_statistics[1] = alloc_percpu(struct ipstats_mib);
-       icmp_statistics[0] = alloc_percpu(struct icmp_mib);
-       icmp_statistics[1] = alloc_percpu(struct icmp_mib);
-       tcp_statistics[0] = alloc_percpu(struct tcp_mib);
-       tcp_statistics[1] = alloc_percpu(struct tcp_mib);
-       udp_statistics[0] = alloc_percpu(struct udp_mib);
-       udp_statistics[1] = alloc_percpu(struct udp_mib);
-       udplite_statistics[0] = alloc_percpu(struct udp_mib);
-       udplite_statistics[1] = alloc_percpu(struct udp_mib);
-       if (!
-           (net_statistics[0] && net_statistics[1] && ip_statistics[0]
-            && ip_statistics[1] && tcp_statistics[0] && tcp_statistics[1]
-            && udp_statistics[0] && udp_statistics[1]
-            && udplite_statistics[0] && udplite_statistics[1]             ) )
-               return -ENOMEM;
-
-       (void) tcp_mib_init();
+       if (snmp_mib_init((void **)net_statistics,
+                         sizeof(struct linux_mib),
+                         __alignof__(struct linux_mib)) < 0)
+               goto err_net_mib;
+       if (snmp_mib_init((void **)ip_statistics,
+                         sizeof(struct ipstats_mib),
+                         __alignof__(struct ipstats_mib)) < 0)
+               goto err_ip_mib;
+       if (snmp_mib_init((void **)icmp_statistics,
+                         sizeof(struct icmp_mib),
+                         __alignof__(struct icmp_mib)) < 0)
+               goto err_icmp_mib;
+       if (snmp_mib_init((void **)tcp_statistics,
+                         sizeof(struct tcp_mib),
+                         __alignof__(struct tcp_mib)) < 0)
+               goto err_tcp_mib;
+       if (snmp_mib_init((void **)udp_statistics,
+                         sizeof(struct udp_mib),
+                         __alignof__(struct udp_mib)) < 0)
+               goto err_udp_mib;
+       if (snmp_mib_init((void **)udplite_statistics,
+                         sizeof(struct udp_mib),
+                         __alignof__(struct udp_mib)) < 0)
+               goto err_udplite_mib;
+
+       tcp_mib_init();
 
        return 0;
+
+err_udplite_mib:
+       snmp_mib_free((void **)udp_statistics);
+err_udp_mib:
+       snmp_mib_free((void **)tcp_statistics);
+err_tcp_mib:
+       snmp_mib_free((void **)icmp_statistics);
+err_icmp_mib:
+       snmp_mib_free((void **)ip_statistics);
+err_ip_mib:
+       snmp_mib_free((void **)net_statistics);
+err_net_mib:
+       return -ENOMEM;
 }
 
 static int ipv4_proc_init(void);
@@ -1336,7 +1426,7 @@ static int __init inet_init(void)
         *      Initialise per-cpu ipv4 mibs
         */
 
-       if(init_ipv4_mibs())
+       if (init_ipv4_mibs())
                printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ;
 
        ipv4_proc_init();
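
The snmp_mib_init()/snmp_fold_field()/snmp_mib_free() helpers added above package the two-instance (BH and user context) per-cpu MIB handling that init_ipv4_mibs() used to open-code. A usage sketch; the MIB type and field name are merely illustrative:

    #include <net/ip.h>
    #include <net/snmp.h>

    static DEFINE_SNMP_STAT(struct ipstats_mib, my_ip_stats);

    static int __init my_mib_init(void)
    {
            return snmp_mib_init((void **)my_ip_stats,
                                 sizeof(struct ipstats_mib),
                                 __alignof__(struct ipstats_mib));
    }

    static unsigned long my_in_receives(void)
    {
            /* fold the chosen counter across both instances and all CPUs */
            return snmp_fold_field((void **)my_ip_stats, IPSTATS_MIB_INRECEIVES);
    }

    static void __exit my_mib_exit(void)
    {
            snmp_mib_free((void **)my_ip_stats);
    }
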
index 7194eb4..6da8ff5 100644 (file)
@@ -65,7 +65,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
                char            buf[60];
        } tmp_iph;
 
-       top_iph = skb->nh.iph;
+       top_iph = ip_hdr(skb);
        iph = &tmp_iph.iph;
 
        iph->tos = top_iph->tos;
@@ -152,9 +152,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
        skb->ip_summed = CHECKSUM_NONE;
 
        ah = (struct ip_auth_hdr*)skb->data;
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
 
-       ihl = skb->data - skb->nh.raw;
+       ihl = skb->data - skb_network_header(skb);
        memcpy(work_buf, iph, ihl);
 
        iph->ttl = 0;
@@ -181,7 +181,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
                }
        }
        ((struct iphdr*)work_buf)->protocol = ah->nexthdr;
-       skb->h.raw = memcpy(skb->nh.raw += ah_hlen, work_buf, ihl);
+       skb->network_header += ah_hlen;
+       memcpy(skb_network_header(skb), work_buf, ihl);
+       skb->transport_header = skb->network_header;
        __skb_pull(skb, ah_hlen + ihl);
 
        return 0;
@@ -196,8 +198,8 @@ static void ah4_err(struct sk_buff *skb, u32 info)
        struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+(iph->ihl<<2));
        struct xfrm_state *x;
 
-       if (skb->h.icmph->type != ICMP_DEST_UNREACH ||
-           skb->h.icmph->code != ICMP_FRAG_NEEDED)
+       if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
+           icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                return;
 
        x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
index 1a3488a..7110779 100644 (file)
@@ -342,13 +342,13 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
        switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
        default:
        case 0:         /* By default announce any local IP */
-               if (skb && inet_addr_type(skb->nh.iph->saddr) == RTN_LOCAL)
-                       saddr = skb->nh.iph->saddr;
+               if (skb && inet_addr_type(ip_hdr(skb)->saddr) == RTN_LOCAL)
+                       saddr = ip_hdr(skb)->saddr;
                break;
        case 1:         /* Restrict announcements of saddr in same subnet */
                if (!skb)
                        break;
-               saddr = skb->nh.iph->saddr;
+               saddr = ip_hdr(skb)->saddr;
                if (inet_addr_type(saddr) == RTN_LOCAL) {
                        /* saddr should be known to target */
                        if (inet_addr_onlink(in_dev, target, saddr))
@@ -578,7 +578,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
                return NULL;
 
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
        arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
        skb->dev = dev;
        skb->protocol = htons(ETH_P_ARP);
@@ -721,7 +721,7 @@ static int arp_process(struct sk_buff *skb)
        if (in_dev == NULL)
                goto out;
 
-       arp = skb->nh.arph;
+       arp = arp_hdr(skb);
 
        switch (dev_type) {
        default:
@@ -937,7 +937,7 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
                                 (2 * sizeof(u32)))))
                goto freeskb;
 
-       arp = skb->nh.arph;
+       arp = arp_hdr(skb);
        if (arp->ar_hln != dev->addr_len ||
            dev->flags & IFF_NOARP ||
            skb->pkt_type == PACKET_OTHERHOST ||
@@ -1178,7 +1178,7 @@ int arp_ioctl(unsigned int cmd, void __user *arg)
                goto out;
        }
 
-       switch(cmd) {
+       switch (cmd) {
        case SIOCDARP:
                err = arp_req_delete(&r, dev);
                break;
@@ -1360,7 +1360,7 @@ static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 
 /* ------------------------------------------------------------------------ */
 
-static struct seq_operations arp_seq_ops = {
+static const struct seq_operations arp_seq_ops = {
        .start  = arp_seq_start,
        .next   = neigh_seq_next,
        .stop   = neigh_seq_stop,
index 2ce5b69..11a3404 100644 (file)
@@ -1174,7 +1174,7 @@ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
        u16 cat_low;
        u16 cat_high;
 
-       for(net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
+       for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
                cat_high = ntohs(*((__be16 *)&net_cat[net_iter]));
                if ((net_iter + 4) <= net_cat_len)
                        cat_low = ntohs(*((__be16 *)&net_cat[net_iter + 2]));
@@ -1676,7 +1676,7 @@ validate_return:
  */
 void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
 {
-       if (skb->nh.iph->protocol == IPPROTO_ICMP || error != -EACCES)
+       if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
                return;
 
        if (gateway)
index 98a00d0..088888d 100644 (file)
@@ -48,7 +48,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
 #include <linux/init.h>
 #include <linux/notifier.h>
 #include <linux/inetdevice.h>
@@ -62,7 +61,7 @@
 #include <net/ip.h>
 #include <net/route.h>
 #include <net/ip_fib.h>
-#include <net/netlink.h>
+#include <net/rtnetlink.h>
 
 struct ipv4_devconf ipv4_devconf = {
        .accept_redirects = 1,
@@ -633,7 +632,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
        dev_load(ifr.ifr_name);
 #endif
 
-       switch(cmd) {
+       switch (cmd) {
        case SIOCGIFADDR:       /* Get interface address */
        case SIOCGIFBRDADDR:    /* Get the broadcast address */
        case SIOCGIFDSTADDR:    /* Get the destination address */
@@ -708,7 +707,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
        if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
                goto done;
 
-       switch(cmd) {
+       switch (cmd) {
        case SIOCGIFADDR:       /* Get interface address */
                sin->sin_addr.s_addr = ifa->ifa_local;
                goto rarok;
@@ -1183,17 +1182,13 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
        int s_ip_idx, s_idx = cb->args[0];
 
        s_ip_idx = ip_idx = cb->args[1];
-       read_lock(&dev_base_lock);
        for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
                if (idx < s_idx)
                        continue;
                if (idx > s_idx)
                        s_ip_idx = 0;
-               rcu_read_lock();
-               if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
-                       rcu_read_unlock();
+               if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
                        continue;
-               }
 
                for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
                     ifa = ifa->ifa_next, ip_idx++) {
@@ -1201,16 +1196,12 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                                continue;
                        if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
                                             cb->nlh->nlmsg_seq,
-                                            RTM_NEWADDR, NLM_F_MULTI) <= 0) {
-                               rcu_read_unlock();
+                                            RTM_NEWADDR, NLM_F_MULTI) <= 0)
                                goto done;
-                       }
                }
-               rcu_read_unlock();
        }
 
 done:
-       read_unlock(&dev_base_lock);
        cb->args[0] = idx;
        cb->args[1] = ip_idx;
 
@@ -1241,19 +1232,6 @@ errout:
                rtnl_set_sk_err(RTNLGRP_IPV4_IFADDR, err);
 }
 
-static struct rtnetlink_link inet_rtnetlink_table[RTM_NR_MSGTYPES] = {
-       [RTM_NEWADDR  - RTM_BASE] = { .doit     = inet_rtm_newaddr,     },
-       [RTM_DELADDR  - RTM_BASE] = { .doit     = inet_rtm_deladdr,     },
-       [RTM_GETADDR  - RTM_BASE] = { .dumpit   = inet_dump_ifaddr,     },
-       [RTM_NEWROUTE - RTM_BASE] = { .doit     = inet_rtm_newroute,    },
-       [RTM_DELROUTE - RTM_BASE] = { .doit     = inet_rtm_delroute,    },
-       [RTM_GETROUTE - RTM_BASE] = { .doit     = inet_rtm_getroute,
-                                     .dumpit   = inet_dump_fib,        },
-#ifdef CONFIG_IP_MULTIPLE_TABLES
-       [RTM_GETRULE  - RTM_BASE] = { .dumpit   = fib4_rules_dump,      },
-#endif
-};
-
 #ifdef CONFIG_SYSCTL
 
 void inet_forward_change(void)
@@ -1636,7 +1614,10 @@ void __init devinet_init(void)
 {
        register_gifconf(PF_INET, inet_gifconf);
        register_netdevice_notifier(&ip_netdev_notifier);
-       rtnetlink_links[PF_INET] = inet_rtnetlink_table;
+
+       rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL);
+       rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL);
+       rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
 #ifdef CONFIG_SYSCTL
        devinet_sysctl.sysctl_header =
                register_sysctl_table(devinet_sysctl.devinet_root_dir);
index 3104112..47c95e8 100644 (file)
@@ -21,13 +21,14 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        struct blkcipher_desc desc;
        struct esp_data *esp;
        struct sk_buff *trailer;
+       u8 *tail;
        int blksize;
        int clen;
        int alen;
        int nfrags;
 
        /* Strip IP+ESP header. */
-       __skb_pull(skb, skb->h.raw - skb->data);
+       __skb_pull(skb, skb_transport_offset(skb));
        /* Now skb is pure payload to encrypt */
 
        err = -ENOMEM;
@@ -49,19 +50,21 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
                goto error;
 
        /* Fill padding... */
+       tail = skb_tail_pointer(trailer);
        do {
                int i;
                for (i=0; i<clen-skb->len - 2; i++)
-                       *(u8*)(trailer->tail + i) = i+1;
+                       tail[i] = i + 1;
        } while (0);
-       *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
+       tail[clen - skb->len - 2] = (clen - skb->len) - 2;
        pskb_put(skb, trailer, clen - skb->len);
 
-       __skb_push(skb, skb->data - skb->nh.raw);
-       top_iph = skb->nh.iph;
-       esph = (struct ip_esp_hdr *)(skb->nh.raw + top_iph->ihl*4);
+       __skb_push(skb, skb->data - skb_network_header(skb));
+       top_iph = ip_hdr(skb);
+       esph = (struct ip_esp_hdr *)(skb_network_header(skb) +
+                                    top_iph->ihl * 4);
        top_iph->tot_len = htons(skb->len + alen);
-       *(u8*)(trailer->tail - 1) = top_iph->protocol;
+       *(skb_tail_pointer(trailer) - 1) = top_iph->protocol;
 
        /* this is non-NULL only with UDP Encapsulation */
        if (x->encap) {
@@ -217,12 +220,12 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
        /* ... check padding bits here. Silly. :-) */
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        ihl = iph->ihl * 4;
 
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
-               struct udphdr *uh = (void *)(skb->nh.raw + ihl);
+               struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
 
                /*
                 * 1) if the NAT-T peer's IP or port changed then
@@ -260,7 +263,8 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
        iph->protocol = nexthdr[1];
        pskb_trim(skb, skb->len - alen - padlen - 2);
-       skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) - ihl;
+       __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
+       skb_set_transport_header(skb, -ihl);
 
        return 0;
 
@@ -268,32 +272,33 @@ out:
        return -EINVAL;
 }
 
-static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
+static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 {
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-       int enclen = 0;
+       u32 align = max_t(u32, blksize, esp->conf.padlen);
+       u32 rem;
+
+       mtu -= x->props.header_len + esp->auth.icv_trunc_len;
+       rem = mtu & (align - 1);
+       mtu &= ~(align - 1);
 
        switch (x->props.mode) {
        case XFRM_MODE_TUNNEL:
-               mtu = ALIGN(mtu +2, blksize);
                break;
        default:
        case XFRM_MODE_TRANSPORT:
                /* The worst case */
-               mtu = ALIGN(mtu + 2, 4) + blksize - 4;
+               mtu -= blksize - 4;
+               mtu += min_t(u32, blksize - 4, rem);
                break;
        case XFRM_MODE_BEET:
                /* The worst case. */
-               enclen = IPV4_BEET_PHMAXLEN;
-               mtu = ALIGN(mtu + enclen + 2, blksize);
+               mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
                break;
        }
 
-       if (esp->conf.padlen)
-               mtu = ALIGN(mtu, esp->conf.padlen);
-
-       return mtu + x->props.header_len + esp->auth.icv_trunc_len - enclen;
+       return mtu - 2;
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
@@ -302,8 +307,8 @@ static void esp4_err(struct sk_buff *skb, u32 info)
        struct ip_esp_hdr *esph = (struct ip_esp_hdr*)(skb->data+(iph->ihl<<2));
        struct xfrm_state *x;
 
-       if (skb->h.icmph->type != ICMP_DEST_UNREACH ||
-           skb->h.icmph->code != ICMP_FRAG_NEEDED)
+       if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
+           icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                return;
 
        x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
@@ -336,6 +341,7 @@ static int esp_init_state(struct xfrm_state *x)
 {
        struct esp_data *esp = NULL;
        struct crypto_blkcipher *tfm;
+       u32 align;
 
        /* null auth and encryption can have zero length keys */
        if (x->aalg) {
@@ -402,6 +408,8 @@ static int esp_init_state(struct xfrm_state *x)
        x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
+       else if (x->props.mode == XFRM_MODE_BEET)
+               x->props.header_len += IPV4_BEET_PHMAXLEN;
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
 
@@ -417,7 +425,10 @@ static int esp_init_state(struct xfrm_state *x)
                }
        }
        x->data = esp;
-       x->props.trailer_len = esp4_get_max_size(x, 0) - x->props.header_len;
+       align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
+       if (esp->conf.padlen)
+               align = max_t(u32, align, esp->conf.padlen);
+       x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
        return 0;
 
 error:
@@ -434,7 +445,7 @@ static struct xfrm_type esp_type =
        .proto          = IPPROTO_ESP,
        .init_state     = esp_init_state,
        .destructor     = esp_destroy,
-       .get_max_size   = esp4_get_max_size,
+       .get_mtu        = esp4_get_mtu,
        .input          = esp_input,
        .output         = esp_output
 };
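
The esp4_get_max_size() to esp4_get_mtu() rewrite inverts the calculation: instead of padding a payload size up to its on-the-wire size, it takes the link MTU and works back to the largest inner packet that still fits. With illustrative figures (tunnel mode, an AES-CBC-style 16-byte block and IV, 8-byte ESP header and 20-byte outer IPv4 header for 44 bytes of header_len, 12-byte truncated ICV, 1500-byte MTU):

    1500 - (44 + 12)                        = 1444   left for the encrypted part
    1444 rounded down to a 16-byte multiple = 1440   encrypted payload incl. the 2 trailer bytes
    1440 - 2                                = 1438   largest inner packet esp4_get_mtu() reports

Check: 20 + 8 + 16 + 1440 + 12 = 1496 <= 1500, while one more byte of inner packet would pad out to 1456 and overflow the link.
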
index fc920f6..837f295 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/if_addr.h>
 #include <linux/if_arp.h>
 #include <linux/skbuff.h>
-#include <linux/netlink.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
@@ -46,6 +45,7 @@
 #include <net/icmp.h>
 #include <net/arp.h>
 #include <net/ip_fib.h>
+#include <net/rtnetlink.h>
 
 #define FFprint(a...) printk(KERN_DEBUG a)
 
@@ -540,7 +540,7 @@ errout:
        return err;
 }
 
-int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 {
        struct fib_config cfg;
        struct fib_table *tb;
@@ -561,7 +561,7 @@ errout:
        return err;
 }
 
-int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 {
        struct fib_config cfg;
        struct fib_table *tb;
@@ -582,7 +582,7 @@ errout:
        return err;
 }
 
-int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
+static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 {
        unsigned int h, s_h;
        unsigned int e = 0, s_e;
@@ -776,6 +776,12 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
                                       .nl_u = { .ip4_u = { .daddr = frn->fl_addr,
                                                            .tos = frn->fl_tos,
                                                            .scope = frn->fl_scope } } };
+
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+       res.r = NULL;
+#endif
+
+       frn->err = -ENOENT;
        if (tb) {
                local_bh_disable();
 
@@ -787,6 +793,7 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
                        frn->nh_sel = res.nh_sel;
                        frn->type = res.type;
                        frn->scope = res.scope;
+                       fib_res_put(&res);
                }
                local_bh_enable();
        }
@@ -801,7 +808,10 @@ static void nl_fib_input(struct sock *sk, int len)
        struct fib_table *tb;
 
        skb = skb_dequeue(&sk->sk_receive_queue);
-       nlh = (struct nlmsghdr *)skb->data;
+       if (skb == NULL)
+               return;
+
+       nlh = nlmsg_hdr(skb);
        if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
            nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn))) {
                kfree_skb(skb);
@@ -813,7 +823,7 @@ static void nl_fib_input(struct sock *sk, int len)
 
        nl_fib_lookup(frn, tb);
 
-       pid = nlh->nlmsg_pid;           /*pid of sending process */
+       pid = NETLINK_CB(skb).pid;       /* pid of sending process */
        NETLINK_CB(skb).pid = 0;         /* from kernel */
        NETLINK_CB(skb).dst_group = 0;  /* unicast */
        netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
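
[Note: besides tolerating an empty receive queue, the hunk above keeps the existing length checks and replies to the netlink pid recorded in the skb control block rather than trusting the pid field inside the message. The same sanity checks translate directly to userspace with the uapi macros; a minimal sketch, with PAYLOAD_SIZE standing in for sizeof(struct fib_result_nl):]

    #include <stddef.h>
    #include <linux/netlink.h>

    #define PAYLOAD_SIZE 32   /* illustrative stand-in, not the real struct size */

    static int fib_msg_ok(const void *buf, size_t len)
    {
        const struct nlmsghdr *nlh = buf;

        if (len < NLMSG_SPACE(0))                         /* no room for a header         */
            return 0;
        if (len < nlh->nlmsg_len)                         /* header claims too much data  */
            return 0;
        if (nlh->nlmsg_len < NLMSG_LENGTH(PAYLOAD_SIZE))  /* payload shorter than request */
            return 0;
        return 1;
    }
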
@@ -821,7 +831,8 @@ static void nl_fib_input(struct sock *sk, int len)
 
 static void nl_fib_lookup_init(void)
 {
-      netlink_kernel_create(NETLINK_FIB_LOOKUP, 0, nl_fib_input, THIS_MODULE);
+      netlink_kernel_create(NETLINK_FIB_LOOKUP, 0, nl_fib_input, NULL,
+                           THIS_MODULE);
 }
 
 static void fib_disable_ip(struct net_device *dev, int force)
@@ -919,6 +930,10 @@ void __init ip_fib_init(void)
        register_netdevice_notifier(&fib_netdev_notifier);
        register_inetaddr_notifier(&fib_inetaddr_notifier);
        nl_fib_lookup_init();
+
+       rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL);
+       rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL);
+       rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib);
 }
 
 EXPORT_SYMBOL(inet_addr_type);
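
[Note: with the three handlers now static, they are reachable only through the rtnl_register() calls added to ip_fib_init(); each call fills the doit and/or dumpit slot for one (family, message type) pair, and NULL means "no handler of that kind", as RTM_GETROUTE shows for doit. A toy userspace table in the same shape; every name below is invented for the sketch:]

    #include <stdio.h>

    typedef int (*doit_fn)(const char *msg);
    typedef int (*dumpit_fn)(void);

    /* toy table indexed by message type only; the real rtnetlink table is
     * indexed by (protocol family, message type) */
    enum { TOY_NEWROUTE, TOY_DELROUTE, TOY_GETROUTE, TOY_MAX };
    static struct { doit_fn doit; dumpit_fn dumpit; } toy_table[TOY_MAX];

    static void toy_register(int type, doit_fn doit, dumpit_fn dumpit)
    {
        toy_table[type].doit = doit;
        toy_table[type].dumpit = dumpit;
    }

    static int toy_newroute(const char *msg) { return printf("new: %s\n", msg) < 0; }

    int main(void)
    {
        toy_register(TOY_NEWROUTE, toy_newroute, NULL);   /* no dump handler */
        return toy_table[TOY_NEWROUTE].doit("10.0.0.0/8");
    }
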
index a4949f9..9cfecf1 100644 (file)
@@ -1027,7 +1027,7 @@ out:
        return 0;
 }
 
-static struct seq_operations fib_seq_ops = {
+static const struct seq_operations fib_seq_ops = {
        .start  = fib_seq_start,
        .next   = fib_seq_next,
        .stop   = fib_seq_stop,
index c660c07..33083ad 100644 (file)
@@ -274,11 +274,6 @@ nla_put_failure:
        return -ENOBUFS;
 }
 
-int fib4_rules_dump(struct sk_buff *skb, struct netlink_callback *cb)
-{
-       return fib_rules_dump(skb, cb, AF_INET);
-}
-
 static u32 fib4_rule_default_pref(void)
 {
        struct list_head *pos;
@@ -303,6 +298,11 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
               + nla_total_size(4); /* flow */
 }
 
+static void fib4_rule_flush_cache(void)
+{
+       rt_cache_flush(-1);
+}
+
 static struct fib_rules_ops fib4_rules_ops = {
        .family         = AF_INET,
        .rule_size      = sizeof(struct fib4_rule),
@@ -314,6 +314,7 @@ static struct fib_rules_ops fib4_rules_ops = {
        .fill           = fib4_rule_fill,
        .default_pref   = fib4_rule_default_pref,
        .nlmsg_payload  = fib4_rule_nlmsg_payload,
+       .flush_cache    = fib4_rule_flush_cache,
        .nlgroup        = RTNLGRP_IPV4_RULE,
        .policy         = fib4_rule_policy,
        .rules_list     = &fib4_rules,
index 3dad12e..406ea70 100644 (file)
@@ -927,7 +927,7 @@ int fib_semantic_match(struct list_head *head, const struct flowi *flp,
                        default:
                                printk(KERN_DEBUG "impossible 102\n");
                                return -EINVAL;
-                       };
+                       }
                }
                return err;
        }
index 214c347..9be7da7 100644 (file)
@@ -50,7 +50,7 @@
  *             Patrick McHardy <kaber@trash.net>
  */
 
-#define VERSION "0.407"
+#define VERSION "0.408"
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -292,8 +292,8 @@ static inline void check_tnode(const struct tnode *tn)
 
 static int halve_threshold = 25;
 static int inflate_threshold = 50;
-static int halve_threshold_root = 15;
-static int inflate_threshold_root = 25;
+static int halve_threshold_root = 8;
+static int inflate_threshold_root = 15;
 
 
 static void __alias_free_mem(struct rcu_head *head)
@@ -350,11 +350,10 @@ static void __tnode_free_rcu(struct rcu_head *head)
 
 static inline void tnode_free(struct tnode *tn)
 {
-       if(IS_LEAF(tn)) {
+       if (IS_LEAF(tn)) {
                struct leaf *l = (struct leaf *) tn;
                call_rcu_bh(&l->rcu, __leaf_free_rcu);
-       }
-       else
+       } else
                call_rcu(&tn->rcu, __tnode_free_rcu);
 }
 
@@ -459,6 +458,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
        struct tnode *old_tn;
        int inflate_threshold_use;
        int halve_threshold_use;
+       int max_resize;
 
        if (!tn)
                return NULL;
@@ -553,13 +553,14 @@ static struct node *resize(struct trie *t, struct tnode *tn)
 
        /* Keep root node larger  */
 
-       if(!tn->parent)
+       if (!tn->parent)
                inflate_threshold_use = inflate_threshold_root;
        else
                inflate_threshold_use = inflate_threshold;
 
        err = 0;
-       while ((tn->full_children > 0 &&
+       max_resize = 10;
+       while ((tn->full_children > 0 &&  max_resize-- &&
               50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
                                inflate_threshold_use * tnode_child_length(tn))) {
 
@@ -574,6 +575,15 @@ static struct node *resize(struct trie *t, struct tnode *tn)
                }
        }
 
+       if (max_resize < 0) {
+               if (!tn->parent)
+                       printk(KERN_WARNING "Fix inflate_threshold_root. Now=%d size=%d bits\n",
+                              inflate_threshold_root, tn->bits);
+               else
+                       printk(KERN_WARNING "Fix inflate_threshold. Now=%d size=%d bits\n",
+                              inflate_threshold, tn->bits);
+       }
+
        check_tnode(tn);
 
        /*
@@ -584,13 +594,14 @@ static struct node *resize(struct trie *t, struct tnode *tn)
 
        /* Keep root node larger  */
 
-       if(!tn->parent)
+       if (!tn->parent)
                halve_threshold_use = halve_threshold_root;
        else
                halve_threshold_use = halve_threshold;
 
        err = 0;
-       while (tn->bits > 1 &&
+       max_resize = 10;
+       while (tn->bits > 1 &&  max_resize-- &&
               100 * (tnode_child_length(tn) - tn->empty_children) <
               halve_threshold_use * tnode_child_length(tn)) {
 
@@ -605,6 +616,14 @@ static struct node *resize(struct trie *t, struct tnode *tn)
                }
        }
 
+       if (max_resize < 0) {
+               if (!tn->parent)
+                       printk(KERN_WARNING "Fix halve_threshold_root. Now=%d size=%d bits\n",
+                              halve_threshold_root, tn->bits);
+               else
+                       printk(KERN_WARNING "Fix halve_threshold. Now=%d size=%d bits\n",
+                              halve_threshold, tn->bits);
+       }
 
        /* Only one child remains */
        if (tn->empty_children == tnode_child_length(tn) - 1)
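
[Note: the max_resize counter caps each resize pass at ten inflate or halve steps; since the counter is decremented inside the loop condition, ending up below zero means the pass was cut short by the cap rather than by the threshold test, which is when the warning above fires. The two ratio tests themselves are pure arithmetic and can be sketched standalone; child_len corresponds to 1 << tn->bits:]

    #include <stdio.h>

    static int should_inflate(int full, int empty, int child_len, int threshold)
    {
        return 50 * (full + child_len - empty) >= threshold * child_len;
    }

    static int should_halve(int empty, int child_len, int threshold)
    {
        return 100 * (child_len - empty) < threshold * child_len;
    }

    int main(void)
    {
        /* a root node with 16 slots, 3 full children, 4 empty, checked
         * against the new root thresholds (inflate 15, halve 8) */
        printf("inflate: %d\n", should_inflate(3, 4, 16, 15));
        printf("halve:   %d\n", should_halve(4, 16, 8));
        return 0;
    }
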
@@ -2039,12 +2058,12 @@ static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
 {
        struct node *n ;
 
-       if(!t)
+       if (!t)
                return NULL;
 
        n = rcu_dereference(t->trie);
 
-       if(!iter)
+       if (!iter)
                return NULL;
 
        if (n) {
@@ -2084,7 +2103,7 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
                        int i;
 
                        s->tnodes++;
-                       if(tn->bits < MAX_STAT_DEPTH)
+                       if (tn->bits < MAX_STAT_DEPTH)
                                s->nodesizes[tn->bits]++;
 
                        for (i = 0; i < (1<<tn->bits); i++)
@@ -2250,7 +2269,7 @@ static inline const char *rtn_scope(enum rt_scope_t s)
 {
        static char buf[32];
 
-       switch(s) {
+       switch (s) {
        case RT_SCOPE_UNIVERSE: return "universe";
        case RT_SCOPE_SITE:     return "site";
        case RT_SCOPE_LINK:     return "link";
@@ -2340,7 +2359,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations fib_trie_seq_ops = {
+static const struct seq_operations fib_trie_seq_ops = {
        .start  = fib_trie_seq_start,
        .next   = fib_trie_seq_next,
        .stop   = fib_trie_seq_stop,
@@ -2461,7 +2480,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations fib_route_seq_ops = {
+static const struct seq_operations fib_route_seq_ops = {
        .start  = fib_trie_seq_start,
        .next   = fib_trie_seq_next,
        .stop   = fib_trie_seq_stop,
index 4b7a0d9..d38cbba 100644 (file)
@@ -355,7 +355,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
                           ipc, rt, MSG_DONTWAIT) < 0)
                ip_flush_pending_frames(icmp_socket->sk);
        else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
-               struct icmphdr *icmph = skb->h.icmph;
+               struct icmphdr *icmph = icmp_hdr(skb);
                __wsum csum = 0;
                struct sk_buff *skb1;
 
@@ -392,7 +392,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        icmp_param->data.icmph.checksum = 0;
        icmp_out_count(icmp_param->data.icmph.type);
 
-       inet->tos = skb->nh.iph->tos;
+       inet->tos = ip_hdr(skb)->tos;
        daddr = ipc.addr = rt->rt_src;
        ipc.opt = NULL;
        if (icmp_param->replyopts.optlen) {
@@ -404,7 +404,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
                struct flowi fl = { .nl_u = { .ip4_u =
                                              { .daddr = daddr,
                                                .saddr = rt->rt_spec_dst,
-                                               .tos = RT_TOS(skb->nh.iph->tos) } },
+                                               .tos = RT_TOS(ip_hdr(skb)->tos) } },
                                    .proto = IPPROTO_ICMP };
                security_skb_classify_flow(skb, &fl);
                if (ip_route_output_key(&rt, &fl))
@@ -448,9 +448,10 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
         *      Check this, icmp_send is called from the most obscure devices
         *      sometimes.
         */
-       iph = skb_in->nh.iph;
+       iph = ip_hdr(skb_in);
 
-       if ((u8 *)iph < skb_in->head || (u8 *)(iph + 1) > skb_in->tail)
+       if ((u8 *)iph < skb_in->head ||
+           (skb_in->network_header + sizeof(*iph)) > skb_in->tail)
                goto out;
 
        /*
@@ -484,7 +485,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                        u8 _inner_type, *itp;
 
                        itp = skb_header_pointer(skb_in,
-                                                skb_in->nh.raw +
+                                                skb_network_header(skb_in) +
                                                 (iph->ihl << 2) +
                                                 offsetof(struct icmphdr,
                                                          type) -
@@ -536,7 +537,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        icmp_param.data.icmph.un.gateway = info;
        icmp_param.data.icmph.checksum   = 0;
        icmp_param.skb    = skb_in;
-       icmp_param.offset = skb_in->nh.raw - skb_in->data;
+       icmp_param.offset = skb_network_offset(skb_in);
        icmp_out_count(icmp_param.data.icmph.type);
        inet_sk(icmp_socket->sk)->tos = tos;
        ipc.addr = iph->saddr;
@@ -613,7 +614,7 @@ static void icmp_unreach(struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out_err;
 
-       icmph = skb->h.icmph;
+       icmph = icmp_hdr(skb);
        iph   = (struct iphdr *)skb->data;
 
        if (iph->ihl < 5) /* Mangled header, drop. */
@@ -676,7 +677,7 @@ static void icmp_unreach(struct sk_buff *skb)
                        printk(KERN_WARNING "%u.%u.%u.%u sent an invalid ICMP "
                                            "type %u, code %u "
                                            "error to a broadcast: %u.%u.%u.%u on %s\n",
-                              NIPQUAD(skb->nh.iph->saddr),
+                              NIPQUAD(ip_hdr(skb)->saddr),
                               icmph->type, icmph->code,
                               NIPQUAD(iph->daddr),
                               skb->dev->name);
@@ -743,7 +744,7 @@ static void icmp_redirect(struct sk_buff *skb)
 
        iph = (struct iphdr *)skb->data;
 
-       switch (skb->h.icmph->code & 7) {
+       switch (icmp_hdr(skb)->code & 7) {
        case ICMP_REDIR_NET:
        case ICMP_REDIR_NETTOS:
                /*
@@ -751,8 +752,8 @@ static void icmp_redirect(struct sk_buff *skb)
                 */
        case ICMP_REDIR_HOST:
        case ICMP_REDIR_HOSTTOS:
-               ip_rt_redirect(skb->nh.iph->saddr, iph->daddr,
-                              skb->h.icmph->un.gateway,
+               ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr,
+                              icmp_hdr(skb)->un.gateway,
                               iph->saddr, skb->dev);
                break;
        }
@@ -780,7 +781,7 @@ static void icmp_echo(struct sk_buff *skb)
        if (!sysctl_icmp_echo_ignore_all) {
                struct icmp_bxm icmp_param;
 
-               icmp_param.data.icmph      = *skb->h.icmph;
+               icmp_param.data.icmph      = *icmp_hdr(skb);
                icmp_param.data.icmph.type = ICMP_ECHOREPLY;
                icmp_param.skb             = skb;
                icmp_param.offset          = 0;
@@ -816,7 +817,7 @@ static void icmp_timestamp(struct sk_buff *skb)
        icmp_param.data.times[2] = icmp_param.data.times[1];
        if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
                BUG();
-       icmp_param.data.icmph      = *skb->h.icmph;
+       icmp_param.data.icmph      = *icmp_hdr(skb);
        icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
        icmp_param.data.icmph.code = 0;
        icmp_param.skb             = skb;
@@ -943,7 +944,7 @@ int icmp_rcv(struct sk_buff *skb)
        if (!pskb_pull(skb, sizeof(struct icmphdr)))
                goto error;
 
-       icmph = skb->h.icmph;
+       icmph = icmp_hdr(skb);
 
        /*
         *      18 is the highest 'known' ICMP type. Anything else is a mystery
index 8cedb2a..2506021 100644 (file)
@@ -314,7 +314,9 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
-       skb->nh.iph = pip =(struct iphdr *)skb_put(skb, sizeof(struct iphdr)+4);
+       skb_reset_network_header(skb);
+       pip = ip_hdr(skb);
+       skb_put(skb, sizeof(struct iphdr) + 4);
 
        pip->version  = 4;
        pip->ihl      = (sizeof(struct iphdr)+4)>>2;
@@ -331,8 +333,9 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
        ((u8*)&pip[1])[2] = 0;
        ((u8*)&pip[1])[3] = 0;
 
-       pig =(struct igmpv3_report *)skb_put(skb, sizeof(*pig));
-       skb->h.igmph = (struct igmphdr *)pig;
+       skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
+       skb_put(skb, sizeof(*pig));
+       pig = igmpv3_report_hdr(skb);
        pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
        pig->resv1 = 0;
        pig->csum = 0;
@@ -343,16 +346,14 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 
 static int igmpv3_sendpack(struct sk_buff *skb)
 {
-       struct iphdr *pip = skb->nh.iph;
-       struct igmphdr *pig = skb->h.igmph;
-       int iplen, igmplen;
+       struct iphdr *pip = ip_hdr(skb);
+       struct igmphdr *pig = igmp_hdr(skb);
+       const int iplen = skb->tail - skb->network_header;
+       const int igmplen = skb->tail - skb->transport_header;
 
-       iplen = skb->tail - (unsigned char *)skb->nh.iph;
        pip->tot_len = htons(iplen);
        ip_send_check(pip);
-
-       igmplen = skb->tail - (unsigned char *)skb->h.igmph;
-       pig->csum = ip_compute_csum((void *)skb->h.igmph, igmplen);
+       pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
 
        return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, skb->dev,
                       dst_output);
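
[Note: igmpv3_sendpack() now derives both lengths from the recorded header offsets and feeds the IGMP region to ip_compute_csum(). For reference, a plain, unoptimized version of the 16-bit ones' complement sum that ip_compute_csum() and ip_send_check() implement; the kernel uses architecture-optimized routines, this is illustration only:]

    #include <stdint.h>
    #include <stddef.h>

    /* RFC 1071 checksum; the result here is in host order and should be
     * stored into the packet high byte first (network order) */
    static uint16_t inet_csum(const void *data, size_t len)
    {
        const uint8_t *p = data;
        uint32_t sum = 0;

        while (len > 1) {
            sum += (uint32_t)p[0] << 8 | p[1];
            p += 2;
            len -= 2;
        }
        if (len)                          /* odd trailing byte */
            sum += (uint32_t)p[0] << 8;
        while (sum >> 16)                 /* fold carries back in */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }
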
@@ -379,7 +380,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
        pgr->grec_auxwords = 0;
        pgr->grec_nsrcs = 0;
        pgr->grec_mca = pmc->multiaddr;
-       pih = (struct igmpv3_report *)skb->h.igmph;
+       pih = igmpv3_report_hdr(skb);
        pih->ngrec = htons(ntohs(pih->ngrec)+1);
        *ppgr = pgr;
        return skb;
@@ -412,7 +413,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
        if (!*psf_list)
                goto empty_source;
 
-       pih = skb ? (struct igmpv3_report *)skb->h.igmph : NULL;
+       pih = skb ? igmpv3_report_hdr(skb) : NULL;
 
        /* EX and TO_EX get a fresh packet, if needed */
        if (truncate) {
@@ -664,7 +665,9 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
-       skb->nh.iph = iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)+4);
+       skb_reset_network_header(skb);
+       iph = ip_hdr(skb);
+       skb_put(skb, sizeof(struct iphdr) + 4);
 
        iph->version  = 4;
        iph->ihl      = (sizeof(struct iphdr)+4)>>2;
@@ -827,8 +830,8 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
 static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
        int len)
 {
-       struct igmphdr          *ih = skb->h.igmph;
-       struct igmpv3_query *ih3 = (struct igmpv3_query *)ih;
+       struct igmphdr          *ih = igmp_hdr(skb);
+       struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
        struct ip_mc_list       *im;
        __be32                  group = ih->group;
        int                     max_delay;
@@ -861,12 +864,12 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
                        return;
 
-               ih3 = (struct igmpv3_query *) skb->h.raw;
+               ih3 = igmpv3_query_hdr(skb);
                if (ih3->nsrcs) {
                        if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
                                           + ntohs(ih3->nsrcs)*sizeof(__be32)))
                                return;
-                       ih3 = (struct igmpv3_query *) skb->h.raw;
+                       ih3 = igmpv3_query_hdr(skb);
                }
 
                max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
@@ -943,7 +946,7 @@ int igmp_rcv(struct sk_buff *skb)
                        goto drop;
        }
 
-       ih = skb->h.igmph;
+       ih = igmp_hdr(skb);
        switch (ih->type) {
        case IGMP_HOST_MEMBERSHIP_QUERY:
                igmp_heard_query(in_dev, skb, len);
@@ -2397,7 +2400,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations igmp_mc_seq_ops = {
+static const struct seq_operations igmp_mc_seq_ops = {
        .start  =       igmp_mc_seq_start,
        .next   =       igmp_mc_seq_next,
        .stop   =       igmp_mc_seq_stop,
@@ -2571,7 +2574,7 @@ static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations igmp_mcf_seq_ops = {
+static const struct seq_operations igmp_mcf_seq_ops = {
        .start  =       igmp_mcf_seq_start,
        .next   =       igmp_mcf_seq_next,
        .stop   =       igmp_mcf_seq_stop,
index 5df71cd..dbeacd8 100644 (file)
@@ -27,6 +27,7 @@
 #include <net/inet_hashtables.h>
 #include <net/inet_timewait_sock.h>
 #include <net/inet6_hashtables.h>
+#include <net/netlink.h>
 
 #include <linux/inet.h>
 #include <linux/stddef.h>
@@ -60,7 +61,7 @@ static int inet_csk_diag_fill(struct sock *sk,
        struct nlmsghdr  *nlh;
        void *info = NULL;
        struct inet_diag_meminfo  *minfo = NULL;
-       unsigned char    *b = skb->tail;
+       unsigned char    *b = skb_tail_pointer(skb);
        const struct inet_diag_handler *handler;
 
        handler = inet_diag_table[unlh->nlmsg_type];
@@ -147,12 +148,12 @@ static int inet_csk_diag_fill(struct sock *sk,
            icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
                icsk->icsk_ca_ops->get_info(sk, ext, skb);
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 rtattr_failure:
 nlmsg_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -EMSGSIZE;
 }
 
@@ -163,7 +164,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
 {
        long tmo;
        struct inet_diag_msg *r;
-       const unsigned char *previous_tail = skb->tail;
+       const unsigned char *previous_tail = skb_tail_pointer(skb);
        struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
                                         unlh->nlmsg_type, sizeof(*r));
 
@@ -205,10 +206,10 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
                               &tw6->tw_v6_daddr);
        }
 #endif
-       nlh->nlmsg_len = skb->tail - previous_tail;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
        return skb->len;
 nlmsg_failure:
-       skb_trim(skb, previous_tail - skb->data);
+       nlmsg_trim(skb, previous_tail);
        return -EMSGSIZE;
 }
 
@@ -535,7 +536,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct inet_sock *inet = inet_sk(sk);
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
        long tmo;
@@ -574,12 +575,12 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
                               &inet6_rsk(req)->rmt_addr);
        }
 #endif
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 
        return skb->len;
 
 nlmsg_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -805,68 +806,43 @@ done:
        return skb->len;
 }
 
-static inline int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-       if (!(nlh->nlmsg_flags&NLM_F_REQUEST))
-               return 0;
+       int hdrlen = sizeof(struct inet_diag_req);
 
-       if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX)
-               goto err_inval;
+       if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
+           nlmsg_len(nlh) < hdrlen)
+               return -EINVAL;
 
        if (inet_diag_table[nlh->nlmsg_type] == NULL)
                return -ENOENT;
 
-       if (NLMSG_LENGTH(sizeof(struct inet_diag_req)) > skb->len)
-               goto err_inval;
-
-       if (nlh->nlmsg_flags&NLM_F_DUMP) {
-               if (nlh->nlmsg_len >
-                   (4 + NLMSG_SPACE(sizeof(struct inet_diag_req)))) {
-                       struct rtattr *rta = (void *)(NLMSG_DATA(nlh) +
-                                                sizeof(struct inet_diag_req));
-                       if (rta->rta_type != INET_DIAG_REQ_BYTECODE ||
-                           rta->rta_len < 8 ||
-                           rta->rta_len >
-                           (nlh->nlmsg_len -
-                            NLMSG_SPACE(sizeof(struct inet_diag_req))))
-                               goto err_inval;
-                       if (inet_diag_bc_audit(RTA_DATA(rta), RTA_PAYLOAD(rta)))
-                               goto err_inval;
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               if (nlmsg_attrlen(nlh, hdrlen)) {
+                       struct nlattr *attr;
+
+                       attr = nlmsg_find_attr(nlh, hdrlen,
+                                              INET_DIAG_REQ_BYTECODE);
+                       if (attr == NULL ||
+                           nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
+                           inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
+                               return -EINVAL;
                }
+
                return netlink_dump_start(idiagnl, skb, nlh,
                                          inet_diag_dump, NULL);
-       } else
-               return inet_diag_get_exact(skb, nlh);
-
-err_inval:
-       return -EINVAL;
-}
-
-
-static inline void inet_diag_rcv_skb(struct sk_buff *skb)
-{
-       if (skb->len >= NLMSG_SPACE(0)) {
-               int err;
-               struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
-
-               if (nlh->nlmsg_len < sizeof(*nlh) ||
-                   skb->len < nlh->nlmsg_len)
-                       return;
-               err = inet_diag_rcv_msg(skb, nlh);
-               if (err || nlh->nlmsg_flags & NLM_F_ACK)
-                       netlink_ack(skb, nlh, err);
        }
+
+       return inet_diag_get_exact(skb, nlh);
 }
 
 static void inet_diag_rcv(struct sock *sk, int len)
 {
-       struct sk_buff *skb;
-       unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+       unsigned int qlen = 0;
 
-       while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) {
-               inet_diag_rcv_skb(skb);
-               kfree_skb(skb);
-       }
+       do {
+               netlink_run_queue(sk, &qlen, &inet_diag_rcv_msg);
+       } while (qlen);
 }
 
 static DEFINE_SPINLOCK(inet_diag_register_lock);
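
[Note: inet_diag_rcv_msg() now validates requests with the generic nlmsg_*/nla_* helpers and lets netlink_run_queue() drive the receive loop. The size checks can be mirrored in userspace with the uapi headers; the sketch below is simplified in that it only looks at the first attribute after the fixed request header, rather than searching like nlmsg_find_attr():]

    #include <linux/netlink.h>
    #include <linux/inet_diag.h>

    static int diag_request_ok(const struct nlmsghdr *nlh)
    {
        const unsigned int hdrlen = sizeof(struct inet_diag_req);
        const struct nlattr *nla;

        if (nlh->nlmsg_len < NLMSG_LENGTH(hdrlen))        /* truncated request          */
            return 0;
        if (!(nlh->nlmsg_flags & NLM_F_DUMP))             /* exact-socket query: enough */
            return 1;
        if (nlh->nlmsg_len < NLMSG_SPACE(hdrlen) + NLA_HDRLEN)
            return 1;                                     /* no attributes present      */

        nla = (const struct nlattr *)((const char *)nlh + NLMSG_SPACE(hdrlen));
        if (nla->nla_type == INET_DIAG_REQ_BYTECODE &&
            nla->nla_len < NLA_HDRLEN + sizeof(struct inet_diag_bc_op))
            return 0;                                     /* bytecode attribute too small */
        return 1;
    }
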
@@ -917,7 +893,7 @@ static int __init inet_diag_init(void)
                goto out;
 
        idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv,
-                                       THIS_MODULE);
+                                       NULL, THIS_MODULE);
        if (idiagnl == NULL)
                goto out_free_table;
        err = 0;
index db3ef96..2f44e61 100644 (file)
@@ -87,10 +87,12 @@ static DEFINE_RWLOCK(peer_pool_lock);
 
 static int peer_total;
 /* Exported for sysctl_net_ipv4.  */
-int inet_peer_threshold = 65536 + 128; /* start to throw entries more
+int inet_peer_threshold __read_mostly = 65536 + 128;   /* start to throw entries more
                                         * aggressively at this stage */
-int inet_peer_minttl = 120 * HZ;       /* TTL under high load: 120 sec */
-int inet_peer_maxttl = 10 * 60 * HZ;   /* usual time to live: 10 min */
+int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
+int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;     /* usual time to live: 10 min */
+int inet_peer_gc_mintime __read_mostly = 10 * HZ;
+int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
 static struct inet_peer *inet_peer_unused_head;
 static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
@@ -99,9 +101,6 @@ static DEFINE_SPINLOCK(inet_peer_unused_lock);
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
 
-/* Exported for sysctl_net_ipv4.  */
-int inet_peer_gc_mintime = 10 * HZ,
-    inet_peer_gc_maxtime = 120 * HZ;
 
 /* Called from ip_output.c:ip_init  */
 void __init inet_initpeers(void)
@@ -151,20 +150,27 @@ static void unlink_from_unused(struct inet_peer *p)
        spin_unlock_bh(&inet_peer_unused_lock);
 }
 
-/* Called with local BH disabled and the pool lock held. */
-#define lookup(daddr)                                          \
+/*
+ * Called with local BH disabled and the pool lock held.
+ * _stack is known to be NULL or not at compile time,
+ * so compiler will optimize the if (_stack) tests.
+ */
+#define lookup(_daddr,_stack)                                  \
 ({                                                             \
        struct inet_peer *u, **v;                               \
-       stackptr = stack;                                       \
-       *stackptr++ = &peer_root;                               \
+       if (_stack) {                                           \
+               stackptr = _stack;                              \
+               *stackptr++ = &peer_root;                       \
+       }                                                       \
        for (u = peer_root; u != peer_avl_empty; ) {            \
-               if (daddr == u->v4daddr)                        \
+               if (_daddr == u->v4daddr)                       \
                        break;                                  \
-               if ((__force __u32)daddr < (__force __u32)u->v4daddr)   \
+               if ((__force __u32)_daddr < (__force __u32)u->v4daddr)  \
                        v = &u->avl_left;                       \
                else                                            \
                        v = &u->avl_right;                      \
-               *stackptr++ = v;                                \
+               if (_stack)                                     \
+                       *stackptr++ = v;                        \
                u = *v;                                         \
        }                                                       \
        u;                                                      \
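
[Note: the comment added above is the whole trick: the read-side fast path passes a literal NULL for _stack, so after macro expansion the compiler removes the path bookkeeping entirely, while the write-side lookup still records the AVL trail it needs. A small standalone illustration of the same pattern, using a toy tree type that has nothing to do with inetpeer.c:]

    #include <stddef.h>

    struct toy_node { unsigned key; struct toy_node *left, *right; };

    /* when 'trail' is a compile-time NULL, both if (trail) branches are dead
     * code once this is inlined, just like the _stack tests in lookup() */
    static inline struct toy_node *toy_lookup(struct toy_node *root, unsigned key,
                                              struct toy_node **trail, int *depth)
    {
        struct toy_node *n = root;

        if (trail)
            *depth = 0;
        while (n && n->key != key) {
            if (trail)
                trail[(*depth)++] = n;     /* remember the path for rebalancing */
            n = key < n->key ? n->left : n->right;
        }
        return n;
    }
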
@@ -288,7 +294,7 @@ static void unlink_from_pool(struct inet_peer *p)
        if (atomic_read(&p->refcnt) == 1) {
                struct inet_peer **stack[PEER_MAXDEPTH];
                struct inet_peer ***stackptr, ***delp;
-               if (lookup(p->v4daddr) != p)
+               if (lookup(p->v4daddr, stack) != p)
                        BUG();
                delp = stackptr - 1; /* *delp[0] == p */
                if (p->avl_left == peer_avl_empty) {
@@ -373,7 +379,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 
        /* Look up for the address quickly. */
        read_lock_bh(&peer_pool_lock);
-       p = lookup(daddr);
+       p = lookup(daddr, NULL);
        if (p != peer_avl_empty)
                atomic_inc(&p->refcnt);
        read_unlock_bh(&peer_pool_lock);
@@ -400,7 +406,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 
        write_lock_bh(&peer_pool_lock);
        /* Check if an entry has suddenly appeared. */
-       p = lookup(daddr);
+       p = lookup(daddr, stack);
        if (p != peer_avl_empty)
                goto out_free;
 
index 369e721..9cb04df 100644 (file)
@@ -67,14 +67,14 @@ int ip_forward(struct sk_buff *skb)
        if (skb->pkt_type != PACKET_HOST)
                goto drop;
 
-       skb->ip_summed = CHECKSUM_NONE;
+       skb_forward_csum(skb);
 
        /*
         *      According to the RFC, we must first decrease the TTL field. If
         *      that reaches zero, we must reply an ICMP control message telling
         *      that the packet's lifetime expired.
         */
-       if (skb->nh.iph->ttl <= 1)
+       if (ip_hdr(skb)->ttl <= 1)
                goto too_many_hops;
 
        if (!xfrm4_route_forward(skb))
@@ -85,10 +85,18 @@ int ip_forward(struct sk_buff *skb)
        if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto sr_failed;
 
+       if (unlikely(skb->len > dst_mtu(&rt->u.dst) &&
+                    (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
+               IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+               icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                         htonl(dst_mtu(&rt->u.dst)));
+               goto drop;
+       }
+
        /* We are about to mangle packet. Copy it! */
        if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len))
                goto drop;
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
 
        /* Decrease ttl after skb cow done */
        ip_decrease_ttl(iph);
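
[Note: the new block refuses to forward an oversized datagram with DF set and answers with ICMP_FRAG_NEEDED carrying the path MTU; the same test ip_fragment() applies (visible further down in this diff) now runs on the forward path before the packet is copied. The decision itself reduces to a small predicate; sketched here with a host-order frag_off field and the standard DF flag value:]

    #include <stdint.h>

    #define IP_DF 0x4000   /* don't-fragment flag in iphdr.frag_off */

    enum fwd_action { FORWARD, SEND_FRAG_NEEDED };

    static enum fwd_action df_check(uint32_t pkt_len, uint32_t mtu,
                                    uint16_t frag_off_host, int local_df)
    {
        if (pkt_len > mtu && (frag_off_host & IP_DF) && !local_df)
            return SEND_FRAG_NEEDED;   /* ICMP_DEST_UNREACH / ICMP_FRAG_NEEDED */
        return FORWARD;
    }
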
index b6f0553..0231bdc 100644 (file)
@@ -92,7 +92,7 @@ struct ipq {
        spinlock_t      lock;
        atomic_t        refcnt;
        struct timer_list timer;        /* when will this queue expire?         */
-       struct timeval  stamp;
+       ktime_t         stamp;
        int             iif;
        unsigned int    rid;
        struct inet_peer *peer;
@@ -184,7 +184,7 @@ static __inline__ struct ipq *frag_alloc_queue(void)
 {
        struct ipq *qp = kmalloc(sizeof(struct ipq), GFP_ATOMIC);
 
-       if(!qp)
+       if (!qp)
                return NULL;
        atomic_add(sizeof(struct ipq), &ip_frag_mem);
        return qp;
@@ -321,11 +321,11 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in)
         * promoted read lock to write lock.
         */
        hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
-               if(qp->id == qp_in->id          &&
-                  qp->saddr == qp_in->saddr    &&
-                  qp->daddr == qp_in->daddr    &&
-                  qp->protocol == qp_in->protocol &&
-                  qp->user == qp_in->user) {
+               if (qp->id == qp_in->id         &&
+                   qp->saddr == qp_in->saddr   &&
+                   qp->daddr == qp_in->daddr   &&
+                   qp->protocol == qp_in->protocol &&
+                   qp->user == qp_in->user) {
                        atomic_inc(&qp->refcnt);
                        write_unlock(&ipfrag_lock);
                        qp_in->last_in |= COMPLETE;
@@ -398,11 +398,11 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
        read_lock(&ipfrag_lock);
        hash = ipqhashfn(id, saddr, daddr, protocol);
        hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
-               if(qp->id == id         &&
-                  qp->saddr == saddr   &&
-                  qp->daddr == daddr   &&
-                  qp->protocol == protocol &&
-                  qp->user == user) {
+               if (qp->id == id                &&
+                   qp->saddr == saddr  &&
+                   qp->daddr == daddr  &&
+                   qp->protocol == protocol &&
+                   qp->user == user) {
                        atomic_inc(&qp->refcnt);
                        read_unlock(&ipfrag_lock);
                        return qp;
@@ -479,11 +479,11 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                goto err;
        }
 
-       offset = ntohs(skb->nh.iph->frag_off);
+       offset = ntohs(ip_hdr(skb)->frag_off);
        flags = offset & ~IP_OFFSET;
        offset &= IP_OFFSET;
        offset <<= 3;           /* offset is in 8-byte chunks */
-       ihl = skb->nh.iph->ihl * 4;
+       ihl = ip_hdrlen(skb);
 
        /* Determine the position of this fragment. */
        end = offset + skb->len - ihl;
@@ -524,7 +524,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
         * this fragment, right?
         */
        prev = NULL;
-       for(next = qp->fragments; next != NULL; next = next->next) {
+       for (next = qp->fragments; next != NULL; next = next->next) {
                if (FRAG_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
@@ -592,7 +592,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
        if (skb->dev)
                qp->iif = skb->dev->ifindex;
        skb->dev = NULL;
-       skb_get_timestamp(skb, &qp->stamp);
+       qp->stamp = skb->tstamp;
        qp->meat += skb->len;
        atomic_add(skb->truesize, &ip_frag_mem);
        if (offset == 0)
@@ -624,10 +624,10 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
        BUG_TRAP(FRAG_CB(head)->offset == 0);
 
        /* Allocate a new buffer for the datagram. */
-       ihlen = head->nh.iph->ihl*4;
+       ihlen = ip_hdrlen(head);
        len = ihlen + qp->len;
 
-       if(len > 65535)
+       if (len > 65535)
                goto out_oversize;
 
        /* Head of list must not be cloned. */
@@ -658,7 +658,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
        }
 
        skb_shinfo(head)->frag_list = head->next;
-       skb_push(head, head->data - head->nh.raw);
+       skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &ip_frag_mem);
 
        for (fp=head->next; fp; fp = fp->next) {
@@ -674,9 +674,9 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
 
        head->next = NULL;
        head->dev = dev;
-       skb_set_timestamp(head, &qp->stamp);
+       head->tstamp = qp->stamp;
 
-       iph = head->nh.iph;
+       iph = ip_hdr(head);
        iph->frag_off = 0;
        iph->tot_len = htons(len);
        IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
@@ -700,7 +700,6 @@ out_fail:
 /* Process an incoming IP datagram fragment. */
 struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
 {
-       struct iphdr *iph = skb->nh.iph;
        struct ipq *qp;
        struct net_device *dev;
 
@@ -713,7 +712,7 @@ struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
        dev = skb->dev;
 
        /* Lookup (or create) queue header */
-       if ((qp = ip_find(iph, user)) != NULL) {
+       if ((qp = ip_find(ip_hdr(skb), user)) != NULL) {
                struct sk_buff *ret = NULL;
 
                spin_lock(&qp->lock);
@@ -734,7 +733,7 @@ struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
        return NULL;
 }
 
-void ipfrag_init(void)
+void __init ipfrag_init(void)
 {
        ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
                                 (jiffies ^ (jiffies >> 6)));
index 9151da6..6328293 100644 (file)
@@ -191,11 +191,11 @@ static struct ip_tunnel * ipgre_tunnel_lookup(__be32 remote, __be32 local, __be3
        return NULL;
 }
 
-static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
+static struct ip_tunnel **__ipgre_bucket(struct ip_tunnel_parm *parms)
 {
-       __be32 remote = t->parms.iph.daddr;
-       __be32 local = t->parms.iph.saddr;
-       __be32 key = t->parms.i_key;
+       __be32 remote = parms->iph.daddr;
+       __be32 local = parms->iph.saddr;
+       __be32 key = parms->i_key;
        unsigned h = HASH(key);
        int prio = 0;
 
@@ -209,6 +209,11 @@ static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
        return &tunnels[prio][h];
 }
 
+static inline struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
+{
+       return __ipgre_bucket(&t->parms);
+}
+
 static void ipgre_tunnel_link(struct ip_tunnel *t)
 {
        struct ip_tunnel **tp = ipgre_bucket(t);
@@ -240,17 +245,9 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
        __be32 key = parms->i_key;
        struct ip_tunnel *t, **tp, *nt;
        struct net_device *dev;
-       unsigned h = HASH(key);
-       int prio = 0;
        char name[IFNAMSIZ];
 
-       if (local)
-               prio |= 1;
-       if (remote && !MULTICAST(remote)) {
-               prio |= 2;
-               h ^= HASH(remote);
-       }
-       for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
+       for (tp = __ipgre_bucket(parms); (t = *tp) != NULL; tp = &t->next) {
                if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
                        if (key == t->parms.i_key)
                                return t;
@@ -320,8 +317,8 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
        struct iphdr *iph = (struct iphdr*)skb->data;
        __be16       *p = (__be16*)(skb->data+(iph->ihl<<2));
        int grehlen = (iph->ihl<<2) + 4;
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        struct ip_tunnel *t;
        __be16 flags;
 
@@ -388,8 +385,8 @@ out:
        struct iphdr *iph = (struct iphdr*)dp;
        struct iphdr *eiph;
        __be16       *p = (__be16*)(dp+(iph->ihl<<2));
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        int rel_type = 0;
        int rel_code = 0;
        __be32 rel_info = 0;
@@ -422,7 +419,7 @@ out:
        default:
                return;
        case ICMP_PARAMETERPROB:
-               n = ntohl(skb->h.icmph->un.gateway) >> 24;
+               n = ntohl(icmp_hdr(skb)->un.gateway) >> 24;
                if (n < (iph->ihl<<2))
                        return;
 
@@ -442,7 +439,7 @@ out:
                        return;
                case ICMP_FRAG_NEEDED:
                        /* And it is the only really necessary thing :-) */
-                       n = ntohs(skb->h.icmph->un.frag.mtu);
+                       n = ntohs(icmp_hdr(skb)->un.frag.mtu);
                        if (n < grehlen+68)
                                return;
                        n -= grehlen;
@@ -474,7 +471,7 @@ out:
        dst_release(skb2->dst);
        skb2->dst = NULL;
        skb_pull(skb2, skb->data - (u8*)eiph);
-       skb2->nh.raw = skb2->data;
+       skb_reset_network_header(skb2);
 
        /* Try to guess incoming interface */
        memset(&fl, 0, sizeof(fl));
@@ -533,9 +530,9 @@ static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
 {
        if (INET_ECN_is_ce(iph->tos)) {
                if (skb->protocol == htons(ETH_P_IP)) {
-                       IP_ECN_set_ce(skb->nh.iph);
+                       IP_ECN_set_ce(ip_hdr(skb));
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
-                       IP6_ECN_set_ce(skb->nh.ipv6h);
+                       IP6_ECN_set_ce(ipv6_hdr(skb));
                }
        }
 }
@@ -565,7 +562,7 @@ static int ipgre_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, 16))
                goto drop_nolock;
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        h = skb->data;
        flags = *(__be16*)h;
 
@@ -616,9 +613,10 @@ static int ipgre_rcv(struct sk_buff *skb)
                                offset += 4;
                }
 
-               skb->mac.raw = skb->nh.raw;
-               skb->nh.raw = __pskb_pull(skb, offset);
-               skb_postpull_rcsum(skb, skb->h.raw, offset);
+               skb_reset_mac_header(skb);
+               __pskb_pull(skb, offset);
+               skb_reset_network_header(skb);
+               skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
                skb->pkt_type = PACKET_HOST;
 #ifdef CONFIG_NET_IPGRE_BROADCAST
                if (MULTICAST(iph->daddr)) {
@@ -669,7 +667,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net_device_stats *stats = &tunnel->stat;
-       struct iphdr  *old_iph = skb->nh.iph;
+       struct iphdr  *old_iph = ip_hdr(skb);
        struct iphdr  *tiph;
        u8     tos;
        __be16 df;
@@ -720,7 +718,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                        addr_type = ipv6_addr_type(addr6);
 
                        if (addr_type == IPV6_ADDR_ANY) {
-                               addr6 = &skb->nh.ipv6h->daddr;
+                               addr6 = &ipv6_hdr(skb)->daddr;
                                addr_type = ipv6_addr_type(addr6);
                        }
 
@@ -824,11 +822,12 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                        skb_set_owner_w(new_skb, skb->sk);
                dev_kfree_skb(skb);
                skb = new_skb;
-               old_iph = skb->nh.iph;
+               old_iph = ip_hdr(skb);
        }
 
-       skb->h.raw = skb->nh.raw;
-       skb->nh.raw = skb_push(skb, gre_hlen);
+       skb->transport_header = skb->network_header;
+       skb_push(skb, gre_hlen);
+       skb_reset_network_header(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
                              IPSKB_REROUTED);
@@ -839,7 +838,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
         *      Push down and install the IPIP header.
         */
 
-       iph                     =       skb->nh.iph;
+       iph                     =       ip_hdr(skb);
        iph->version            =       4;
        iph->ihl                =       sizeof(struct iphdr) >> 2;
        iph->frag_off           =       df;
index f38e976..324e7e0 100644 (file)
@@ -158,7 +158,7 @@ DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly;
 int ip_call_ra_chain(struct sk_buff *skb)
 {
        struct ip_ra_chain *ra;
-       u8 protocol = skb->nh.iph->protocol;
+       u8 protocol = ip_hdr(skb)->protocol;
        struct sock *last = NULL;
 
        read_lock(&ip_ra_lock);
@@ -171,7 +171,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
                if (sk && inet_sk(sk)->num == protocol &&
                    (!sk->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == skb->dev->ifindex)) {
-                       if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+                       if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
                                skb = ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN);
                                if (skb == NULL) {
                                        read_unlock(&ip_ra_lock);
@@ -198,17 +198,15 @@ int ip_call_ra_chain(struct sk_buff *skb)
 
 static inline int ip_local_deliver_finish(struct sk_buff *skb)
 {
-       int ihl = skb->nh.iph->ihl*4;
-
-       __skb_pull(skb, ihl);
+       __skb_pull(skb, ip_hdrlen(skb));
 
        /* Point into the IP datagram, just past the header. */
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
 
        rcu_read_lock();
        {
                /* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
-               int protocol = skb->nh.iph->protocol;
+               int protocol = ip_hdr(skb)->protocol;
                int hash;
                struct sock *raw_sk;
                struct net_protocol *ipprot;
@@ -220,7 +218,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
                /* If there maybe a raw socket we must check - if not we
                 * don't care less
                 */
-               if (raw_sk && !raw_v4_input(skb, skb->nh.iph, hash))
+               if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash))
                        raw_sk = NULL;
 
                if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) {
@@ -266,7 +264,7 @@ int ip_local_deliver(struct sk_buff *skb)
         *      Reassemble IP fragments.
         */
 
-       if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+       if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
                skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER);
                if (!skb)
                        return 0;
@@ -294,7 +292,7 @@ static inline int ip_rcv_options(struct sk_buff *skb)
                goto drop;
        }
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
 
        if (ip_options_compile(NULL, skb)) {
                IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
@@ -330,7 +328,7 @@ drop:
 
 static inline int ip_rcv_finish(struct sk_buff *skb)
 {
-       struct iphdr *iph = skb->nh.iph;
+       const struct iphdr *iph = ip_hdr(skb);
 
        /*
         *      Initialise the virtual path cache for the packet. It describes
@@ -391,7 +389,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto inhdr_error;
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
 
        /*
         *      RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
@@ -410,7 +408,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto inhdr_error;
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
 
        if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
                goto inhdr_error;
index f906a80..2513468 100644 (file)
@@ -40,7 +40,7 @@
 void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
                            __be32 daddr, struct rtable *rt, int is_frag)
 {
-       unsigned char * iph = skb->nh.raw;
+       unsigned char *iph = skb_network_header(skb);
 
        memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options));
        memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen);
@@ -104,13 +104,13 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
                return 0;
        }
 
-       sptr = skb->nh.raw;
+       sptr = skb_network_header(skb);
        dptr = dopt->__data;
 
        if (skb->dst)
                daddr = ((struct rtable*)skb->dst)->rt_spec_dst;
        else
-               daddr = skb->nh.iph->daddr;
+               daddr = ip_hdr(skb)->daddr;
 
        if (sopt->rr) {
                optlen  = sptr[sopt->rr+1];
@@ -180,7 +180,8 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
                        /*
                         * RFC1812 requires to fix illegal source routes.
                         */
-                       if (memcmp(&skb->nh.iph->saddr, &start[soffset+3], 4) == 0)
+                       if (memcmp(&ip_hdr(skb)->saddr,
+                                  &start[soffset + 3], 4) == 0)
                                doffset -= 4;
                }
                if (doffset > 3) {
@@ -217,7 +218,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
 
 void ip_options_fragment(struct sk_buff * skb)
 {
-       unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr);
+       unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr);
        struct ip_options * opt = &(IPCB(skb)->opt);
        int  l = opt->optlen;
        int  optlen;
@@ -264,12 +265,13 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
 
        if (!opt) {
                opt = &(IPCB(skb)->opt);
-               iph = skb->nh.raw;
+               iph = skb_network_header(skb);
                opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr);
                optptr = iph + sizeof(struct iphdr);
                opt->is_data = 0;
        } else {
-               optptr = opt->is_data ? opt->__data : (unsigned char*)&(skb->nh.iph[1]);
+               optptr = opt->is_data ? opt->__data :
+                                       (unsigned char *)&(ip_hdr(skb)[1]);
                iph = optptr - sizeof(struct iphdr);
        }
 
@@ -563,7 +565,7 @@ void ip_forward_options(struct sk_buff *skb)
        struct   ip_options * opt       = &(IPCB(skb)->opt);
        unsigned char * optptr;
        struct rtable *rt = (struct rtable*)skb->dst;
-       unsigned char *raw = skb->nh.raw;
+       unsigned char *raw = skb_network_header(skb);
 
        if (opt->rr_needaddr) {
                optptr = (unsigned char *)raw + opt->rr;
@@ -587,7 +589,7 @@ void ip_forward_options(struct sk_buff *skb)
                if (srrptr + 3 <= srrspace) {
                        opt->is_changed = 1;
                        ip_rt_get_source(&optptr[srrptr-1], rt);
-                       skb->nh.iph->daddr = rt->rt_dst;
+                       ip_hdr(skb)->daddr = rt->rt_dst;
                        optptr[2] = srrptr+4;
                } else if (net_ratelimit())
                        printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
@@ -599,7 +601,7 @@ void ip_forward_options(struct sk_buff *skb)
        }
        if (opt->is_changed) {
                opt->is_changed = 0;
-               ip_send_check(skb->nh.iph);
+               ip_send_check(ip_hdr(skb));
        }
 }
 
@@ -608,8 +610,8 @@ int ip_options_rcv_srr(struct sk_buff *skb)
        struct ip_options *opt = &(IPCB(skb)->opt);
        int srrspace, srrptr;
        __be32 nexthop;
-       struct iphdr *iph = skb->nh.iph;
-       unsigned char * optptr = skb->nh.raw + opt->srr;
+       struct iphdr *iph = ip_hdr(skb);
+       unsigned char *optptr = skb_network_header(skb) + opt->srr;
        struct rtable *rt = (struct rtable*)skb->dst;
        struct rtable *rt2;
        int err;
index d096332..534650c 100644 (file)
@@ -95,8 +95,8 @@ __inline__ void ip_send_check(struct iphdr *iph)
 /* dev_loopback_xmit for use with netfilter. */
 static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 {
-       newskb->mac.raw = newskb->data;
-       __skb_pull(newskb, newskb->nh.raw - newskb->data);
+       skb_reset_mac_header(newskb);
+       __skb_pull(newskb, skb_network_offset(newskb));
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        BUG_TRAP(newskb->dst);
@@ -125,11 +125,9 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
        struct iphdr *iph;
 
        /* Build the IP header. */
-       if (opt)
-               iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen);
-       else
-               iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr));
-
+       skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
+       skb_reset_network_header(skb);
+       iph = ip_hdr(skb);
        iph->version  = 4;
        iph->ihl      = 5;
        iph->tos      = inet->tos;
@@ -143,7 +141,6 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
        iph->protocol = sk->sk_protocol;
        iph->tot_len  = htons(skb->len);
        ip_select_ident(iph, &rt->u.dst, sk);
-       skb->nh.iph   = iph;
 
        if (opt && opt->optlen) {
                iph->ihl += opt->optlen>>2;
@@ -192,6 +189,14 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        return -EINVAL;
 }
 
+static inline int ip_skb_dst_mtu(struct sk_buff *skb)
+{
+       struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
+
+       return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
+              skb->dst->dev->mtu : dst_mtu(skb->dst);
+}
+
 static inline int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -201,7 +206,7 @@ static inline int ip_finish_output(struct sk_buff *skb)
                return dst_output(skb);
        }
 #endif
-       if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
+       if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
                return ip_fragment(skb, ip_finish_output2);
        else
                return ip_finish_output2(skb);
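
[Note: ip_skb_dst_mtu() makes the output path compare against the device MTU rather than the cached path MTU for sockets that set IP_PMTUDISC_PROBE, so MTU-probing applications can emit packets larger than the currently learned path MTU. A hedged userspace sketch of how such a socket would opt in; the option value only exists on kernels that carry this change:]

    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef IP_PMTUDISC_PROBE
    #define IP_PMTUDISC_PROBE 3        /* uapi value; absent from older libc headers */
    #endif

    /* returns a UDP socket in probing mode, or -1; error handling kept minimal */
    int make_probe_socket(void)
    {
        int val = IP_PMTUDISC_PROBE;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return -1;
        if (setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val)) < 0) {
            /* kernel without this patch rejects the value; caller may fall back */
        }
        return fd;
    }
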
@@ -248,7 +253,7 @@ int ip_mc_output(struct sk_buff *skb)
 
                /* Multicasts with ttl 0 must not go beyond the host */
 
-               if (skb->nh.iph->ttl == 0) {
+               if (ip_hdr(skb)->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
@@ -333,7 +338,9 @@ packet_routed:
                goto no_route;
 
        /* OK, we know where to send it, allocate and build IP header. */
-       iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
+       skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
+       skb_reset_network_header(skb);
+       iph = ip_hdr(skb);
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        iph->tot_len = htons(skb->len);
        if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
@@ -344,7 +351,6 @@ packet_routed:
        iph->protocol = sk->sk_protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
-       skb->nh.iph   = iph;
        /* Transport layer set skb->h.foo itself. */
 
        if (opt && opt->optlen) {
@@ -386,20 +392,9 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 #ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
 #endif
-#ifdef CONFIG_NETFILTER
-       /* Connection association is same as pre-frag packet */
-       nf_conntrack_put(to->nfct);
-       to->nfct = from->nfct;
-       nf_conntrack_get(to->nfct);
-       to->nfctinfo = from->nfctinfo;
+       nf_copy(to, from);
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        to->ipvs_property = from->ipvs_property;
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
-       nf_bridge_put(to->nf_bridge);
-       to->nf_bridge = from->nf_bridge;
-       nf_bridge_get(to->nf_bridge);
-#endif
 #endif
        skb_copy_secmark(to, from);
 }
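
The open-coded conntrack and bridge-netfilter reference juggling in ip_copy_metadata() collapses into nf_copy(). A toy sketch of the kind of consolidation involved; the types and helpers below are placeholders, not the kernel's netfilter API:

    /* Toy refcounted object standing in for nf_conntrack / nf_bridge state. */
    struct toy_ref {
            int refcnt;
    };

    static inline void toy_get(struct toy_ref *r) { if (r) r->refcnt++; }
    static inline void toy_put(struct toy_ref *r) { if (r) r->refcnt--; }

    struct toy_pkt {
            struct toy_ref *nfct;       /* connection-tracking state */
            struct toy_ref *nf_bridge;  /* bridge-netfilter state    */
    };

    /* One helper replaces the per-field put/assign/get boilerplate. */
    static inline void toy_nf_copy(struct toy_pkt *to, const struct toy_pkt *from)
    {
            toy_put(to->nfct);
            to->nfct = from->nfct;
            toy_get(to->nfct);

            toy_put(to->nf_bridge);
            to->nf_bridge = from->nf_bridge;
            toy_get(to->nf_bridge);
    }
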
@@ -430,12 +425,12 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
         *      Point into the IP datagram header.
         */
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
 
        if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
                IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
-                         htonl(dst_mtu(&rt->u.dst)));
+                         htonl(ip_skb_dst_mtu(skb)));
                kfree_skb(skb);
                return -EMSGSIZE;
        }
@@ -502,10 +497,11 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
                         * before previous one went down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
-                               frag->h.raw = frag->data;
-                               frag->nh.raw = __skb_push(frag, hlen);
-                               memcpy(frag->nh.raw, iph, hlen);
-                               iph = frag->nh.iph;
+                               skb_reset_transport_header(frag);
+                               __skb_push(frag, hlen);
+                               skb_reset_network_header(frag);
+                               memcpy(skb_network_header(frag), iph, hlen);
+                               iph = ip_hdr(frag);
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
@@ -566,7 +562,7 @@ slow_path:
         *      Keep copying data until we run out.
         */
 
-       while(left > 0) {
+       while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
@@ -593,8 +589,8 @@ slow_path:
                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
-               skb2->nh.raw = skb2->data;
-               skb2->h.raw = skb2->data + hlen;
+               skb_reset_network_header(skb2);
+               skb2->transport_header = skb2->network_header + hlen;
 
                /*
                 *      Charge the memory for the fragment to any owner
@@ -608,19 +604,19 @@ slow_path:
                 *      Copy the packet header into the new buffer.
                 */
 
-               memcpy(skb2->nh.raw, skb->data, hlen);
+               skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
 
                /*
                 *      Copy a block of the IP datagram.
                 */
-               if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
+               if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
                        BUG();
                left -= len;
 
                /*
                 *      Fill in the new header fields.
                 */
-               iph = skb2->nh.iph;
+               iph = ip_hdr(skb2);
                iph->frag_off = htons((offset >> 3));
 
                /* ANK: dirty, but effective trick. Upgrade options only if
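
Note the argument order of the new helper: skb_copy_from_linear_data(skb, to, len) copies from the skb's linear data area to the destination, the opposite of memcpy()'s destination-first convention. A one-function model over a toy skb (illustrative only):

    #include <string.h>

    struct lin_skb {
            unsigned char *data;    /* start of the linear data area */
    };

    /* Mirrors the helper's semantics: the destination is the second argument. */
    static inline void skb_copy_from_linear_data(const struct lin_skb *skb,
                                                 void *to, unsigned int len)
    {
            memcpy(to, skb->data, len);
    }
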
@@ -722,10 +718,10 @@ static inline int ip_ufo_append_data(struct sock *sk,
                skb_put(skb,fragheaderlen + transhdrlen);
 
                /* initialize network header pointer */
-               skb->nh.raw = skb->data;
+               skb_reset_network_header(skb);
 
                /* initialize protocol header pointer */
-               skb->h.raw = skb->data + fragheaderlen;
+               skb->transport_header = skb->network_header + fragheaderlen;
 
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
@@ -799,7 +795,9 @@ int ip_append_data(struct sock *sk,
                        inet->cork.addr = ipc->addr;
                }
                dst_hold(&rt->u.dst);
-               inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
+               inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
+                                           rt->u.dst.dev->mtu :
+                                           dst_mtu(rt->u.dst.path);
                inet->cork.rt = rt;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
@@ -929,9 +927,10 @@ alloc_new_skb:
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen);
-                       skb->nh.raw = data + exthdrlen;
+                       skb_set_network_header(skb, exthdrlen);
+                       skb->transport_header = (skb->network_header +
+                                                fragheaderlen);
                        data += fragheaderlen;
-                       skb->h.raw = data + exthdrlen;
 
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
@@ -1100,8 +1099,6 @@ ssize_t   ip_append_page(struct sock *sk, struct page *page,
                }
                if (len <= 0) {
                        struct sk_buff *skb_prev;
-                       char *data;
-                       struct iphdr *iph;
                        int alloclen;
 
                        skb_prev = skb;
@@ -1124,15 +1121,15 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
                        /*
                         *      Find where to start putting bytes.
                         */
-                       data = skb_put(skb, fragheaderlen + fraggap);
-                       skb->nh.iph = iph = (struct iphdr *)data;
-                       data += fragheaderlen;
-                       skb->h.raw = data;
-
+                       skb_put(skb, fragheaderlen + fraggap);
+                       skb_reset_network_header(skb);
+                       skb->transport_header = (skb->network_header +
+                                                fragheaderlen);
                        if (fraggap) {
-                               skb->csum = skb_copy_and_csum_bits(
-                                       skb_prev, maxfraglen,
-                                       data, fraggap, 0);
+                               skb->csum = skb_copy_and_csum_bits(skb_prev,
+                                                                  maxfraglen,
+                                                   skb_transport_header(skb),
+                                                                  fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                pskb_trim_unique(skb_prev, maxfraglen);
@@ -1198,10 +1195,10 @@ int ip_push_pending_frames(struct sock *sk)
        tail_skb = &(skb_shinfo(skb)->frag_list);
 
        /* move skb->data to ip header from ext header */
-       if (skb->data < skb->nh.raw)
-               __skb_pull(skb, skb->nh.raw - skb->data);
+       if (skb->data < skb_network_header(skb))
+               __skb_pull(skb, skb_network_offset(skb));
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
-               __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
+               __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
@@ -1216,13 +1213,13 @@ int ip_push_pending_frames(struct sock *sk)
         * to fragment the frame generated here. No matter, what transforms
         * how transforms change size of the packet, it will come out.
         */
-       if (inet->pmtudisc != IP_PMTUDISC_DO)
+       if (inet->pmtudisc < IP_PMTUDISC_DO)
                skb->local_df = 1;
 
        /* DF bit is set when we want to see DF on outgoing frames.
         * If local_df is set too, we still allow to fragment this frame
         * locally. */
-       if (inet->pmtudisc == IP_PMTUDISC_DO ||
+       if (inet->pmtudisc >= IP_PMTUDISC_DO ||
            (skb->len <= dst_mtu(&rt->u.dst) &&
             ip_dont_fragment(sk, &rt->u.dst)))
                df = htons(IP_DF);
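
The switch from equality tests to relational ones (pmtudisc < IP_PMTUDISC_DO, pmtudisc >= IP_PMTUDISC_DO) works because the modes are ordered by how strictly DF is applied, and the new probe mode sits above IP_PMTUDISC_DO: it sets DF like DO but takes its size limit from the device. The values as defined in the uapi header of this period:

    /* Path-MTU discovery modes, ordered so ">= IP_PMTUDISC_DO" means "set DF". */
    #define IP_PMTUDISC_DONT   0    /* never send DF frames                */
    #define IP_PMTUDISC_WANT   1    /* use per-route hints                 */
    #define IP_PMTUDISC_DO     2    /* always set DF                       */
    #define IP_PMTUDISC_PROBE  3    /* set DF, ignore the cached path MTU  */
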
@@ -1352,11 +1349,11 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
                struct flowi fl = { .nl_u = { .ip4_u =
                                              { .daddr = daddr,
                                                .saddr = rt->rt_spec_dst,
-                                               .tos = RT_TOS(skb->nh.iph->tos) } },
+                                               .tos = RT_TOS(ip_hdr(skb)->tos) } },
                                    /* Not quite clean, but right. */
                                    .uli_u = { .ports =
-                                              { .sport = skb->h.th->dest,
-                                                .dport = skb->h.th->source } },
+                                              { .sport = tcp_hdr(skb)->dest,
+                                                .dport = tcp_hdr(skb)->source } },
                                    .proto = sk->sk_protocol };
                security_skb_classify_flow(skb, &fl);
                if (ip_route_output_key(&rt, &fl))
@@ -1370,14 +1367,16 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
           with locally disabled BH and that sk cannot be already spinlocked.
         */
        bh_lock_sock(sk);
-       inet->tos = skb->nh.iph->tos;
+       inet->tos = ip_hdr(skb)->tos;
        sk->sk_priority = skb->priority;
-       sk->sk_protocol = skb->nh.iph->protocol;
+       sk->sk_protocol = ip_hdr(skb)->protocol;
        ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
                       &ipc, rt, MSG_DONTWAIT);
        if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
                if (arg->csumoffset >= 0)
-                       *((__sum16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
+                       *((__sum16 *)skb_transport_header(skb) +
+                         arg->csumoffset) = csum_fold(csum_add(skb->csum,
+                                                               arg->csum));
                skb->ip_summed = CHECKSUM_NONE;
                ip_push_pending_frames(sk);
        }
index 23048d9..4d54457 100644 (file)
@@ -59,7 +59,7 @@ static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
        struct in_pktinfo info;
        struct rtable *rt = (struct rtable *)skb->dst;
 
-       info.ipi_addr.s_addr = skb->nh.iph->daddr;
+       info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
        if (rt) {
                info.ipi_ifindex = rt->rt_iif;
                info.ipi_spec_dst.s_addr = rt->rt_spec_dst;
@@ -73,13 +73,13 @@ static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
 
 static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
 {
-       int ttl = skb->nh.iph->ttl;
+       int ttl = ip_hdr(skb)->ttl;
        put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
 }
 
 static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
 {
-       put_cmsg(msg, SOL_IP, IP_TOS, 1, &skb->nh.iph->tos);
+       put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
 }
 
 static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
@@ -87,7 +87,8 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
        if (IPCB(skb)->opt.optlen == 0)
                return;
 
-       put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen, skb->nh.iph+1);
+       put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
+                ip_hdr(skb) + 1);
 }
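
The ip_hdr(skb) + 1 expression is plain pointer arithmetic: adding one to a struct iphdr pointer advances by sizeof(struct iphdr), i.e. 20 bytes, which is exactly where IPv4 options begin when ihl is greater than 5. A standalone illustration:

    #include <assert.h>
    #include <netinet/ip.h>

    /* Options, when present, start immediately after the 20-byte fixed header. */
    static unsigned char *ipv4_options(struct iphdr *iph)
    {
            assert(iph->ihl >= 5);                  /* ihl == 5 means no options */
            return (unsigned char *)(iph + 1);      /* same as (u8 *)iph + 20    */
    }
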
 
 
@@ -268,18 +269,21 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
        serr = SKB_EXT_ERR(skb);
        serr->ee.ee_errno = err;
        serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
-       serr->ee.ee_type = skb->h.icmph->type;
-       serr->ee.ee_code = skb->h.icmph->code;
+       serr->ee.ee_type = icmp_hdr(skb)->type;
+       serr->ee.ee_code = icmp_hdr(skb)->code;
        serr->ee.ee_pad = 0;
        serr->ee.ee_info = info;
        serr->ee.ee_data = 0;
-       serr->addr_offset = (u8*)&(((struct iphdr*)(skb->h.icmph+1))->daddr) - skb->nh.raw;
+       serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
+                                  skb_network_header(skb);
        serr->port = port;
 
-       skb->h.raw = payload;
-       if (!skb_pull(skb, payload - skb->data) ||
-           sock_queue_err_skb(sk, skb))
-               kfree_skb(skb);
+       if (skb_pull(skb, payload - skb->data) != NULL) {
+               skb_reset_transport_header(skb);
+               if (sock_queue_err_skb(sk, skb) == 0)
+                       return;
+       }
+       kfree_skb(skb);
 }
 
 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
@@ -296,8 +300,9 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
        if (!skb)
                return;
 
-       iph = (struct iphdr*)skb_put(skb, sizeof(struct iphdr));
-       skb->nh.iph = iph;
+       skb_put(skb, sizeof(struct iphdr));
+       skb_reset_network_header(skb);
+       iph = ip_hdr(skb);
        iph->daddr = daddr;
 
        serr = SKB_EXT_ERR(skb);
@@ -308,11 +313,11 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
        serr->ee.ee_pad = 0;
        serr->ee.ee_info = info;
        serr->ee.ee_data = 0;
-       serr->addr_offset = (u8*)&iph->daddr - skb->nh.raw;
+       serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
        serr->port = port;
 
-       skb->h.raw = skb->tail;
-       __skb_pull(skb, skb->tail - skb->data);
+       __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
+       skb_reset_transport_header(skb);
 
        if (sock_queue_err_skb(sk, skb))
                kfree_skb(skb);
@@ -354,7 +359,8 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
        sin = (struct sockaddr_in *)msg->msg_name;
        if (sin) {
                sin->sin_family = AF_INET;
-               sin->sin_addr.s_addr = *(__be32*)(skb->nh.raw + serr->addr_offset);
+               sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
+                                                  serr->addr_offset);
                sin->sin_port = serr->port;
                memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
        }
@@ -366,7 +372,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
                struct inet_sock *inet = inet_sk(sk);
 
                sin->sin_family = AF_INET;
-               sin->sin_addr.s_addr = skb->nh.iph->saddr;
+               sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                sin->sin_port = 0;
                memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
                if (inet->cmsg_flags)
@@ -403,20 +409,20 @@ out:
  */
 
 static int do_ip_setsockopt(struct sock *sk, int level,
-               int optname, char __user *optval, int optlen)
+                           int optname, char __user *optval, int optlen)
 {
        struct inet_sock *inet = inet_sk(sk);
        int val=0,err;
 
        if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
-                           (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
-                           (1<<IP_RETOPTS) | (1<<IP_TOS) |
-                           (1<<IP_TTL) | (1<<IP_HDRINCL) |
-                           (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
-                           (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
-                           (1<<IP_PASSSEC))) ||
-                               optname == IP_MULTICAST_TTL ||
-                               optname == IP_MULTICAST_LOOP) {
+                            (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
+                            (1<<IP_RETOPTS) | (1<<IP_TOS) |
+                            (1<<IP_TTL) | (1<<IP_HDRINCL) |
+                            (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
+                            (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
+                            (1<<IP_PASSSEC))) ||
+           optname == IP_MULTICAST_TTL ||
+           optname == IP_MULTICAST_LOOP) {
                if (optlen >= sizeof(int)) {
                        if (get_user(val, (int __user *) optval))
                                return -EFAULT;
@@ -440,444 +446,444 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        lock_sock(sk);
 
        switch (optname) {
-               case IP_OPTIONS:
-               {
-                       struct ip_options * opt = NULL;
-                       if (optlen > 40 || optlen < 0)
-                               goto e_inval;
-                       err = ip_options_get_from_user(&opt, optval, optlen);
-                       if (err)
-                               break;
-                       if (inet->is_icsk) {
-                               struct inet_connection_sock *icsk = inet_csk(sk);
+       case IP_OPTIONS:
+       {
+               struct ip_options * opt = NULL;
+               if (optlen > 40 || optlen < 0)
+                       goto e_inval;
+               err = ip_options_get_from_user(&opt, optval, optlen);
+               if (err)
+                       break;
+               if (inet->is_icsk) {
+                       struct inet_connection_sock *icsk = inet_csk(sk);
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-                               if (sk->sk_family == PF_INET ||
-                                   (!((1 << sk->sk_state) &
-                                      (TCPF_LISTEN | TCPF_CLOSE)) &&
-                                    inet->daddr != LOOPBACK4_IPV6)) {
+                       if (sk->sk_family == PF_INET ||
+                           (!((1 << sk->sk_state) &
+                              (TCPF_LISTEN | TCPF_CLOSE)) &&
+                            inet->daddr != LOOPBACK4_IPV6)) {
 #endif
-                                       if (inet->opt)
-                                               icsk->icsk_ext_hdr_len -= inet->opt->optlen;
-                                       if (opt)
-                                               icsk->icsk_ext_hdr_len += opt->optlen;
-                                       icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
+                               if (inet->opt)
+                                       icsk->icsk_ext_hdr_len -= inet->opt->optlen;
+                               if (opt)
+                                       icsk->icsk_ext_hdr_len += opt->optlen;
+                               icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-                               }
-#endif
                        }
-                       opt = xchg(&inet->opt, opt);
-                       kfree(opt);
-                       break;
+#endif
                }
-               case IP_PKTINFO:
-                       if (val)
-                               inet->cmsg_flags |= IP_CMSG_PKTINFO;
-                       else
-                               inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
-                       break;
-               case IP_RECVTTL:
-                       if (val)
-                               inet->cmsg_flags |=  IP_CMSG_TTL;
-                       else
-                               inet->cmsg_flags &= ~IP_CMSG_TTL;
-                       break;
-               case IP_RECVTOS:
-                       if (val)
-                               inet->cmsg_flags |=  IP_CMSG_TOS;
-                       else
-                               inet->cmsg_flags &= ~IP_CMSG_TOS;
-                       break;
-               case IP_RECVOPTS:
-                       if (val)
-                               inet->cmsg_flags |=  IP_CMSG_RECVOPTS;
-                       else
-                               inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
-                       break;
-               case IP_RETOPTS:
-                       if (val)
-                               inet->cmsg_flags |= IP_CMSG_RETOPTS;
-                       else
-                               inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
+               opt = xchg(&inet->opt, opt);
+               kfree(opt);
+               break;
+       }
+       case IP_PKTINFO:
+               if (val)
+                       inet->cmsg_flags |= IP_CMSG_PKTINFO;
+               else
+                       inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
+               break;
+       case IP_RECVTTL:
+               if (val)
+                       inet->cmsg_flags |=  IP_CMSG_TTL;
+               else
+                       inet->cmsg_flags &= ~IP_CMSG_TTL;
+               break;
+       case IP_RECVTOS:
+               if (val)
+                       inet->cmsg_flags |=  IP_CMSG_TOS;
+               else
+                       inet->cmsg_flags &= ~IP_CMSG_TOS;
+               break;
+       case IP_RECVOPTS:
+               if (val)
+                       inet->cmsg_flags |=  IP_CMSG_RECVOPTS;
+               else
+                       inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
+               break;
+       case IP_RETOPTS:
+               if (val)
+                       inet->cmsg_flags |= IP_CMSG_RETOPTS;
+               else
+                       inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
+               break;
+       case IP_PASSSEC:
+               if (val)
+                       inet->cmsg_flags |= IP_CMSG_PASSSEC;
+               else
+                       inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
+               break;
+       case IP_TOS:    /* This sets both TOS and Precedence */
+               if (sk->sk_type == SOCK_STREAM) {
+                       val &= ~3;
+                       val |= inet->tos & 3;
+               }
+               if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP &&
+                   !capable(CAP_NET_ADMIN)) {
+                       err = -EPERM;
                        break;
-               case IP_PASSSEC:
-                       if (val)
-                               inet->cmsg_flags |= IP_CMSG_PASSSEC;
-                       else
-                               inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
+               }
+               if (inet->tos != val) {
+                       inet->tos = val;
+                       sk->sk_priority = rt_tos2priority(val);
+                       sk_dst_reset(sk);
+               }
+               break;
+       case IP_TTL:
+               if (optlen<1)
+                       goto e_inval;
+               if (val != -1 && (val < 1 || val>255))
+                       goto e_inval;
+               inet->uc_ttl = val;
+               break;
+       case IP_HDRINCL:
+               if (sk->sk_type != SOCK_RAW) {
+                       err = -ENOPROTOOPT;
                        break;
-               case IP_TOS:    /* This sets both TOS and Precedence */
-                       if (sk->sk_type == SOCK_STREAM) {
-                               val &= ~3;
-                               val |= inet->tos & 3;
-                       }
-                       if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP &&
-                           !capable(CAP_NET_ADMIN)) {
-                               err = -EPERM;
+               }
+               inet->hdrincl = val ? 1 : 0;
+               break;
+       case IP_MTU_DISCOVER:
+               if (val<0 || val>3)
+                       goto e_inval;
+               inet->pmtudisc = val;
+               break;
+       case IP_RECVERR:
+               inet->recverr = !!val;
+               if (!val)
+                       skb_queue_purge(&sk->sk_error_queue);
+               break;
+       case IP_MULTICAST_TTL:
+               if (sk->sk_type == SOCK_STREAM)
+                       goto e_inval;
+               if (optlen<1)
+                       goto e_inval;
+               if (val==-1)
+                       val = 1;
+               if (val < 0 || val > 255)
+                       goto e_inval;
+               inet->mc_ttl = val;
+               break;
+       case IP_MULTICAST_LOOP:
+               if (optlen<1)
+                       goto e_inval;
+               inet->mc_loop = !!val;
+               break;
+       case IP_MULTICAST_IF:
+       {
+               struct ip_mreqn mreq;
+               struct net_device *dev = NULL;
+
+               if (sk->sk_type == SOCK_STREAM)
+                       goto e_inval;
+               /*
+                *      Check the arguments are allowable
+                */
+
+               err = -EFAULT;
+               if (optlen >= sizeof(struct ip_mreqn)) {
+                       if (copy_from_user(&mreq,optval,sizeof(mreq)))
                                break;
-                       }
-                       if (inet->tos != val) {
-                               inet->tos = val;
-                               sk->sk_priority = rt_tos2priority(val);
-                               sk_dst_reset(sk);
-                       }
-                       break;
-               case IP_TTL:
-                       if (optlen<1)
-                               goto e_inval;
-                       if (val != -1 && (val < 1 || val>255))
-                               goto e_inval;
-                       inet->uc_ttl = val;
-                       break;
-               case IP_HDRINCL:
-                       if (sk->sk_type != SOCK_RAW) {
-                               err = -ENOPROTOOPT;
+               } else {
+                       memset(&mreq, 0, sizeof(mreq));
+                       if (optlen >= sizeof(struct in_addr) &&
+                           copy_from_user(&mreq.imr_address,optval,sizeof(struct in_addr)))
+                               break;
+               }
+
+               if (!mreq.imr_ifindex) {
+                       if (mreq.imr_address.s_addr == INADDR_ANY) {
+                               inet->mc_index = 0;
+                               inet->mc_addr  = 0;
+                               err = 0;
                                break;
                        }
-                       inet->hdrincl = val ? 1 : 0;
-                       break;
-               case IP_MTU_DISCOVER:
-                       if (val<0 || val>2)
-                               goto e_inval;
-                       inet->pmtudisc = val;
-                       break;
-               case IP_RECVERR:
-                       inet->recverr = !!val;
-                       if (!val)
-                               skb_queue_purge(&sk->sk_error_queue);
-                       break;
-               case IP_MULTICAST_TTL:
-                       if (sk->sk_type == SOCK_STREAM)
-                               goto e_inval;
-                       if (optlen<1)
-                               goto e_inval;
-                       if (val==-1)
-                               val = 1;
-                       if (val < 0 || val > 255)
-                               goto e_inval;
-                       inet->mc_ttl = val;
-                       break;
-               case IP_MULTICAST_LOOP:
-                       if (optlen<1)
-                               goto e_inval;
-                       inet->mc_loop = !!val;
-                       break;
-               case IP_MULTICAST_IF:
-               {
-                       struct ip_mreqn mreq;
-                       struct net_device *dev = NULL;
+                       dev = ip_dev_find(mreq.imr_address.s_addr);
+                       if (dev) {
+                               mreq.imr_ifindex = dev->ifindex;
+                               dev_put(dev);
+                       }
+               } else
+                       dev = __dev_get_by_index(mreq.imr_ifindex);
 
-                       if (sk->sk_type == SOCK_STREAM)
-                               goto e_inval;
-                       /*
-                        *      Check the arguments are allowable
-                        */
 
-                       err = -EFAULT;
-                       if (optlen >= sizeof(struct ip_mreqn)) {
-                               if (copy_from_user(&mreq,optval,sizeof(mreq)))
-                                       break;
-                       } else {
-                               memset(&mreq, 0, sizeof(mreq));
-                               if (optlen >= sizeof(struct in_addr) &&
-                                   copy_from_user(&mreq.imr_address,optval,sizeof(struct in_addr)))
-                                       break;
-                       }
+               err = -EADDRNOTAVAIL;
+               if (!dev)
+                       break;
 
-                       if (!mreq.imr_ifindex) {
-                               if (mreq.imr_address.s_addr == INADDR_ANY) {
-                                       inet->mc_index = 0;
-                                       inet->mc_addr  = 0;
-                                       err = 0;
-                                       break;
-                               }
-                               dev = ip_dev_find(mreq.imr_address.s_addr);
-                               if (dev) {
-                                       mreq.imr_ifindex = dev->ifindex;
-                                       dev_put(dev);
-                               }
-                       } else
-                               dev = __dev_get_by_index(mreq.imr_ifindex);
+               err = -EINVAL;
+               if (sk->sk_bound_dev_if &&
+                   mreq.imr_ifindex != sk->sk_bound_dev_if)
+                       break;
 
+               inet->mc_index = mreq.imr_ifindex;
+               inet->mc_addr  = mreq.imr_address.s_addr;
+               err = 0;
+               break;
+       }
 
-                       err = -EADDRNOTAVAIL;
-                       if (!dev)
-                               break;
+       case IP_ADD_MEMBERSHIP:
+       case IP_DROP_MEMBERSHIP:
+       {
+               struct ip_mreqn mreq;
 
-                       err = -EINVAL;
-                       if (sk->sk_bound_dev_if &&
-                           mreq.imr_ifindex != sk->sk_bound_dev_if)
+               if (optlen < sizeof(struct ip_mreq))
+                       goto e_inval;
+               err = -EFAULT;
+               if (optlen >= sizeof(struct ip_mreqn)) {
+                       if (copy_from_user(&mreq,optval,sizeof(mreq)))
                                break;
+               } else {
+                       memset(&mreq, 0, sizeof(mreq));
+                       if (copy_from_user(&mreq,optval,sizeof(struct ip_mreq)))
+                               break;
+               }
 
-                       inet->mc_index = mreq.imr_ifindex;
-                       inet->mc_addr  = mreq.imr_address.s_addr;
-                       err = 0;
+               if (optname == IP_ADD_MEMBERSHIP)
+                       err = ip_mc_join_group(sk, &mreq);
+               else
+                       err = ip_mc_leave_group(sk, &mreq);
+               break;
+       }
+       case IP_MSFILTER:
+       {
+               extern int sysctl_igmp_max_msf;
+               struct ip_msfilter *msf;
+
+               if (optlen < IP_MSFILTER_SIZE(0))
+                       goto e_inval;
+               if (optlen > sysctl_optmem_max) {
+                       err = -ENOBUFS;
                        break;
                }
+               msf = kmalloc(optlen, GFP_KERNEL);
+               if (msf == 0) {
+                       err = -ENOBUFS;
+                       break;
+               }
+               err = -EFAULT;
+               if (copy_from_user(msf, optval, optlen)) {
+                       kfree(msf);
+                       break;
+               }
+               /* numsrc >= (1G-4) overflow in 32 bits */
+               if (msf->imsf_numsrc >= 0x3ffffffcU ||
+                   msf->imsf_numsrc > sysctl_igmp_max_msf) {
+                       kfree(msf);
+                       err = -ENOBUFS;
+                       break;
+               }
+               if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
+                       kfree(msf);
+                       err = -EINVAL;
+                       break;
+               }
+               err = ip_mc_msfilter(sk, msf, 0);
+               kfree(msf);
+               break;
+       }
+       case IP_BLOCK_SOURCE:
+       case IP_UNBLOCK_SOURCE:
+       case IP_ADD_SOURCE_MEMBERSHIP:
+       case IP_DROP_SOURCE_MEMBERSHIP:
+       {
+               struct ip_mreq_source mreqs;
+               int omode, add;
 
-               case IP_ADD_MEMBERSHIP:
-               case IP_DROP_MEMBERSHIP:
-               {
-                       struct ip_mreqn mreq;
-
-                       if (optlen < sizeof(struct ip_mreq))
-                               goto e_inval;
+               if (optlen != sizeof(struct ip_mreq_source))
+                       goto e_inval;
+               if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
                        err = -EFAULT;
-                       if (optlen >= sizeof(struct ip_mreqn)) {
-                               if(copy_from_user(&mreq,optval,sizeof(mreq)))
-                                       break;
-                       } else {
-                               memset(&mreq, 0, sizeof(mreq));
-                               if (copy_from_user(&mreq,optval,sizeof(struct ip_mreq)))
-                                       break;
-                       }
-
-                       if (optname == IP_ADD_MEMBERSHIP)
-                               err = ip_mc_join_group(sk, &mreq);
-                       else
-                               err = ip_mc_leave_group(sk, &mreq);
                        break;
                }
-               case IP_MSFILTER:
-               {
-                       extern int sysctl_igmp_max_msf;
-                       struct ip_msfilter *msf;
+               if (optname == IP_BLOCK_SOURCE) {
+                       omode = MCAST_EXCLUDE;
+                       add = 1;
+               } else if (optname == IP_UNBLOCK_SOURCE) {
+                       omode = MCAST_EXCLUDE;
+                       add = 0;
+               } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
+                       struct ip_mreqn mreq;
 
-                       if (optlen < IP_MSFILTER_SIZE(0))
-                               goto e_inval;
-                       if (optlen > sysctl_optmem_max) {
-                               err = -ENOBUFS;
-                               break;
-                       }
-                       msf = kmalloc(optlen, GFP_KERNEL);
-                       if (msf == 0) {
-                               err = -ENOBUFS;
+                       mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
+                       mreq.imr_address.s_addr = mreqs.imr_interface;
+                       mreq.imr_ifindex = 0;
+                       err = ip_mc_join_group(sk, &mreq);
+                       if (err && err != -EADDRINUSE)
                                break;
-                       }
+                       omode = MCAST_INCLUDE;
+                       add = 1;
+               } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
+                       omode = MCAST_INCLUDE;
+                       add = 0;
+               }
+               err = ip_mc_source(add, omode, sk, &mreqs, 0);
+               break;
+       }
+       case MCAST_JOIN_GROUP:
+       case MCAST_LEAVE_GROUP:
+       {
+               struct group_req greq;
+               struct sockaddr_in *psin;
+               struct ip_mreqn mreq;
+
+               if (optlen < sizeof(struct group_req))
+                       goto e_inval;
+               err = -EFAULT;
+               if (copy_from_user(&greq, optval, sizeof(greq)))
+                       break;
+               psin = (struct sockaddr_in *)&greq.gr_group;
+               if (psin->sin_family != AF_INET)
+                       goto e_inval;
+               memset(&mreq, 0, sizeof(mreq));
+               mreq.imr_multiaddr = psin->sin_addr;
+               mreq.imr_ifindex = greq.gr_interface;
+
+               if (optname == MCAST_JOIN_GROUP)
+                       err = ip_mc_join_group(sk, &mreq);
+               else
+                       err = ip_mc_leave_group(sk, &mreq);
+               break;
+       }
+       case MCAST_JOIN_SOURCE_GROUP:
+       case MCAST_LEAVE_SOURCE_GROUP:
+       case MCAST_BLOCK_SOURCE:
+       case MCAST_UNBLOCK_SOURCE:
+       {
+               struct group_source_req greqs;
+               struct ip_mreq_source mreqs;
+               struct sockaddr_in *psin;
+               int omode, add;
+
+               if (optlen != sizeof(struct group_source_req))
+                       goto e_inval;
+               if (copy_from_user(&greqs, optval, sizeof(greqs))) {
                        err = -EFAULT;
-                       if (copy_from_user(msf, optval, optlen)) {
-                               kfree(msf);
-                               break;
-                       }
-                       /* numsrc >= (1G-4) overflow in 32 bits */
-                       if (msf->imsf_numsrc >= 0x3ffffffcU ||
-                           msf->imsf_numsrc > sysctl_igmp_max_msf) {
-                               kfree(msf);
-                               err = -ENOBUFS;
-                               break;
-                       }
-                       if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
-                               kfree(msf);
-                               err = -EINVAL;
-                               break;
-                       }
-                       err = ip_mc_msfilter(sk, msf, 0);
-                       kfree(msf);
                        break;
                }
-               case IP_BLOCK_SOURCE:
-               case IP_UNBLOCK_SOURCE:
-               case IP_ADD_SOURCE_MEMBERSHIP:
-               case IP_DROP_SOURCE_MEMBERSHIP:
-               {
-                       struct ip_mreq_source mreqs;
-                       int omode, add;
-
-                       if (optlen != sizeof(struct ip_mreq_source))
-                               goto e_inval;
-                       if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       if (optname == IP_BLOCK_SOURCE) {
-                               omode = MCAST_EXCLUDE;
-                               add = 1;
-                       } else if (optname == IP_UNBLOCK_SOURCE) {
-                               omode = MCAST_EXCLUDE;
-                               add = 0;
-                       } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
-                               struct ip_mreqn mreq;
-
-                               mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
-                               mreq.imr_address.s_addr = mreqs.imr_interface;
-                               mreq.imr_ifindex = 0;
-                               err = ip_mc_join_group(sk, &mreq);
-                               if (err && err != -EADDRINUSE)
-                                       break;
-                               omode = MCAST_INCLUDE;
-                               add = 1;
-                       } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
-                               omode = MCAST_INCLUDE;
-                               add = 0;
-                       }
-                       err = ip_mc_source(add, omode, sk, &mreqs, 0);
+               if (greqs.gsr_group.ss_family != AF_INET ||
+                   greqs.gsr_source.ss_family != AF_INET) {
+                       err = -EADDRNOTAVAIL;
                        break;
                }
-               case MCAST_JOIN_GROUP:
-               case MCAST_LEAVE_GROUP:
-               {
-                       struct group_req greq;
-                       struct sockaddr_in *psin;
+               psin = (struct sockaddr_in *)&greqs.gsr_group;
+               mreqs.imr_multiaddr = psin->sin_addr.s_addr;
+               psin = (struct sockaddr_in *)&greqs.gsr_source;
+               mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
+               mreqs.imr_interface = 0; /* use index for mc_source */
+
+               if (optname == MCAST_BLOCK_SOURCE) {
+                       omode = MCAST_EXCLUDE;
+                       add = 1;
+               } else if (optname == MCAST_UNBLOCK_SOURCE) {
+                       omode = MCAST_EXCLUDE;
+                       add = 0;
+               } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
                        struct ip_mreqn mreq;
 
-                       if (optlen < sizeof(struct group_req))
-                               goto e_inval;
-                       err = -EFAULT;
-                       if(copy_from_user(&greq, optval, sizeof(greq)))
-                               break;
-                       psin = (struct sockaddr_in *)&greq.gr_group;
-                       if (psin->sin_family != AF_INET)
-                               goto e_inval;
-                       memset(&mreq, 0, sizeof(mreq));
+                       psin = (struct sockaddr_in *)&greqs.gsr_group;
                        mreq.imr_multiaddr = psin->sin_addr;
-                       mreq.imr_ifindex = greq.gr_interface;
-
-                       if (optname == MCAST_JOIN_GROUP)
-                               err = ip_mc_join_group(sk, &mreq);
-                       else
-                               err = ip_mc_leave_group(sk, &mreq);
+                       mreq.imr_address.s_addr = 0;
+                       mreq.imr_ifindex = greqs.gsr_interface;
+                       err = ip_mc_join_group(sk, &mreq);
+                       if (err && err != -EADDRINUSE)
+                               break;
+                       greqs.gsr_interface = mreq.imr_ifindex;
+                       omode = MCAST_INCLUDE;
+                       add = 1;
+               } else /* MCAST_LEAVE_SOURCE_GROUP */ {
+                       omode = MCAST_INCLUDE;
+                       add = 0;
+               }
+               err = ip_mc_source(add, omode, sk, &mreqs,
+                                  greqs.gsr_interface);
+               break;
+       }
+       case MCAST_MSFILTER:
+       {
+               extern int sysctl_igmp_max_msf;
+               struct sockaddr_in *psin;
+               struct ip_msfilter *msf = NULL;
+               struct group_filter *gsf = NULL;
+               int msize, i, ifindex;
+
+               if (optlen < GROUP_FILTER_SIZE(0))
+                       goto e_inval;
+               if (optlen > sysctl_optmem_max) {
+                       err = -ENOBUFS;
                        break;
                }
-               case MCAST_JOIN_SOURCE_GROUP:
-               case MCAST_LEAVE_SOURCE_GROUP:
-               case MCAST_BLOCK_SOURCE:
-               case MCAST_UNBLOCK_SOURCE:
-               {
-                       struct group_source_req greqs;
-                       struct ip_mreq_source mreqs;
-                       struct sockaddr_in *psin;
-                       int omode, add;
-
-                       if (optlen != sizeof(struct group_source_req))
-                               goto e_inval;
-                       if (copy_from_user(&greqs, optval, sizeof(greqs))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       if (greqs.gsr_group.ss_family != AF_INET ||
-                           greqs.gsr_source.ss_family != AF_INET) {
-                               err = -EADDRNOTAVAIL;
-                               break;
-                       }
-                       psin = (struct sockaddr_in *)&greqs.gsr_group;
-                       mreqs.imr_multiaddr = psin->sin_addr.s_addr;
-                       psin = (struct sockaddr_in *)&greqs.gsr_source;
-                       mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
-                       mreqs.imr_interface = 0; /* use index for mc_source */
-
-                       if (optname == MCAST_BLOCK_SOURCE) {
-                               omode = MCAST_EXCLUDE;
-                               add = 1;
-                       } else if (optname == MCAST_UNBLOCK_SOURCE) {
-                               omode = MCAST_EXCLUDE;
-                               add = 0;
-                       } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
-                               struct ip_mreqn mreq;
-
-                               psin = (struct sockaddr_in *)&greqs.gsr_group;
-                               mreq.imr_multiaddr = psin->sin_addr;
-                               mreq.imr_address.s_addr = 0;
-                               mreq.imr_ifindex = greqs.gsr_interface;
-                               err = ip_mc_join_group(sk, &mreq);
-                               if (err && err != -EADDRINUSE)
-                                       break;
-                               greqs.gsr_interface = mreq.imr_ifindex;
-                               omode = MCAST_INCLUDE;
-                               add = 1;
-                       } else /* MCAST_LEAVE_SOURCE_GROUP */ {
-                               omode = MCAST_INCLUDE;
-                               add = 0;
-                       }
-                       err = ip_mc_source(add, omode, sk, &mreqs,
-                               greqs.gsr_interface);
+               gsf = kmalloc(optlen,GFP_KERNEL);
+               if (gsf == 0) {
+                       err = -ENOBUFS;
                        break;
                }
-               case MCAST_MSFILTER:
-               {
-                       extern int sysctl_igmp_max_msf;
-                       struct sockaddr_in *psin;
-                       struct ip_msfilter *msf = NULL;
-                       struct group_filter *gsf = NULL;
-                       int msize, i, ifindex;
-
-                       if (optlen < GROUP_FILTER_SIZE(0))
-                               goto e_inval;
-                       if (optlen > sysctl_optmem_max) {
-                               err = -ENOBUFS;
-                               break;
-                       }
-                       gsf = kmalloc(optlen,GFP_KERNEL);
-                       if (gsf == 0) {
-                               err = -ENOBUFS;
-                               break;
-                       }
-                       err = -EFAULT;
-                       if (copy_from_user(gsf, optval, optlen)) {
-                               goto mc_msf_out;
-                       }
-                       /* numsrc >= (4G-140)/128 overflow in 32 bits */
-                       if (gsf->gf_numsrc >= 0x1ffffff ||
-                           gsf->gf_numsrc > sysctl_igmp_max_msf) {
-                               err = -ENOBUFS;
-                               goto mc_msf_out;
-                       }
-                       if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
-                               err = -EINVAL;
-                               goto mc_msf_out;
-                       }
-                       msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
-                       msf = kmalloc(msize,GFP_KERNEL);
-                       if (msf == 0) {
-                               err = -ENOBUFS;
-                               goto mc_msf_out;
-                       }
-                       ifindex = gsf->gf_interface;
-                       psin = (struct sockaddr_in *)&gsf->gf_group;
-                       if (psin->sin_family != AF_INET) {
-                               err = -EADDRNOTAVAIL;
-                               goto mc_msf_out;
-                       }
-                       msf->imsf_multiaddr = psin->sin_addr.s_addr;
-                       msf->imsf_interface = 0;
-                       msf->imsf_fmode = gsf->gf_fmode;
-                       msf->imsf_numsrc = gsf->gf_numsrc;
+               err = -EFAULT;
+               if (copy_from_user(gsf, optval, optlen)) {
+                       goto mc_msf_out;
+               }
+               /* numsrc >= (4G-140)/128 overflow in 32 bits */
+               if (gsf->gf_numsrc >= 0x1ffffff ||
+                   gsf->gf_numsrc > sysctl_igmp_max_msf) {
+                       err = -ENOBUFS;
+                       goto mc_msf_out;
+               }
+               if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
+                       err = -EINVAL;
+                       goto mc_msf_out;
+               }
+               msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
+               msf = kmalloc(msize,GFP_KERNEL);
+               if (msf == 0) {
+                       err = -ENOBUFS;
+                       goto mc_msf_out;
+               }
+               ifindex = gsf->gf_interface;
+               psin = (struct sockaddr_in *)&gsf->gf_group;
+               if (psin->sin_family != AF_INET) {
                        err = -EADDRNOTAVAIL;
-                       for (i=0; i<gsf->gf_numsrc; ++i) {
-                               psin = (struct sockaddr_in *)&gsf->gf_slist[i];
-
-                               if (psin->sin_family != AF_INET)
-                                       goto mc_msf_out;
-                               msf->imsf_slist[i] = psin->sin_addr.s_addr;
-                       }
-                       kfree(gsf);
-                       gsf = NULL;
-
-                       err = ip_mc_msfilter(sk, msf, ifindex);
-mc_msf_out:
-                       kfree(msf);
-                       kfree(gsf);
-                       break;
+                       goto mc_msf_out;
                }
-               case IP_ROUTER_ALERT:
-                       err = ip_ra_control(sk, val ? 1 : 0, NULL);
-                       break;
-
-               case IP_FREEBIND:
-                       if (optlen<1)
-                               goto e_inval;
-                       inet->freebind = !!val;
-                       break;
+               msf->imsf_multiaddr = psin->sin_addr.s_addr;
+               msf->imsf_interface = 0;
+               msf->imsf_fmode = gsf->gf_fmode;
+               msf->imsf_numsrc = gsf->gf_numsrc;
+               err = -EADDRNOTAVAIL;
+               for (i=0; i<gsf->gf_numsrc; ++i) {
+                       psin = (struct sockaddr_in *)&gsf->gf_slist[i];
 
-               case IP_IPSEC_POLICY:
-               case IP_XFRM_POLICY:
-                       err = -EPERM;
-                       if (!capable(CAP_NET_ADMIN))
-                               break;
-                       err = xfrm_user_policy(sk, optname, optval, optlen);
+                       if (psin->sin_family != AF_INET)
+                               goto mc_msf_out;
+                       msf->imsf_slist[i] = psin->sin_addr.s_addr;
+               }
+               kfree(gsf);
+               gsf = NULL;
+
+               err = ip_mc_msfilter(sk, msf, ifindex);
+       mc_msf_out:
+               kfree(msf);
+               kfree(gsf);
+               break;
+       }
+       case IP_ROUTER_ALERT:
+               err = ip_ra_control(sk, val ? 1 : 0, NULL);
+               break;
+
+       case IP_FREEBIND:
+               if (optlen<1)
+                       goto e_inval;
+               inet->freebind = !!val;
+               break;
+
+       case IP_IPSEC_POLICY:
+       case IP_XFRM_POLICY:
+               err = -EPERM;
+               if (!capable(CAP_NET_ADMIN))
                        break;
+               err = xfrm_user_policy(sk, optname, optval, optlen);
+               break;
 
-               default:
-                       err = -ENOPROTOOPT;
-                       break;
+       default:
+               err = -ENOPROTOOPT;
+               break;
        }
        release_sock(sk);
        return err;
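
Most of this hunk is a mechanical re-indent of the switch, but the functional change hidden in it is the IP_MTU_DISCOVER bound growing from 2 to 3 so the probe mode is accepted. For completeness, the path MTU the kernel is currently using can be read back on a connected socket through the IP_MTU case handled in the getsockopt path below:

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* Read the path MTU associated with a connected socket. */
    static int query_path_mtu(int fd)
    {
            int mtu = 0;
            socklen_t len = sizeof(mtu);

            if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0) {
                    perror("getsockopt(IP_MTU)");   /* ENOTCONN on unconnected sockets */
                    return -1;
            }
            return mtu;
    }
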
@@ -948,214 +954,213 @@ EXPORT_SYMBOL(compat_ip_setsockopt);
  */
 
 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
-               char __user *optval, int __user *optlen)
+                           char __user *optval, int __user *optlen)
 {
        struct inet_sock *inet = inet_sk(sk);
        int val;
        int len;
 
-       if(level!=SOL_IP)
+       if (level != SOL_IP)
                return -EOPNOTSUPP;
 
 #ifdef CONFIG_IP_MROUTE
-       if(optname>=MRT_BASE && optname <=MRT_BASE+10)
-       {
+       if (optname >= MRT_BASE && optname <= MRT_BASE+10) {
                return ip_mroute_getsockopt(sk,optname,optval,optlen);
        }
 #endif
 
-       if(get_user(len,optlen))
+       if (get_user(len,optlen))
                return -EFAULT;
-       if(len < 0)
+       if (len < 0)
                return -EINVAL;
 
        lock_sock(sk);
 
-       switch(optname) {
-               case IP_OPTIONS:
-                       {
-                               unsigned char optbuf[sizeof(struct ip_options)+40];
-                               struct ip_options * opt = (struct ip_options*)optbuf;
-                               opt->optlen = 0;
-                               if (inet->opt)
-                                       memcpy(optbuf, inet->opt,
-                                              sizeof(struct ip_options)+
-                                              inet->opt->optlen);
-                               release_sock(sk);
-
-                               if (opt->optlen == 0)
-                                       return put_user(0, optlen);
-
-                               ip_options_undo(opt);
-
-                               len = min_t(unsigned int, len, opt->optlen);
-                               if(put_user(len, optlen))
-                                       return -EFAULT;
-                               if(copy_to_user(optval, opt->__data, len))
-                                       return -EFAULT;
-                               return 0;
-                       }
-               case IP_PKTINFO:
-                       val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
-                       break;
-               case IP_RECVTTL:
-                       val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
-                       break;
-               case IP_RECVTOS:
-                       val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
-                       break;
-               case IP_RECVOPTS:
-                       val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
-                       break;
-               case IP_RETOPTS:
-                       val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
-                       break;
-               case IP_PASSSEC:
-                       val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
-                       break;
-               case IP_TOS:
-                       val = inet->tos;
-                       break;
-               case IP_TTL:
-                       val = (inet->uc_ttl == -1 ?
-                              sysctl_ip_default_ttl :
-                              inet->uc_ttl);
-                       break;
-               case IP_HDRINCL:
-                       val = inet->hdrincl;
-                       break;
-               case IP_MTU_DISCOVER:
-                       val = inet->pmtudisc;
-                       break;
-               case IP_MTU:
-               {
-                       struct dst_entry *dst;
-                       val = 0;
-                       dst = sk_dst_get(sk);
-                       if (dst) {
-                               val = dst_mtu(dst);
-                               dst_release(dst);
-                       }
-                       if (!val) {
-                               release_sock(sk);
-                               return -ENOTCONN;
-                       }
-                       break;
+       switch (optname) {
+       case IP_OPTIONS:
+       {
+               unsigned char optbuf[sizeof(struct ip_options)+40];
+               struct ip_options *opt = (struct ip_options *)optbuf;
+               opt->optlen = 0;
+               if (inet->opt)
+                       memcpy(optbuf, inet->opt,
+                              sizeof(struct ip_options)+
+                              inet->opt->optlen);
+               release_sock(sk);
+
+               if (opt->optlen == 0)
+                       return put_user(0, optlen);
+
+               ip_options_undo(opt);
+
+               len = min_t(unsigned int, len, opt->optlen);
+               if (put_user(len, optlen))
+                       return -EFAULT;
+               if (copy_to_user(optval, opt->__data, len))
+                       return -EFAULT;
+               return 0;
+       }
+       case IP_PKTINFO:
+               val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
+               break;
+       case IP_RECVTTL:
+               val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
+               break;
+       case IP_RECVTOS:
+               val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
+               break;
+       case IP_RECVOPTS:
+               val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
+               break;
+       case IP_RETOPTS:
+               val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
+               break;
+       case IP_PASSSEC:
+               val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
+               break;
+       case IP_TOS:
+               val = inet->tos;
+               break;
+       case IP_TTL:
+               val = (inet->uc_ttl == -1 ?
+                      sysctl_ip_default_ttl :
+                      inet->uc_ttl);
+               break;
+       case IP_HDRINCL:
+               val = inet->hdrincl;
+               break;
+       case IP_MTU_DISCOVER:
+               val = inet->pmtudisc;
+               break;
+       case IP_MTU:
+       {
+               struct dst_entry *dst;
+               val = 0;
+               dst = sk_dst_get(sk);
+               if (dst) {
+                       val = dst_mtu(dst);
+                       dst_release(dst);
                }
-               case IP_RECVERR:
-                       val = inet->recverr;
-                       break;
-               case IP_MULTICAST_TTL:
-                       val = inet->mc_ttl;
-                       break;
-               case IP_MULTICAST_LOOP:
-                       val = inet->mc_loop;
-                       break;
-               case IP_MULTICAST_IF:
-               {
-                       struct in_addr addr;
-                       len = min_t(unsigned int, len, sizeof(struct in_addr));
-                       addr.s_addr = inet->mc_addr;
+               if (!val) {
                        release_sock(sk);
-
-                       if(put_user(len, optlen))
-                               return -EFAULT;
-                       if(copy_to_user(optval, &addr, len))
-                               return -EFAULT;
-                       return 0;
+                       return -ENOTCONN;
                }
-               case IP_MSFILTER:
-               {
-                       struct ip_msfilter msf;
-                       int err;
+               break;
+       }
+       case IP_RECVERR:
+               val = inet->recverr;
+               break;
+       case IP_MULTICAST_TTL:
+               val = inet->mc_ttl;
+               break;
+       case IP_MULTICAST_LOOP:
+               val = inet->mc_loop;
+               break;
+       case IP_MULTICAST_IF:
+       {
+               struct in_addr addr;
+               len = min_t(unsigned int, len, sizeof(struct in_addr));
+               addr.s_addr = inet->mc_addr;
+               release_sock(sk);
 
-                       if (len < IP_MSFILTER_SIZE(0)) {
-                               release_sock(sk);
-                               return -EINVAL;
-                       }
-                       if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
-                               release_sock(sk);
-                               return -EFAULT;
-                       }
-                       err = ip_mc_msfget(sk, &msf,
-                               (struct ip_msfilter __user *)optval, optlen);
+               if (put_user(len, optlen))
+                       return -EFAULT;
+               if (copy_to_user(optval, &addr, len))
+                       return -EFAULT;
+               return 0;
+       }
+       case IP_MSFILTER:
+       {
+               struct ip_msfilter msf;
+               int err;
+
+               if (len < IP_MSFILTER_SIZE(0)) {
                        release_sock(sk);
-                       return err;
+                       return -EINVAL;
                }
-               case MCAST_MSFILTER:
-               {
-                       struct group_filter gsf;
-                       int err;
-
-                       if (len < GROUP_FILTER_SIZE(0)) {
-                               release_sock(sk);
-                               return -EINVAL;
-                       }
-                       if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
-                               release_sock(sk);
-                               return -EFAULT;
-                       }
-                       err = ip_mc_gsfget(sk, &gsf,
-                               (struct group_filter __user *)optval, optlen);
+               if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
                        release_sock(sk);
-                       return err;
+                       return -EFAULT;
                }
-               case IP_PKTOPTIONS:
-               {
-                       struct msghdr msg;
+               err = ip_mc_msfget(sk, &msf,
+                                  (struct ip_msfilter __user *)optval, optlen);
+               release_sock(sk);
+               return err;
+       }
+       case MCAST_MSFILTER:
+       {
+               struct group_filter gsf;
+               int err;
 
+               if (len < GROUP_FILTER_SIZE(0)) {
                        release_sock(sk);
+                       return -EINVAL;
+               }
+               if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
+                       release_sock(sk);
+                       return -EFAULT;
+               }
+               err = ip_mc_gsfget(sk, &gsf,
+                                  (struct group_filter __user *)optval, optlen);
+               release_sock(sk);
+               return err;
+       }
+       case IP_PKTOPTIONS:
+       {
+               struct msghdr msg;
+
+               release_sock(sk);
 
-                       if (sk->sk_type != SOCK_STREAM)
-                               return -ENOPROTOOPT;
+               if (sk->sk_type != SOCK_STREAM)
+                       return -ENOPROTOOPT;
 
-                       msg.msg_control = optval;
-                       msg.msg_controllen = len;
-                       msg.msg_flags = 0;
+               msg.msg_control = optval;
+               msg.msg_controllen = len;
+               msg.msg_flags = 0;
 
-                       if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
-                               struct in_pktinfo info;
+               if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
+                       struct in_pktinfo info;
 
-                               info.ipi_addr.s_addr = inet->rcv_saddr;
-                               info.ipi_spec_dst.s_addr = inet->rcv_saddr;
-                               info.ipi_ifindex = inet->mc_index;
-                               put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
-                       }
-                       if (inet->cmsg_flags & IP_CMSG_TTL) {
-                               int hlim = inet->mc_ttl;
-                               put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
-                       }
-                       len -= msg.msg_controllen;
-                       return put_user(len, optlen);
+                       info.ipi_addr.s_addr = inet->rcv_saddr;
+                       info.ipi_spec_dst.s_addr = inet->rcv_saddr;
+                       info.ipi_ifindex = inet->mc_index;
+                       put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
                }
-               case IP_FREEBIND:
-                       val = inet->freebind;
-                       break;
-               default:
-                       release_sock(sk);
-                       return -ENOPROTOOPT;
+               if (inet->cmsg_flags & IP_CMSG_TTL) {
+                       int hlim = inet->mc_ttl;
+                       put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
+               }
+               len -= msg.msg_controllen;
+               return put_user(len, optlen);
+       }
+       case IP_FREEBIND:
+               val = inet->freebind;
+               break;
+       default:
+               release_sock(sk);
+               return -ENOPROTOOPT;
        }
        release_sock(sk);
 
        if (len < sizeof(int) && len > 0 && val>=0 && val<255) {
                unsigned char ucval = (unsigned char)val;
                len = 1;
-               if(put_user(len, optlen))
+               if (put_user(len, optlen))
                        return -EFAULT;
-               if(copy_to_user(optval,&ucval,1))
+               if (copy_to_user(optval,&ucval,1))
                        return -EFAULT;
        } else {
                len = min_t(unsigned int, sizeof(int), len);
-               if(put_user(len, optlen))
+               if (put_user(len, optlen))
                        return -EFAULT;
-               if(copy_to_user(optval,&val,len))
+               if (copy_to_user(optval,&val,len))
                        return -EFAULT;
        }
        return 0;
 }
 
 int ip_getsockopt(struct sock *sk, int level,
-               int optname, char __user *optval, int __user *optlen)
+                 int optname, char __user *optval, int __user *optlen)
 {
        int err;
 
@@ -1169,7 +1174,7 @@ int ip_getsockopt(struct sock *sk, int level,
           ) {
                int len;
 
-               if(get_user(len,optlen))
+               if (get_user(len,optlen))
                        return -EFAULT;
 
                lock_sock(sk);
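
Every branch of the getsockopt switch above funnels into the same tail: the kernel clamps the user-supplied length and copies val back as either a single byte or a full int. A minimal user-space caller of this path, shown only as a hedged illustration (it is not part of the patch, and error handling is trimmed):

/* Hypothetical user-space example of the IP_TTL branch handled above. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int ttl = 0;
        socklen_t len = sizeof(ttl);    /* >= sizeof(int), so a full int comes back */

        if (fd >= 0 && getsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, &len) == 0)
                printf("IP_TTL = %d (len = %u)\n", ttl, (unsigned int)len);
        return 0;
}
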
index aa704b8..ab86137 100644 (file)
@@ -43,21 +43,15 @@ static LIST_HEAD(ipcomp_tfms_list);
 
 static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 {
-       int err, plen, dlen;
        struct ipcomp_data *ipcd = x->data;
-       u8 *start, *scratch;
-       struct crypto_comp *tfm;
-       int cpu;
-
-       plen = skb->len;
-       dlen = IPCOMP_SCRATCH_SIZE;
-       start = skb->data;
+       const int plen = skb->len;
+       int dlen = IPCOMP_SCRATCH_SIZE;
+       const u8 *start = skb->data;
+       const int cpu = get_cpu();
+       u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
+       struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+       int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
 
-       cpu = get_cpu();
-       scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
-       tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-
-       err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
        if (err)
                goto out;
 
@@ -72,7 +66,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 
        skb->truesize += dlen - plen;
        __skb_put(skb, dlen - plen);
-       memcpy(skb->data, scratch, dlen);
+       skb_copy_to_linear_data(skb, scratch, dlen);
 out:
        put_cpu();
        return err;
@@ -90,10 +84,10 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Remove ipcomp header and decompress original payload */
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        ipch = (void *)skb->data;
        iph->protocol = ipch->nexthdr;
-       skb->h.raw = skb->nh.raw + sizeof(*ipch);
+       skb->transport_header = skb->network_header + sizeof(*ipch);
        __skb_pull(skb, sizeof(*ipch));
        err = ipcomp_decompress(x, skb);
 
@@ -103,23 +97,16 @@ out:
 
 static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
 {
-       int err, plen, dlen, ihlen;
-       struct iphdr *iph = skb->nh.iph;
        struct ipcomp_data *ipcd = x->data;
-       u8 *start, *scratch;
-       struct crypto_comp *tfm;
-       int cpu;
+       const int ihlen = ip_hdrlen(skb);
+       const int plen = skb->len - ihlen;
+       int dlen = IPCOMP_SCRATCH_SIZE;
+       u8 *start = skb->data + ihlen;
+       const int cpu = get_cpu();
+       u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
+       struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+       int err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
 
-       ihlen = iph->ihl * 4;
-       plen = skb->len - ihlen;
-       dlen = IPCOMP_SCRATCH_SIZE;
-       start = skb->data + ihlen;
-
-       cpu = get_cpu();
-       scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
-       tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-
-       err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
        if (err)
                goto out;
 
@@ -142,12 +129,11 @@ out:
 static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
-       struct iphdr *iph;
        struct ip_comp_hdr *ipch;
        struct ipcomp_data *ipcd = x->data;
        int hdr_len = 0;
+       struct iphdr *iph = ip_hdr(skb);
 
-       iph = skb->nh.iph;
        iph->tot_len = htons(skb->len);
        hdr_len = iph->ihl * 4;
        if ((skb->len - hdr_len) < ipcd->threshold) {
@@ -159,7 +145,7 @@ static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
                goto out_ok;
 
        err = ipcomp_compress(x, skb);
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
 
        if (err) {
                goto out_ok;
@@ -188,8 +174,8 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
        struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
        struct xfrm_state *x;
 
-       if (skb->h.icmph->type != ICMP_DEST_UNREACH ||
-           skb->h.icmph->code != ICMP_FRAG_NEEDED)
+       if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
+           icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                return;
 
        spi = htonl(ntohs(ipch->cpi));
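
The bulk of the changes in this and the following files replace direct access to the old sk_buff header unions with the new accessor helpers. An informal summary of the conversions that appear in these hunks (not an exhaustive list of the new API):

/* Old union-based access             New sk_buff helper (as used in these hunks)
 *
 *   skb->nh.iph                      ip_hdr(skb)
 *   skb->nh.raw                      skb_network_header(skb)
 *   skb->h.raw                       skb_transport_header(skb)
 *   skb->h.icmph                     icmp_hdr(skb)
 *   skb->nh.iph->ihl << 2            ip_hdrlen(skb)
 *   skb->nh.raw = skb->data          skb_reset_network_header(skb)
 *   skb->tail (as u8 *)              skb_tail_pointer(skb)
 *   memcpy(skb->data, src, len)      skb_copy_to_linear_data(skb, src, len)
 */
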
index cf49de1..597c800 100644 (file)
@@ -432,7 +432,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
                goto drop;
 
        /* Basic sanity checks can be done without the lock.  */
-       rarp = (struct arphdr *)skb->h.raw;
+       rarp = (struct arphdr *)skb_transport_header(skb);
 
        /* If this test doesn't pass, it's not IP, or we should
         * ignore it anyway.
@@ -455,7 +455,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
                goto drop;
 
        /* OK, it is all there and looks valid, process... */
-       rarp = (struct arphdr *)skb->h.raw;
+       rarp = (struct arphdr *)skb_transport_header(skb);
        rarp_ptr = (unsigned char *) (rarp + 1);
 
        /* One reply at a time, please. */
@@ -702,7 +702,8 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
        memset(b, 0, sizeof(struct bootp_pkt));
 
        /* Construct IP header */
-       skb->nh.iph = h = &b->iph;
+       skb_reset_network_header(skb);
+       h = ip_hdr(skb);
        h->version = 4;
        h->ihl = 5;
        h->tot_len = htons(sizeof(struct bootp_pkt));
@@ -782,7 +783,7 @@ static void __init ic_do_bootp_ext(u8 *ext)
        u8 *c;
 
        printk("DHCP/BOOTP: Got extension %d:",*ext);
-       for(c=ext+2; c<ext+2+ext[1]; c++)
+       for (c=ext+2; c<ext+2+ext[1]; c++)
                printk(" %02x", *c);
        printk("\n");
 #endif
@@ -845,7 +846,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
                           sizeof(struct udphdr)))
                goto drop;
 
-       b = (struct bootp_pkt *) skb->nh.iph;
+       b = (struct bootp_pkt *)skb_network_header(skb);
        h = &b->iph;
 
        if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP)
@@ -883,7 +884,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
        if (!pskb_may_pull(skb, skb->len))
                goto drop;
 
-       b = (struct bootp_pkt *) skb->nh.iph;
+       b = (struct bootp_pkt *)skb_network_header(skb);
        h = &b->iph;
 
        /* One reply at a time, please. */
@@ -938,7 +939,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
                                        if (opt[1] >= 4)
                                                memcpy(&server_id, opt + 2, 4);
                                        break;
-                               };
+                               }
                        }
 
 #ifdef IPCONFIG_DEBUG
@@ -983,7 +984,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
                                ic_myaddr = NONE;
                                ic_servaddr = NONE;
                                goto drop_unlock;
-                       };
+                       }
 
                        ic_dhcp_msgtype = mt;
 
@@ -1094,7 +1095,7 @@ static int __init ic_dynamic(void)
        retries = CONF_SEND_RETRIES;
        get_random_bytes(&timeout, sizeof(timeout));
        timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM);
-       for(;;) {
+       for (;;) {
 #ifdef IPCONFIG_BOOTP
                if (do_bootp && (d->able & IC_BOOTP))
                        ic_bootp_send_if(d, jiffies - start_jiffies);
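
The bootp sender above now derives the header pointer from the skb itself instead of aliasing into the packet structure. A minimal sketch of that pattern, assuming skb->data already points at the start of the IPv4 header, as it does in ic_bootp_send_if() (fill_outer_iph() is a hypothetical name used only for illustration):

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/in.h>

/* Sketch only: fill a freshly reserved IPv4 header in place. */
static void fill_outer_iph(struct sk_buff *skb, __be32 saddr, __be32 daddr,
                           unsigned int tot_len)
{
        struct iphdr *h;

        skb_reset_network_header(skb);  /* network header := skb->data */
        h = ip_hdr(skb);                /* typed view of those bytes   */
        h->version  = 4;
        h->ihl      = 5;
        h->tot_len  = htons(tot_len);
        h->protocol = IPPROTO_UDP;
        h->saddr    = saddr;
        h->daddr    = daddr;
}
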
index 3ec5ce0..ebd2f2d 100644 (file)
@@ -157,10 +157,10 @@ static struct ip_tunnel * ipip_tunnel_lookup(__be32 remote, __be32 local)
        return NULL;
 }
 
-static struct ip_tunnel **ipip_bucket(struct ip_tunnel *t)
+static struct ip_tunnel **__ipip_bucket(struct ip_tunnel_parm *parms)
 {
-       __be32 remote = t->parms.iph.daddr;
-       __be32 local = t->parms.iph.saddr;
+       __be32 remote = parms->iph.daddr;
+       __be32 local = parms->iph.saddr;
        unsigned h = 0;
        int prio = 0;
 
@@ -175,6 +175,10 @@ static struct ip_tunnel **ipip_bucket(struct ip_tunnel *t)
        return &tunnels[prio][h];
 }
 
+static inline struct ip_tunnel **ipip_bucket(struct ip_tunnel *t)
+{
+       return __ipip_bucket(&t->parms);
+}
 
 static void ipip_tunnel_unlink(struct ip_tunnel *t)
 {
@@ -206,19 +210,9 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c
        __be32 local = parms->iph.saddr;
        struct ip_tunnel *t, **tp, *nt;
        struct net_device *dev;
-       unsigned h = 0;
-       int prio = 0;
        char name[IFNAMSIZ];
 
-       if (remote) {
-               prio |= 2;
-               h ^= HASH(remote);
-       }
-       if (local) {
-               prio |= 1;
-               h ^= HASH(local);
-       }
-       for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
+       for (tp = __ipip_bucket(parms); (t = *tp) != NULL; tp = &t->next) {
                if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
                        return t;
        }
@@ -280,8 +274,8 @@ static int ipip_err(struct sk_buff *skb, u32 info)
    ICMP in the real Internet is absolutely infeasible.
  */
        struct iphdr *iph = (struct iphdr*)skb->data;
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        struct ip_tunnel *t;
        int err;
 
@@ -336,8 +330,8 @@ out:
        struct iphdr *iph = (struct iphdr*)dp;
        int hlen = iph->ihl<<2;
        struct iphdr *eiph;
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        int rel_type = 0;
        int rel_code = 0;
        __be32 rel_info = 0;
@@ -354,7 +348,7 @@ out:
        default:
                return 0;
        case ICMP_PARAMETERPROB:
-               n = ntohl(skb->h.icmph->un.gateway) >> 24;
+               n = ntohl(icmp_hdr(skb)->un.gateway) >> 24;
                if (n < hlen)
                        return 0;
 
@@ -373,7 +367,7 @@ out:
                        return 0;
                case ICMP_FRAG_NEEDED:
                        /* And it is the only really necessary thing :-) */
-                       n = ntohs(skb->h.icmph->un.frag.mtu);
+                       n = ntohs(icmp_hdr(skb)->un.frag.mtu);
                        if (n < hlen+68)
                                return 0;
                        n -= hlen;
@@ -405,7 +399,7 @@ out:
        dst_release(skb2->dst);
        skb2->dst = NULL;
        skb_pull(skb2, skb->data - (u8*)eiph);
-       skb2->nh.raw = skb2->data;
+       skb_reset_network_header(skb2);
 
        /* Try to guess incoming interface */
        memset(&fl, 0, sizeof(fl));
@@ -461,9 +455,10 @@ out:
 #endif
 }
 
-static inline void ipip_ecn_decapsulate(struct iphdr *outer_iph, struct sk_buff *skb)
+static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph,
+                                       struct sk_buff *skb)
 {
-       struct iphdr *inner_iph = skb->nh.iph;
+       struct iphdr *inner_iph = ip_hdr(skb);
 
        if (INET_ECN_is_ce(outer_iph->tos))
                IP_ECN_set_ce(inner_iph);
@@ -471,10 +466,8 @@ static inline void ipip_ecn_decapsulate(struct iphdr *outer_iph, struct sk_buff
 
 static int ipip_rcv(struct sk_buff *skb)
 {
-       struct iphdr *iph;
        struct ip_tunnel *tunnel;
-
-       iph = skb->nh.iph;
+       const struct iphdr *iph = ip_hdr(skb);
 
        read_lock(&ipip_lock);
        if ((tunnel = ipip_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) {
@@ -486,8 +479,8 @@ static int ipip_rcv(struct sk_buff *skb)
 
                secpath_reset(skb);
 
-               skb->mac.raw = skb->nh.raw;
-               skb->nh.raw = skb->data;
+               skb->mac_header = skb->network_header;
+               skb_reset_network_header(skb);
                skb->protocol = htons(ETH_P_IP);
                skb->pkt_type = PACKET_HOST;
 
@@ -521,7 +514,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        __be16 df = tiph->frag_off;
        struct rtable *rt;                      /* Route to the other host */
        struct net_device *tdev;                        /* Device to other host */
-       struct iphdr  *old_iph = skb->nh.iph;
+       struct iphdr  *old_iph = ip_hdr(skb);
        struct iphdr  *iph;                     /* Our new IP header */
        int    max_headroom;                    /* The extra header space needed */
        __be32 dst = tiph->daddr;
@@ -615,11 +608,12 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                        skb_set_owner_w(new_skb, skb->sk);
                dev_kfree_skb(skb);
                skb = new_skb;
-               old_iph = skb->nh.iph;
+               old_iph = ip_hdr(skb);
        }
 
-       skb->h.raw = skb->nh.raw;
-       skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
+       skb->transport_header = skb->network_header;
+       skb_push(skb, sizeof(struct iphdr));
+       skb_reset_network_header(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
                              IPSKB_REROUTED);
@@ -630,7 +624,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
         *      Push down and install the IPIP header.
         */
 
-       iph                     =       skb->nh.iph;
+       iph                     =       ip_hdr(skb);
        iph->version            =       4;
        iph->ihl                =       sizeof(struct iphdr)>>2;
        iph->frag_off           =       df;
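
The xmit path above now expresses the encapsulation step with the header-offset helpers instead of raw pointer aliasing. A sketch of just that sequence, assuming sufficient headroom for the outer header has already been ensured, as the code above does earlier in the function (push_outer_ip_header() is a hypothetical name):

#include <linux/skbuff.h>
#include <linux/ip.h>

/* Sketch: open an outer IPv4 header in front of the current packet. */
static void push_outer_ip_header(struct sk_buff *skb)
{
        /* the old (inner) IP header becomes the transport header ... */
        skb->transport_header = skb->network_header;
        /* ... and a new outer header is opened in front of it */
        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);
        /* ip_hdr(skb) now addresses the freshly pushed outer header */
}
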
index 601e3df..0ebae41 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/netfilter_ipv4.h>
 #include <net/ipip.h>
 #include <net/checksum.h>
+#include <net/netlink.h>
 
 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
 #define CONFIG_IP_PIMSM        1
@@ -302,8 +303,8 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
 
        atomic_dec(&cache_resolve_queue_len);
 
-       while((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
-               if (skb->nh.iph->version == 0) {
+       while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
+               if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
@@ -479,7 +480,7 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
 static struct mfc_cache *ipmr_cache_alloc(void)
 {
        struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
-       if(c==NULL)
+       if (c==NULL)
                return NULL;
        c->mfc_un.res.minvif = MAXVIFS;
        return c;
@@ -488,7 +489,7 @@ static struct mfc_cache *ipmr_cache_alloc(void)
 static struct mfc_cache *ipmr_cache_alloc_unres(void)
 {
        struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
-       if(c==NULL)
+       if (c==NULL)
                return NULL;
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10*HZ;
@@ -508,12 +509,13 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
         *      Play the pending entries through our router
         */
 
-       while((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
-               if (skb->nh.iph->version == 0) {
+       while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
+               if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 
                        if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
-                               nlh->nlmsg_len = skb->tail - (u8*)nlh;
+                               nlh->nlmsg_len = (skb_tail_pointer(skb) -
+                                                 (u8 *)nlh);
                        } else {
                                nlh->nlmsg_type = NLMSG_ERROR;
                                nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
@@ -539,7 +541,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 {
        struct sk_buff *skb;
-       int ihl = pkt->nh.iph->ihl<<2;
+       const int ihl = ip_hdrlen(pkt);
        struct igmphdr *igmp;
        struct igmpmsg *msg;
        int ret;
@@ -551,7 +553,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 #endif
                skb = alloc_skb(128, GFP_ATOMIC);
 
-       if(!skb)
+       if (!skb)
                return -ENOBUFS;
 
 #ifdef CONFIG_IP_PIMSM
@@ -561,14 +563,17 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
                   And all this only to mangle msg->im_msgtype and
                   to set msg->im_mbz to "mbz" :-)
                 */
-               msg = (struct igmpmsg*)skb_push(skb, sizeof(struct iphdr));
-               skb->nh.raw = skb->h.raw = (u8*)msg;
-               memcpy(msg, pkt->nh.raw, sizeof(struct iphdr));
+               skb_push(skb, sizeof(struct iphdr));
+               skb_reset_network_header(skb);
+               skb_reset_transport_header(skb);
+               msg = (struct igmpmsg *)skb_network_header(skb);
+               memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
                msg->im_msgtype = IGMPMSG_WHOLEPKT;
                msg->im_mbz = 0;
                msg->im_vif = reg_vif_num;
-               skb->nh.iph->ihl = sizeof(struct iphdr) >> 2;
-               skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr));
+               ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
+               ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
+                                            sizeof(struct iphdr));
        } else
 #endif
        {
@@ -577,10 +582,11 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
         *      Copy the IP header
         */
 
-       skb->nh.iph = (struct iphdr *)skb_put(skb, ihl);
-       memcpy(skb->data,pkt->data,ihl);
-       skb->nh.iph->protocol = 0;                      /* Flag to the kernel this is a route add */
-       msg = (struct igmpmsg*)skb->nh.iph;
+       skb->network_header = skb->tail;
+       skb_put(skb, ihl);
+       skb_copy_to_linear_data(skb, pkt->data, ihl);
+       ip_hdr(skb)->protocol = 0;                      /* Flag to the kernel this is a route add */
+       msg = (struct igmpmsg *)skb_network_header(skb);
        msg->im_vif = vifi;
        skb->dst = dst_clone(pkt->dst);
 
@@ -592,8 +598,8 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
        igmp->type      =
        msg->im_msgtype = assert;
        igmp->code      =       0;
-       skb->nh.iph->tot_len=htons(skb->len);                   /* Fix the length */
-       skb->h.raw = skb->nh.raw;
+       ip_hdr(skb)->tot_len = htons(skb->len);                 /* Fix the length */
+       skb->transport_header = skb->network_header;
        }
 
        if (mroute_socket == NULL) {
@@ -622,11 +628,12 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 {
        int err;
        struct mfc_cache *c;
+       const struct iphdr *iph = ip_hdr(skb);
 
        spin_lock_bh(&mfc_unres_lock);
        for (c=mfc_unres_queue; c; c=c->next) {
-               if (c->mfc_mcastgrp == skb->nh.iph->daddr &&
-                   c->mfc_origin == skb->nh.iph->saddr)
+               if (c->mfc_mcastgrp == iph->daddr &&
+                   c->mfc_origin == iph->saddr)
                        break;
        }
 
@@ -646,9 +653,9 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
                /*
                 *      Fill in the new cache entry
                 */
-               c->mfc_parent=-1;
-               c->mfc_origin=skb->nh.iph->saddr;
-               c->mfc_mcastgrp=skb->nh.iph->daddr;
+               c->mfc_parent   = -1;
+               c->mfc_origin   = iph->saddr;
+               c->mfc_mcastgrp = iph->daddr;
 
                /*
                 *      Reflect first query at mrouted.
@@ -734,7 +741,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
                return 0;
        }
 
-       if(!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
+       if (!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
                return -EINVAL;
 
        c=ipmr_cache_alloc();
@@ -788,7 +795,7 @@ static void mroute_clean_tables(struct sock *sk)
        /*
         *      Shut down all active vif entries
         */
-       for(i=0; i<maxvif; i++) {
+       for (i=0; i<maxvif; i++) {
                if (!(vif_table[i].flags&VIFF_STATIC))
                        vif_delete(i);
        }
@@ -858,119 +865,117 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
        struct vifctl vif;
        struct mfcctl mfc;
 
-       if(optname!=MRT_INIT)
-       {
-               if(sk!=mroute_socket && !capable(CAP_NET_ADMIN))
+       if (optname != MRT_INIT) {
+               if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
                        return -EACCES;
        }
 
-       switch(optname)
-       {
-               case MRT_INIT:
-                       if (sk->sk_type != SOCK_RAW ||
-                           inet_sk(sk)->num != IPPROTO_IGMP)
-                               return -EOPNOTSUPP;
-                       if(optlen!=sizeof(int))
-                               return -ENOPROTOOPT;
-
-                       rtnl_lock();
-                       if (mroute_socket) {
-                               rtnl_unlock();
-                               return -EADDRINUSE;
-                       }
-
-                       ret = ip_ra_control(sk, 1, mrtsock_destruct);
-                       if (ret == 0) {
-                               write_lock_bh(&mrt_lock);
-                               mroute_socket=sk;
-                               write_unlock_bh(&mrt_lock);
+       switch (optname) {
+       case MRT_INIT:
+               if (sk->sk_type != SOCK_RAW ||
+                   inet_sk(sk)->num != IPPROTO_IGMP)
+                       return -EOPNOTSUPP;
+               if (optlen!=sizeof(int))
+                       return -ENOPROTOOPT;
 
-                               ipv4_devconf.mc_forwarding++;
-                       }
+               rtnl_lock();
+               if (mroute_socket) {
                        rtnl_unlock();
-                       return ret;
-               case MRT_DONE:
-                       if (sk!=mroute_socket)
-                               return -EACCES;
-                       return ip_ra_control(sk, 0, NULL);
-               case MRT_ADD_VIF:
-               case MRT_DEL_VIF:
-                       if(optlen!=sizeof(vif))
-                               return -EINVAL;
-                       if (copy_from_user(&vif,optval,sizeof(vif)))
-                               return -EFAULT;
-                       if(vif.vifc_vifi >= MAXVIFS)
-                               return -ENFILE;
-                       rtnl_lock();
-                       if (optname==MRT_ADD_VIF) {
-                               ret = vif_add(&vif, sk==mroute_socket);
-                       } else {
-                               ret = vif_delete(vif.vifc_vifi);
-                       }
-                       rtnl_unlock();
-                       return ret;
+                       return -EADDRINUSE;
+               }
+
+               ret = ip_ra_control(sk, 1, mrtsock_destruct);
+               if (ret == 0) {
+                       write_lock_bh(&mrt_lock);
+                       mroute_socket=sk;
+                       write_unlock_bh(&mrt_lock);
+
+                       ipv4_devconf.mc_forwarding++;
+               }
+               rtnl_unlock();
+               return ret;
+       case MRT_DONE:
+               if (sk!=mroute_socket)
+                       return -EACCES;
+               return ip_ra_control(sk, 0, NULL);
+       case MRT_ADD_VIF:
+       case MRT_DEL_VIF:
+               if (optlen!=sizeof(vif))
+                       return -EINVAL;
+               if (copy_from_user(&vif,optval,sizeof(vif)))
+                       return -EFAULT;
+               if (vif.vifc_vifi >= MAXVIFS)
+                       return -ENFILE;
+               rtnl_lock();
+               if (optname==MRT_ADD_VIF) {
+                       ret = vif_add(&vif, sk==mroute_socket);
+               } else {
+                       ret = vif_delete(vif.vifc_vifi);
+               }
+               rtnl_unlock();
+               return ret;
 
                /*
                 *      Manipulate the forwarding caches. These live
                 *      in a sort of kernel/user symbiosis.
                 */
-               case MRT_ADD_MFC:
-               case MRT_DEL_MFC:
-                       if(optlen!=sizeof(mfc))
-                               return -EINVAL;
-                       if (copy_from_user(&mfc,optval, sizeof(mfc)))
-                               return -EFAULT;
-                       rtnl_lock();
-                       if (optname==MRT_DEL_MFC)
-                               ret = ipmr_mfc_delete(&mfc);
-                       else
-                               ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
-                       rtnl_unlock();
-                       return ret;
+       case MRT_ADD_MFC:
+       case MRT_DEL_MFC:
+               if (optlen!=sizeof(mfc))
+                       return -EINVAL;
+               if (copy_from_user(&mfc,optval, sizeof(mfc)))
+                       return -EFAULT;
+               rtnl_lock();
+               if (optname==MRT_DEL_MFC)
+                       ret = ipmr_mfc_delete(&mfc);
+               else
+                       ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
+               rtnl_unlock();
+               return ret;
                /*
                 *      Control PIM assert.
                 */
-               case MRT_ASSERT:
-               {
-                       int v;
-                       if(get_user(v,(int __user *)optval))
-                               return -EFAULT;
-                       mroute_do_assert=(v)?1:0;
-                       return 0;
-               }
+       case MRT_ASSERT:
+       {
+               int v;
+               if (get_user(v,(int __user *)optval))
+                       return -EFAULT;
+               mroute_do_assert=(v)?1:0;
+               return 0;
+       }
 #ifdef CONFIG_IP_PIMSM
-               case MRT_PIM:
-               {
-                       int v, ret;
-                       if(get_user(v,(int __user *)optval))
-                               return -EFAULT;
-                       v = (v)?1:0;
-                       rtnl_lock();
-                       ret = 0;
-                       if (v != mroute_do_pim) {
-                               mroute_do_pim = v;
-                               mroute_do_assert = v;
+       case MRT_PIM:
+       {
+               int v, ret;
+               if (get_user(v,(int __user *)optval))
+                       return -EFAULT;
+               v = (v)?1:0;
+               rtnl_lock();
+               ret = 0;
+               if (v != mroute_do_pim) {
+                       mroute_do_pim = v;
+                       mroute_do_assert = v;
 #ifdef CONFIG_IP_PIMSM_V2
-                               if (mroute_do_pim)
-                                       ret = inet_add_protocol(&pim_protocol,
-                                                               IPPROTO_PIM);
-                               else
-                                       ret = inet_del_protocol(&pim_protocol,
-                                                               IPPROTO_PIM);
-                               if (ret < 0)
-                                       ret = -EAGAIN;
+                       if (mroute_do_pim)
+                               ret = inet_add_protocol(&pim_protocol,
+                                                       IPPROTO_PIM);
+                       else
+                               ret = inet_del_protocol(&pim_protocol,
+                                                       IPPROTO_PIM);
+                       if (ret < 0)
+                               ret = -EAGAIN;
 #endif
-                       }
-                       rtnl_unlock();
-                       return ret;
                }
+               rtnl_unlock();
+               return ret;
+       }
 #endif
-               /*
-                *      Spurious command, or MRT_VERSION which you cannot
-                *      set.
-                */
-               default:
-                       return -ENOPROTOOPT;
+       /*
+        *      Spurious command, or MRT_VERSION which you cannot
+        *      set.
+        */
+       default:
+               return -ENOPROTOOPT;
        }
 }
 
@@ -983,7 +988,7 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u
        int olr;
        int val;
 
-       if(optname!=MRT_VERSION &&
+       if (optname!=MRT_VERSION &&
 #ifdef CONFIG_IP_PIMSM
           optname!=MRT_PIM &&
 #endif
@@ -997,17 +1002,17 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u
        if (olr < 0)
                return -EINVAL;
 
-       if(put_user(olr,optlen))
+       if (put_user(olr,optlen))
                return -EFAULT;
-       if(optname==MRT_VERSION)
+       if (optname==MRT_VERSION)
                val=0x0305;
 #ifdef CONFIG_IP_PIMSM
-       else if(optname==MRT_PIM)
+       else if (optname==MRT_PIM)
                val=mroute_do_pim;
 #endif
        else
                val=mroute_do_assert;
-       if(copy_to_user(optval,&val,olr))
+       if (copy_to_user(optval,&val,olr))
                return -EFAULT;
        return 0;
 }
@@ -1023,48 +1028,47 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
        struct vif_device *vif;
        struct mfc_cache *c;
 
-       switch(cmd)
-       {
-               case SIOCGETVIFCNT:
-                       if (copy_from_user(&vr,arg,sizeof(vr)))
-                               return -EFAULT;
-                       if(vr.vifi>=maxvif)
-                               return -EINVAL;
-                       read_lock(&mrt_lock);
-                       vif=&vif_table[vr.vifi];
-                       if(VIF_EXISTS(vr.vifi)) {
-                               vr.icount=vif->pkt_in;
-                               vr.ocount=vif->pkt_out;
-                               vr.ibytes=vif->bytes_in;
-                               vr.obytes=vif->bytes_out;
-                               read_unlock(&mrt_lock);
-
-                               if (copy_to_user(arg,&vr,sizeof(vr)))
-                                       return -EFAULT;
-                               return 0;
-                       }
+       switch (cmd) {
+       case SIOCGETVIFCNT:
+               if (copy_from_user(&vr,arg,sizeof(vr)))
+                       return -EFAULT;
+               if (vr.vifi>=maxvif)
+                       return -EINVAL;
+               read_lock(&mrt_lock);
+               vif=&vif_table[vr.vifi];
+               if (VIF_EXISTS(vr.vifi)) {
+                       vr.icount=vif->pkt_in;
+                       vr.ocount=vif->pkt_out;
+                       vr.ibytes=vif->bytes_in;
+                       vr.obytes=vif->bytes_out;
                        read_unlock(&mrt_lock);
-                       return -EADDRNOTAVAIL;
-               case SIOCGETSGCNT:
-                       if (copy_from_user(&sr,arg,sizeof(sr)))
-                               return -EFAULT;
 
-                       read_lock(&mrt_lock);
-                       c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
-                       if (c) {
-                               sr.pktcnt = c->mfc_un.res.pkt;
-                               sr.bytecnt = c->mfc_un.res.bytes;
-                               sr.wrong_if = c->mfc_un.res.wrong_if;
-                               read_unlock(&mrt_lock);
-
-                               if (copy_to_user(arg,&sr,sizeof(sr)))
-                                       return -EFAULT;
-                               return 0;
-                       }
+                       if (copy_to_user(arg,&vr,sizeof(vr)))
+                               return -EFAULT;
+                       return 0;
+               }
+               read_unlock(&mrt_lock);
+               return -EADDRNOTAVAIL;
+       case SIOCGETSGCNT:
+               if (copy_from_user(&sr,arg,sizeof(sr)))
+                       return -EFAULT;
+
+               read_lock(&mrt_lock);
+               c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
+               if (c) {
+                       sr.pktcnt = c->mfc_un.res.pkt;
+                       sr.bytecnt = c->mfc_un.res.bytes;
+                       sr.wrong_if = c->mfc_un.res.wrong_if;
                        read_unlock(&mrt_lock);
-                       return -EADDRNOTAVAIL;
-               default:
-                       return -ENOIOCTLCMD;
+
+                       if (copy_to_user(arg,&sr,sizeof(sr)))
+                               return -EFAULT;
+                       return 0;
+               }
+               read_unlock(&mrt_lock);
+               return -EADDRNOTAVAIL;
+       default:
+               return -ENOIOCTLCMD;
        }
 }
 
@@ -1076,7 +1080,7 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;
        v=&vif_table[0];
-       for(ct=0;ct<maxvif;ct++,v++) {
+       for (ct=0;ct<maxvif;ct++,v++) {
                if (v->dev==ptr)
                        vif_delete(ct);
        }
@@ -1096,11 +1100,17 @@ static struct notifier_block ip_mr_notifier={
 
 static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 {
-       struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr));
+       struct iphdr *iph;
+       struct iphdr *old_iph = ip_hdr(skb);
+
+       skb_push(skb, sizeof(struct iphdr));
+       skb->transport_header = skb->network_header;
+       skb_reset_network_header(skb);
+       iph = ip_hdr(skb);
 
        iph->version    =       4;
-       iph->tos        =       skb->nh.iph->tos;
-       iph->ttl        =       skb->nh.iph->ttl;
+       iph->tos        =       old_iph->tos;
+       iph->ttl        =       old_iph->ttl;
        iph->frag_off   =       0;
        iph->daddr      =       daddr;
        iph->saddr      =       saddr;
@@ -1110,8 +1120,6 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
        ip_select_ident(iph, skb->dst, NULL);
        ip_send_check(iph);
 
-       skb->h.ipiph = skb->nh.iph;
-       skb->nh.iph = iph;
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        nf_reset(skb);
 }
@@ -1134,7 +1142,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
 
 static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 {
-       struct iphdr *iph = skb->nh.iph;
+       const struct iphdr *iph = ip_hdr(skb);
        struct vif_device *vif = &vif_table[vifi];
        struct net_device *dev;
        struct rtable *rt;
@@ -1200,8 +1208,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 
        dst_release(skb->dst);
        skb->dst = &rt->u.dst;
-       iph = skb->nh.iph;
-       ip_decrease_ttl(iph);
+       ip_decrease_ttl(ip_hdr(skb));
 
        /* FIXME: forward and output firewalls used to be called here.
         * What do we do with netfilter? -- RR */
@@ -1301,7 +1308,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
         *      Forward the frame
         */
        for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
-               if (skb->nh.iph->ttl > cache->mfc_un.res.ttls[ct]) {
+               if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
                        if (psend != -1) {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2)
@@ -1347,7 +1354,7 @@ int ip_mr_input(struct sk_buff *skb)
                    if (IPCB(skb)->opt.router_alert) {
                            if (ip_call_ra_chain(skb))
                                    return 0;
-                   } else if (skb->nh.iph->protocol == IPPROTO_IGMP){
+                   } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
                            /* IGMPv1 (and broken IGMPv2 implementations sort of
                               Cisco IOS <= 11.2(8)) do not put router alert
                               option to IGMP packets destined to routable
@@ -1366,7 +1373,7 @@ int ip_mr_input(struct sk_buff *skb)
        }
 
        read_lock(&mrt_lock);
-       cache = ipmr_cache_find(skb->nh.iph->saddr, skb->nh.iph->daddr);
+       cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 
        /*
         *      No usable cache entry
@@ -1426,14 +1433,15 @@ int pim_rcv_v1(struct sk_buff * skb)
        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
                goto drop;
 
-       pim = (struct igmphdr*)skb->h.raw;
+       pim = igmp_hdr(skb);
 
        if (!mroute_do_pim ||
            skb->len < sizeof(*pim) + sizeof(*encap) ||
            pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
                goto drop;
 
-       encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr));
+       encap = (struct iphdr *)(skb_transport_header(skb) +
+                                sizeof(struct igmphdr));
        /*
           Check that:
           a. packet is really destinted to a multicast group
@@ -1455,9 +1463,9 @@ int pim_rcv_v1(struct sk_buff * skb)
        if (reg_dev == NULL)
                goto drop;
 
-       skb->mac.raw = skb->nh.raw;
+       skb->mac_header = skb->network_header;
        skb_pull(skb, (u8*)encap - skb->data);
-       skb->nh.iph = (struct iphdr *)skb->data;
+       skb_reset_network_header(skb);
        skb->dev = reg_dev;
        skb->protocol = htons(ETH_P_IP);
        skb->ip_summed = 0;
@@ -1486,7 +1494,7 @@ static int pim_rcv(struct sk_buff * skb)
        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
                goto drop;
 
-       pim = (struct pimreghdr*)skb->h.raw;
+       pim = (struct pimreghdr *)skb_transport_header(skb);
        if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
            (pim->flags&PIM_NULL_REGISTER) ||
            (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
@@ -1494,7 +1502,8 @@ static int pim_rcv(struct sk_buff * skb)
                goto drop;
 
        /* check if the inner packet is destined to mcast group */
-       encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr));
+       encap = (struct iphdr *)(skb_transport_header(skb) +
+                                sizeof(struct pimreghdr));
        if (!MULTICAST(encap->daddr) ||
            encap->tot_len == 0 ||
            ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
@@ -1510,9 +1519,9 @@ static int pim_rcv(struct sk_buff * skb)
        if (reg_dev == NULL)
                goto drop;
 
-       skb->mac.raw = skb->nh.raw;
+       skb->mac_header = skb->network_header;
        skb_pull(skb, (u8*)encap - skb->data);
-       skb->nh.iph = (struct iphdr *)skb->data;
+       skb_reset_network_header(skb);
        skb->dev = reg_dev;
        skb->protocol = htons(ETH_P_IP);
        skb->ip_summed = 0;
@@ -1537,7 +1546,7 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
        int ct;
        struct rtnexthop *nhp;
        struct net_device *dev = vif_table[c->mfc_parent].dev;
-       u8 *b = skb->tail;
+       u8 *b = skb_tail_pointer(skb);
        struct rtattr *mp_head;
 
        if (dev)
@@ -1557,12 +1566,12 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
                }
        }
        mp_head->rta_type = RTA_MULTIPATH;
-       mp_head->rta_len = skb->tail - (u8*)mp_head;
+       mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
        rtm->rtm_type = RTN_MULTICAST;
        return 1;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -EMSGSIZE;
 }
 
@@ -1577,6 +1586,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 
        if (cache==NULL) {
                struct sk_buff *skb2;
+               struct iphdr *iph;
                struct net_device *dev;
                int vif;
 
@@ -1596,11 +1606,13 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
                        return -ENOMEM;
                }
 
-               skb2->nh.raw = skb_push(skb2, sizeof(struct iphdr));
-               skb2->nh.iph->ihl = sizeof(struct iphdr)>>2;
-               skb2->nh.iph->saddr = rt->rt_src;
-               skb2->nh.iph->daddr = rt->rt_dst;
-               skb2->nh.iph->version = 0;
+               skb_push(skb2, sizeof(struct iphdr));
+               skb_reset_network_header(skb2);
+               iph = ip_hdr(skb2);
+               iph->ihl = sizeof(struct iphdr) >> 2;
+               iph->saddr = rt->rt_src;
+               iph->daddr = rt->rt_dst;
+               iph->version = 0;
                err = ipmr_cache_unresolved(vif, skb2);
                read_unlock(&mrt_lock);
                return err;
@@ -1625,7 +1637,7 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
                                           loff_t pos)
 {
        for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
-               if(!VIF_EXISTS(iter->ct))
+               if (!VIF_EXISTS(iter->ct))
                        continue;
                if (pos-- == 0)
                        return &vif_table[iter->ct];
@@ -1649,7 +1661,7 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                return ipmr_vif_seq_idx(iter, 0);
 
        while (++iter->ct < maxvif) {
-               if(!VIF_EXISTS(iter->ct))
+               if (!VIF_EXISTS(iter->ct))
                        continue;
                return &vif_table[iter->ct];
        }
@@ -1680,7 +1692,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations ipmr_vif_seq_ops = {
+static const struct seq_operations ipmr_vif_seq_ops = {
        .start = ipmr_vif_seq_start,
        .next  = ipmr_vif_seq_next,
        .stop  = ipmr_vif_seq_stop,
@@ -1732,14 +1744,14 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
        it->cache = mfc_cache_array;
        read_lock(&mrt_lock);
        for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
-               for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
+               for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
                        if (pos-- == 0)
                                return mfc;
        read_unlock(&mrt_lock);
 
        it->cache = &mfc_unres_queue;
        spin_lock_bh(&mfc_unres_lock);
-       for(mfc = mfc_unres_queue; mfc; mfc = mfc->next)
+       for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
                if (pos-- == 0)
                        return mfc;
        spin_unlock_bh(&mfc_unres_lock);
@@ -1829,9 +1841,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
                           mfc->mfc_un.res.wrong_if);
 
                if (it->cache != &mfc_unres_queue) {
-                       for(n = mfc->mfc_un.res.minvif;
-                           n < mfc->mfc_un.res.maxvif; n++ ) {
-                               if(VIF_EXISTS(n)
+                       for (n = mfc->mfc_un.res.minvif;
+                            n < mfc->mfc_un.res.maxvif; n++) {
+                               if (VIF_EXISTS(n)
                                   && mfc->mfc_un.res.ttls[n] < 255)
                                seq_printf(seq,
                                           " %2d:%-3d",
@@ -1843,7 +1855,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations ipmr_mfc_seq_ops = {
+static const struct seq_operations ipmr_mfc_seq_ops = {
        .start = ipmr_mfc_seq_start,
        .next  = ipmr_mfc_seq_next,
        .stop  = ipmr_mfc_seq_stop,
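
ipmr_fill_mroute() above also switches from skb->tail arithmetic to skb_tail_pointer() and from skb_trim() to nlmsg_trim() for the error unwind. A condensed sketch of that mark-and-rollback pattern (fill_example_route() and its single tailroom check are hypothetical; the real function appends one rtnexthop per forwarding interface):

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>

static int fill_example_route(struct sk_buff *skb)
{
        u8 *b = skb_tail_pointer(skb);          /* mark the tail before appending */
        struct rtattr *mp_head;

        if (skb_tailroom(skb) < RTA_LENGTH(0))
                goto rtattr_failure;
        mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

        /* ... append struct rtnexthop entries, each guarded the same way ... */

        mp_head->rta_type = RTA_MULTIPATH;
        mp_head->rta_len  = skb_tail_pointer(skb) - (u8 *)mp_head;
        return 1;

rtattr_failure:
        nlmsg_trim(skb, b);                     /* roll back the partial attribute */
        return -EMSGSIZE;
}
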
index 22e104c..15ad5dd 100644 (file)
@@ -331,14 +331,14 @@ static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb,
                                  struct ip_vs_app *app)
 {
        int diff;
-       unsigned int tcp_offset = (*pskb)->nh.iph->ihl*4;
+       const unsigned int tcp_offset = ip_hdrlen(*pskb);
        struct tcphdr *th;
        __u32 seq;
 
        if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
                return 0;
 
-       th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset);
+       th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset);
 
        /*
         *      Remember seq number in case this pkt gets resized
@@ -406,14 +406,14 @@ static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb,
                                 struct ip_vs_app *app)
 {
        int diff;
-       unsigned int tcp_offset = (*pskb)->nh.iph->ihl*4;
+       const unsigned int tcp_offset = ip_hdrlen(*pskb);
        struct tcphdr *th;
        __u32 seq;
 
        if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
                return 0;
 
-       th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset);
+       th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset);
 
        /*
         *      Remember seq number in case this pkt gets resized
@@ -577,7 +577,6 @@ static const struct file_operations ip_vs_app_fops = {
 int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
                      char *o_buf, int o_len, char *n_buf, int n_len)
 {
-       struct iphdr *iph;
        int diff;
        int o_offset;
        int o_left;
@@ -603,12 +602,11 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
                skb_put(skb, diff);
                memmove(skb->data + o_offset + n_len,
                        skb->data + o_offset + o_len, o_left);
-               memcpy(skb->data + o_offset, n_buf, n_len);
+               skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len);
        }
 
        /* must update the iph total length here */
-       iph = skb->nh.iph;
-       iph->tot_len = htons(skb->len);
+       ip_hdr(skb)->tot_len = htons(skb->len);
 
        LeaveFunction(9);
        return 0;
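
The conversions above follow the sk_buff header-accessor API: skb->nh.iph becomes ip_hdr(skb), skb->nh.iph->ihl*4 becomes ip_hdrlen(skb), skb->nh.raw becomes skb_network_header(skb), and a memcpy() into the linear area becomes skb_copy_to_linear_data_offset(). A minimal sketch of the pattern, assuming a linear skb whose network header is already set (example_tcp_header() is a hypothetical helper, not kernel API):

	#include <linux/ip.h>
	#include <linux/tcp.h>
	#include <linux/skbuff.h>
	#include <net/ip.h>

	/* Locate the TCP header behind the IP header using the accessor
	 * helpers instead of poking at skb->nh directly. */
	static inline struct tcphdr *example_tcp_header(struct sk_buff *skb)
	{
		/* was: (struct tcphdr *)(skb->nh.raw + skb->nh.iph->ihl * 4) */
		return (struct tcphdr *)(skb_network_header(skb) + ip_hdrlen(skb));
	}
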
index 24d7b66..f005a2f 100644 (file)
@@ -212,7 +212,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
                    __be16 ports[2])
 {
        struct ip_vs_conn *cp = NULL;
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
        struct ip_vs_dest *dest;
        struct ip_vs_conn *ct;
        __be16  dport;   /* destination port to forward */
@@ -381,7 +381,7 @@ struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 {
        struct ip_vs_conn *cp = NULL;
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
        struct ip_vs_dest *dest;
        __be16 _ports[2], *pptr;
 
@@ -447,7 +447,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
                struct ip_vs_protocol *pp)
 {
        __be16 _ports[2], *pptr;
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
 
        pptr = skb_header_pointer(skb, iph->ihl*4,
                                  sizeof(_ports), _ports);
@@ -546,7 +546,7 @@ ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
        skb = ip_defrag(skb, user);
        if (skb)
-               ip_send_check(skb->nh.iph);
+               ip_send_check(ip_hdr(skb));
        return skb;
 }
 
@@ -557,9 +557,10 @@ ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
                    struct ip_vs_conn *cp, int inout)
 {
-       struct iphdr *iph        = skb->nh.iph;
+       struct iphdr *iph        = ip_hdr(skb);
        unsigned int icmp_offset = iph->ihl*4;
-       struct icmphdr *icmph    = (struct icmphdr *)(skb->nh.raw + icmp_offset);
+       struct icmphdr *icmph    = (struct icmphdr *)(skb_network_header(skb) +
+                                                     icmp_offset);
        struct iphdr *ciph       = (struct iphdr *)(icmph + 1);
 
        if (inout) {
@@ -617,14 +618,14 @@ static int ip_vs_out_icmp(struct sk_buff **pskb, int *related)
        *related = 1;
 
        /* reassemble IP fragments */
-       if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
+       if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
                skb = ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT);
                if (!skb)
                        return NF_STOLEN;
                *pskb = skb;
        }
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        offset = ihl = iph->ihl * 4;
        ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
        if (ic == NULL)
@@ -659,7 +660,7 @@ static int ip_vs_out_icmp(struct sk_buff **pskb, int *related)
                return NF_ACCEPT;
 
        /* Is the embedded protocol header present? */
-       if (unlikely(cih->frag_off & __constant_htons(IP_OFFSET) &&
+       if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
                     pp->dont_defrag))
                return NF_ACCEPT;
 
@@ -680,8 +681,7 @@ static int ip_vs_out_icmp(struct sk_buff **pskb, int *related)
        }
 
        /* Ensure the checksum is correct */
-       if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
-           ip_vs_checksum_complete(skb, ihl)) {
+       if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
                /* Failed checksum! */
                IP_VS_DBG(1, "Forward ICMP: failed checksum from %d.%d.%d.%d!\n",
                          NIPQUAD(iph->saddr));
@@ -712,8 +712,7 @@ static inline int is_tcp_reset(const struct sk_buff *skb)
 {
        struct tcphdr _tcph, *th;
 
-       th = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
-                               sizeof(_tcph), &_tcph);
+       th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
        if (th == NULL)
                return 0;
        return th->rst;
@@ -740,14 +739,14 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
        if (skb->ipvs_property)
                return NF_ACCEPT;
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        if (unlikely(iph->protocol == IPPROTO_ICMP)) {
                int related, verdict = ip_vs_out_icmp(pskb, &related);
 
                if (related)
                        return verdict;
                skb = *pskb;
-               iph = skb->nh.iph;
+               iph = ip_hdr(skb);
        }
 
        pp = ip_vs_proto_get(iph->protocol);
@@ -755,12 +754,12 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
                return NF_ACCEPT;
 
        /* reassemble IP fragments */
-       if (unlikely(iph->frag_off & __constant_htons(IP_MF|IP_OFFSET) &&
+       if (unlikely(iph->frag_off & htons(IP_MF|IP_OFFSET) &&
                     !pp->dont_defrag)) {
                skb = ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT);
                if (!skb)
                        return NF_STOLEN;
-               iph = skb->nh.iph;
+               iph = ip_hdr(skb);
                *pskb = skb;
        }
 
@@ -810,8 +809,8 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
        if (pp->snat_handler && !pp->snat_handler(pskb, pp, cp))
                goto drop;
        skb = *pskb;
-       skb->nh.iph->saddr = cp->vaddr;
-       ip_send_check(skb->nh.iph);
+       ip_hdr(skb)->saddr = cp->vaddr;
+       ip_send_check(ip_hdr(skb));
 
        /* For policy routing, packets originating from this
         * machine itself may be routed differently to packets
@@ -861,7 +860,7 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
        *related = 1;
 
        /* reassemble IP fragments */
-       if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
+       if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
                skb = ip_vs_gather_frags(skb,
                                         hooknum == NF_IP_LOCAL_IN ?
                                         IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD);
@@ -870,7 +869,7 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
                *pskb = skb;
        }
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        offset = ihl = iph->ihl * 4;
        ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
        if (ic == NULL)
@@ -905,7 +904,7 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
                return NF_ACCEPT;
 
        /* Is the embedded protocol header present? */
-       if (unlikely(cih->frag_off & __constant_htons(IP_OFFSET) &&
+       if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
                     pp->dont_defrag))
                return NF_ACCEPT;
 
@@ -921,8 +920,7 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
        verdict = NF_DROP;
 
        /* Ensure the checksum is correct */
-       if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
-           ip_vs_checksum_complete(skb, ihl)) {
+       if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
                /* Failed checksum! */
                IP_VS_DBG(1, "Incoming ICMP: failed checksum from %d.%d.%d.%d!\n",
                          NIPQUAD(iph->saddr));
@@ -966,19 +964,19 @@ ip_vs_in(unsigned int hooknum, struct sk_buff **pskb,
                     || skb->dev == &loopback_dev || skb->sk)) {
                IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n",
                          skb->pkt_type,
-                         skb->nh.iph->protocol,
-                         NIPQUAD(skb->nh.iph->daddr));
+                         ip_hdr(skb)->protocol,
+                         NIPQUAD(ip_hdr(skb)->daddr));
                return NF_ACCEPT;
        }
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        if (unlikely(iph->protocol == IPPROTO_ICMP)) {
                int related, verdict = ip_vs_in_icmp(pskb, &related, hooknum);
 
                if (related)
                        return verdict;
                skb = *pskb;
-               iph = skb->nh.iph;
+               iph = ip_hdr(skb);
        }
 
        /* Protocol supported? */
@@ -1064,7 +1062,7 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff **pskb,
 {
        int r;
 
-       if ((*pskb)->nh.iph->protocol != IPPROTO_ICMP)
+       if (ip_hdr(*pskb)->protocol != IPPROTO_ICMP)
                return NF_ACCEPT;
 
        return ip_vs_in_icmp(pskb, &r, hooknum);
index 502111f..dcf5d46 100644 (file)
@@ -204,7 +204,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 {
        struct ip_vs_dest *dest;
        struct ip_vs_dh_bucket *tbl;
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
 
        IP_VS_DBG(6, "ip_vs_dh_schedule(): Scheduling...\n");
 
index 847c47a..344ddbb 100644 (file)
@@ -159,10 +159,10 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 0;
 
        if (cp->app_data == &ip_vs_ftp_pasv) {
-               iph = (*pskb)->nh.iph;
+               iph = ip_hdr(*pskb);
                th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]);
                data = (char *)th + (th->doff << 2);
-               data_limit = (*pskb)->tail;
+               data_limit = skb_tail_pointer(*pskb);
 
                if (ip_vs_ftp_get_addrport(data, data_limit,
                                           SERVER_STRING,
@@ -262,14 +262,14 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        /*
         * Detecting whether it is passive
         */
-       iph = (*pskb)->nh.iph;
+       iph = ip_hdr(*pskb);
        th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]);
 
        /* Since there may be OPTIONS in the TCP packet and the HLEN is
           the length of the header in 32-bit multiples, it is accurate
           to calculate data address by th+HLEN*4 */
        data = data_start = (char *)th + (th->doff << 2);
-       data_limit = (*pskb)->tail;
+       data_limit = skb_tail_pointer(*pskb);
 
        while (data <= data_limit - 6) {
                if (strnicmp(data, "PASV\r\n", 6) == 0) {
index c801273..052f4ed 100644 (file)
@@ -521,7 +521,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        struct ip_vs_dest *dest;
        struct ip_vs_lblc_table *tbl;
        struct ip_vs_lblc_entry *en;
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
 
        IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");
 
index 23f9b9e..6225aca 100644 (file)
@@ -775,7 +775,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        struct ip_vs_dest *dest;
        struct ip_vs_lblcr_table *tbl;
        struct ip_vs_lblcr_entry *en;
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
 
        IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");
 
index 8b0505b..a842676 100644 (file)
@@ -52,15 +52,15 @@ ah_conn_in_get(const struct sk_buff *skb,
        if (likely(!inverse)) {
                cp = ip_vs_conn_in_get(IPPROTO_UDP,
                                       iph->saddr,
-                                      __constant_htons(PORT_ISAKMP),
+                                      htons(PORT_ISAKMP),
                                       iph->daddr,
-                                      __constant_htons(PORT_ISAKMP));
+                                      htons(PORT_ISAKMP));
        } else {
                cp = ip_vs_conn_in_get(IPPROTO_UDP,
                                       iph->daddr,
-                                      __constant_htons(PORT_ISAKMP),
+                                      htons(PORT_ISAKMP),
                                       iph->saddr,
-                                      __constant_htons(PORT_ISAKMP));
+                                      htons(PORT_ISAKMP));
        }
 
        if (!cp) {
@@ -89,15 +89,15 @@ ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
        if (likely(!inverse)) {
                cp = ip_vs_conn_out_get(IPPROTO_UDP,
                                        iph->saddr,
-                                       __constant_htons(PORT_ISAKMP),
+                                       htons(PORT_ISAKMP),
                                        iph->daddr,
-                                       __constant_htons(PORT_ISAKMP));
+                                       htons(PORT_ISAKMP));
        } else {
                cp = ip_vs_conn_out_get(IPPROTO_UDP,
                                        iph->daddr,
-                                       __constant_htons(PORT_ISAKMP),
+                                       htons(PORT_ISAKMP),
                                        iph->saddr,
-                                       __constant_htons(PORT_ISAKMP));
+                                       htons(PORT_ISAKMP));
        }
 
        if (!cp) {
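
The __constant_htons() to htons() changes above are cosmetic: htons() already folds to a compile-time constant when its argument is constant, so the __constant_ form is only needed where a strict integer constant expression is required (case labels, some static initializers). A minimal sketch of the idiom (the helper name is hypothetical; UDP port 500 is the usual ISAKMP port):

	#include <linux/types.h>
	#include <linux/udp.h>
	#include <asm/byteorder.h>

	#define EXAMPLE_ISAKMP_PORT 500

	/* htons() on a constant is folded at compile time, so this reads the
	 * same as __constant_htons() but is shorter and consistent. */
	static inline int example_is_isakmp(const struct udphdr *uh)
	{
		return uh->source == htons(EXAMPLE_ISAKMP_PORT) &&
		       uh->dest   == htons(EXAMPLE_ISAKMP_PORT);
	}
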
index 16a9ebe..e65577a 100644 (file)
@@ -76,16 +76,15 @@ tcp_conn_schedule(struct sk_buff *skb,
        struct ip_vs_service *svc;
        struct tcphdr _tcph, *th;
 
-       th = skb_header_pointer(skb, skb->nh.iph->ihl*4,
-                               sizeof(_tcph), &_tcph);
+       th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
        if (th == NULL) {
                *verdict = NF_DROP;
                return 0;
        }
 
        if (th->syn &&
-           (svc = ip_vs_service_get(skb->mark, skb->nh.iph->protocol,
-                                    skb->nh.iph->daddr, th->dest))) {
+           (svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol,
+                                    ip_hdr(skb)->daddr, th->dest))) {
                if (ip_vs_todrop()) {
                        /*
                         * It seems that we are very loaded.
@@ -127,7 +126,7 @@ tcp_snat_handler(struct sk_buff **pskb,
                 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
 {
        struct tcphdr *tcph;
-       unsigned int tcphoff = (*pskb)->nh.iph->ihl * 4;
+       const unsigned int tcphoff = ip_hdrlen(*pskb);
 
        /* csum_check requires unshared skb */
        if (!ip_vs_make_skb_writable(pskb, tcphoff+sizeof(*tcph)))
@@ -143,7 +142,7 @@ tcp_snat_handler(struct sk_buff **pskb,
                        return 0;
        }
 
-       tcph = (void *)(*pskb)->nh.iph + tcphoff;
+       tcph = (void *)ip_hdr(*pskb) + tcphoff;
        tcph->source = cp->vport;
 
        /* Adjust TCP checksums */
@@ -175,7 +174,7 @@ tcp_dnat_handler(struct sk_buff **pskb,
                 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
 {
        struct tcphdr *tcph;
-       unsigned int tcphoff = (*pskb)->nh.iph->ihl * 4;
+       const unsigned int tcphoff = ip_hdrlen(*pskb);
 
        /* csum_check requires unshared skb */
        if (!ip_vs_make_skb_writable(pskb, tcphoff+sizeof(*tcph)))
@@ -194,7 +193,7 @@ tcp_dnat_handler(struct sk_buff **pskb,
                        return 0;
        }
 
-       tcph = (void *)(*pskb)->nh.iph + tcphoff;
+       tcph = (void *)ip_hdr(*pskb) + tcphoff;
        tcph->dest = cp->dport;
 
        /*
@@ -224,15 +223,15 @@ tcp_dnat_handler(struct sk_buff **pskb,
 static int
 tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
 {
-       unsigned int tcphoff = skb->nh.iph->ihl*4;
+       const unsigned int tcphoff = ip_hdrlen(skb);
 
        switch (skb->ip_summed) {
        case CHECKSUM_NONE:
                skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
        case CHECKSUM_COMPLETE:
-               if (csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
+               if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                                      skb->len - tcphoff,
-                                     skb->nh.iph->protocol, skb->csum)) {
+                                     ip_hdr(skb)->protocol, skb->csum)) {
                        IP_VS_DBG_RL_PKT(0, pp, skb, 0,
                                         "Failed checksum for");
                        return 0;
@@ -467,8 +466,7 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
 {
        struct tcphdr _tcph, *th;
 
-       th = skb_header_pointer(skb, skb->nh.iph->ihl*4,
-                               sizeof(_tcph), &_tcph);
+       th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
        if (th == NULL)
                return 0;
 
index 03f0a41..8ee5fe6 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/udp.h>
 
 #include <net/ip_vs.h>
-
+#include <net/ip.h>
 
 static struct ip_vs_conn *
 udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
@@ -56,7 +56,7 @@ udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
        struct ip_vs_conn *cp;
        __be16 _ports[2], *pptr;
 
-       pptr = skb_header_pointer(skb, skb->nh.iph->ihl*4,
+       pptr = skb_header_pointer(skb, ip_hdrlen(skb),
                                  sizeof(_ports), _ports);
        if (pptr == NULL)
                return NULL;
@@ -82,15 +82,15 @@ udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
        struct ip_vs_service *svc;
        struct udphdr _udph, *uh;
 
-       uh = skb_header_pointer(skb, skb->nh.iph->ihl*4,
+       uh = skb_header_pointer(skb, ip_hdrlen(skb),
                                sizeof(_udph), &_udph);
        if (uh == NULL) {
                *verdict = NF_DROP;
                return 0;
        }
 
-       if ((svc = ip_vs_service_get(skb->mark, skb->nh.iph->protocol,
-                                    skb->nh.iph->daddr, uh->dest))) {
+       if ((svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol,
+                                    ip_hdr(skb)->daddr, uh->dest))) {
                if (ip_vs_todrop()) {
                        /*
                         * It seems that we are very loaded.
@@ -133,7 +133,7 @@ udp_snat_handler(struct sk_buff **pskb,
                 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
 {
        struct udphdr *udph;
-       unsigned int udphoff = (*pskb)->nh.iph->ihl * 4;
+       const unsigned int udphoff = ip_hdrlen(*pskb);
 
        /* csum_check requires unshared skb */
        if (!ip_vs_make_skb_writable(pskb, udphoff+sizeof(*udph)))
@@ -151,7 +151,7 @@ udp_snat_handler(struct sk_buff **pskb,
                        return 0;
        }
 
-       udph = (void *)(*pskb)->nh.iph + udphoff;
+       udph = (void *)ip_hdr(*pskb) + udphoff;
        udph->source = cp->vport;
 
        /*
@@ -187,7 +187,7 @@ udp_dnat_handler(struct sk_buff **pskb,
                 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
 {
        struct udphdr *udph;
-       unsigned int udphoff = (*pskb)->nh.iph->ihl * 4;
+       unsigned int udphoff = ip_hdrlen(*pskb);
 
        /* csum_check requires unshared skb */
        if (!ip_vs_make_skb_writable(pskb, udphoff+sizeof(*udph)))
@@ -206,7 +206,7 @@ udp_dnat_handler(struct sk_buff **pskb,
                        return 0;
        }
 
-       udph = (void *)(*pskb)->nh.iph + udphoff;
+       udph = (void *)ip_hdr(*pskb) + udphoff;
        udph->dest = cp->dport;
 
        /*
@@ -239,7 +239,7 @@ static int
 udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
 {
        struct udphdr _udph, *uh;
-       unsigned int udphoff = skb->nh.iph->ihl*4;
+       const unsigned int udphoff = ip_hdrlen(skb);
 
        uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
        if (uh == NULL)
@@ -251,10 +251,10 @@ udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
                        skb->csum = skb_checksum(skb, udphoff,
                                                 skb->len - udphoff, 0);
                case CHECKSUM_COMPLETE:
-                       if (csum_tcpudp_magic(skb->nh.iph->saddr,
-                                             skb->nh.iph->daddr,
+                       if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                                             ip_hdr(skb)->daddr,
                                              skb->len - udphoff,
-                                             skb->nh.iph->protocol,
+                                             ip_hdr(skb)->protocol,
                                              skb->csum)) {
                                IP_VS_DBG_RL_PKT(0, pp, skb, 0,
                                                 "Failed checksum for");
index 338668f..1b25b00 100644 (file)
@@ -201,7 +201,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 {
        struct ip_vs_dest *dest;
        struct ip_vs_sh_bucket *tbl;
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
 
        IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n");
 
index e1f77bd..900ce29 100644 (file)
@@ -156,7 +156,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                  struct ip_vs_protocol *pp)
 {
        struct rtable *rt;                      /* Route to the other host */
-       struct iphdr  *iph = skb->nh.iph;
+       struct iphdr  *iph = ip_hdr(skb);
        u8     tos = iph->tos;
        int    mtu;
        struct flowi fl = {
@@ -178,7 +178,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->u.dst);
-       if ((skb->len > mtu) && (iph->frag_off&__constant_htons(IP_DF))) {
+       if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
                ip_rt_put(rt);
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("ip_vs_bypass_xmit(): frag needed\n");
@@ -193,7 +193,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                ip_rt_put(rt);
                return NF_STOLEN;
        }
-       ip_send_check(skb->nh.iph);
+       ip_send_check(ip_hdr(skb));
 
        /* drop old route */
        dst_release(skb->dst);
@@ -226,7 +226,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 {
        struct rtable *rt;              /* Route to the other host */
        int mtu;
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
 
        EnterFunction(10);
 
@@ -245,7 +245,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->u.dst);
-       if ((skb->len > mtu) && (iph->frag_off&__constant_htons(IP_DF))) {
+       if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
                ip_rt_put(rt);
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL_PKT(0, pp, skb, 0, "ip_vs_nat_xmit(): frag needed for");
@@ -266,8 +266,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        /* mangle the packet */
        if (pp->dnat_handler && !pp->dnat_handler(&skb, pp, cp))
                goto tx_error;
-       skb->nh.iph->daddr = cp->daddr;
-       ip_send_check(skb->nh.iph);
+       ip_hdr(skb)->daddr = cp->daddr;
+       ip_send_check(ip_hdr(skb));
 
        IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
 
@@ -320,19 +320,20 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 {
        struct rtable *rt;                      /* Route to the other host */
        struct net_device *tdev;                /* Device to other host */
-       struct iphdr  *old_iph = skb->nh.iph;
+       struct iphdr  *old_iph = ip_hdr(skb);
        u8     tos = old_iph->tos;
        __be16 df = old_iph->frag_off;
+       sk_buff_data_t old_transport_header = skb->transport_header;
        struct iphdr  *iph;                     /* Our new IP header */
        int    max_headroom;                    /* The extra header space needed */
        int    mtu;
 
        EnterFunction(10);
 
-       if (skb->protocol != __constant_htons(ETH_P_IP)) {
+       if (skb->protocol != htons(ETH_P_IP)) {
                IP_VS_DBG_RL("ip_vs_tunnel_xmit(): protocol error, "
                             "ETH_P_IP: %d, skb protocol: %d\n",
-                            __constant_htons(ETH_P_IP), skb->protocol);
+                            htons(ETH_P_IP), skb->protocol);
                goto tx_error;
        }
 
@@ -350,9 +351,9 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        if (skb->dst)
                skb->dst->ops->update_pmtu(skb->dst, mtu);
 
-       df |= (old_iph->frag_off&__constant_htons(IP_DF));
+       df |= (old_iph->frag_off & htons(IP_DF));
 
-       if ((old_iph->frag_off&__constant_htons(IP_DF))
+       if ((old_iph->frag_off & htons(IP_DF))
            && mtu < ntohs(old_iph->tot_len)) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                ip_rt_put(rt);
@@ -377,15 +378,16 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                }
                kfree_skb(skb);
                skb = new_skb;
-               old_iph = skb->nh.iph;
+               old_iph = ip_hdr(skb);
        }
 
-       skb->h.raw = (void *) old_iph;
+       skb->transport_header = old_transport_header;
 
        /* fix old IP header checksum */
        ip_send_check(old_iph);
 
-       skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
+       skb_push(skb, sizeof(struct iphdr));
+       skb_reset_network_header(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
        /* drop old route */
@@ -395,7 +397,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        /*
         *      Push down and install the IPIP header.
         */
-       iph                     =       skb->nh.iph;
+       iph                     =       ip_hdr(skb);
        iph->version            =       4;
        iph->ihl                =       sizeof(struct iphdr)>>2;
        iph->frag_off           =       df;
@@ -435,7 +437,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
              struct ip_vs_protocol *pp)
 {
        struct rtable *rt;                      /* Route to the other host */
-       struct iphdr  *iph = skb->nh.iph;
+       struct iphdr  *iph = ip_hdr(skb);
        int    mtu;
 
        EnterFunction(10);
@@ -445,7 +447,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->u.dst);
-       if ((iph->frag_off&__constant_htons(IP_DF)) && skb->len > mtu) {
+       if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                ip_rt_put(rt);
                IP_VS_DBG_RL("ip_vs_dr_xmit(): frag needed\n");
@@ -460,7 +462,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                ip_rt_put(rt);
                return NF_STOLEN;
        }
-       ip_send_check(skb->nh.iph);
+       ip_send_check(ip_hdr(skb));
 
        /* drop old route */
        dst_release(skb->dst);
@@ -514,12 +516,12 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
         * mangle and send the packet here (only for VS/NAT)
         */
 
-       if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(skb->nh.iph->tos))))
+       if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(ip_hdr(skb)->tos))))
                goto tx_error_icmp;
 
        /* MTU checking */
        mtu = dst_mtu(&rt->u.dst);
-       if ((skb->len > mtu) && (skb->nh.iph->frag_off&__constant_htons(IP_DF))) {
+       if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
                ip_rt_put(rt);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n");
index 574c735..b03c5ca 100644 (file)
@@ -100,7 +100,7 @@ static int drr_dev_event(struct notifier_block *this,
 
                spin_unlock_bh(&state_lock);
                break;
-       };
+       }
 
        return NOTIFY_DONE;
 }
index 6069a11..b441929 100644 (file)
@@ -10,7 +10,7 @@
 /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
 int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type)
 {
-       struct iphdr *iph = (*pskb)->nh.iph;
+       const struct iphdr *iph = ip_hdr(*pskb);
        struct rtable *rt;
        struct flowi fl = {};
        struct dst_entry *odst;
@@ -142,7 +142,7 @@ static void nf_ip_saveroute(const struct sk_buff *skb, struct nf_info *info)
        struct ip_rt_info *rt_info = nf_info_reroute(info);
 
        if (info->hook == NF_IP_LOCAL_OUT) {
-               const struct iphdr *iph = skb->nh.iph;
+               const struct iphdr *iph = ip_hdr(skb);
 
                rt_info->tos = iph->tos;
                rt_info->daddr = iph->daddr;
@@ -155,7 +155,7 @@ static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info)
        const struct ip_rt_info *rt_info = nf_info_reroute(info);
 
        if (info->hook == NF_IP_LOCAL_OUT) {
-               struct iphdr *iph = (*pskb)->nh.iph;
+               const struct iphdr *iph = ip_hdr(*pskb);
 
                if (!(iph->tos == rt_info->tos
                      && iph->daddr == rt_info->daddr
@@ -168,7 +168,7 @@ static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info)
 __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
                            unsigned int dataoff, u_int8_t protocol)
 {
-       struct iphdr *iph = skb->nh.iph;
+       const struct iphdr *iph = ip_hdr(skb);
        __sum16 csum = 0;
 
        switch (skb->ip_summed) {
index 601808c..46509fa 100644 (file)
@@ -30,188 +30,6 @@ config NF_CONNTRACK_PROC_COMPAT
 
          If unsure, say Y.
 
-# connection tracking, helpers and protocols
-config IP_NF_CT_ACCT
-       bool "Connection tracking flow accounting"
-       depends on IP_NF_CONNTRACK
-       help
-         If this option is enabled, the connection tracking code will
-         keep per-flow packet and byte counters.
-
-         Those counters can be used for flow-based accounting or the
-         `connbytes' match.
-
-         If unsure, say `N'.
-
-config IP_NF_CONNTRACK_MARK
-       bool  'Connection mark tracking support'
-       depends on IP_NF_CONNTRACK
-       help
-         This option enables support for connection marks, used by the
-         `CONNMARK' target and `connmark' match. Similar to the mark value
-         of packets, but this mark value is kept in the conntrack session
-         instead of the individual packets.
-       
-config IP_NF_CONNTRACK_SECMARK
-       bool  'Connection tracking security mark support'
-       depends on IP_NF_CONNTRACK && NETWORK_SECMARK
-       help
-         This option enables security markings to be applied to
-         connections.  Typically they are copied to connections from
-         packets using the CONNSECMARK target and copied back from
-         connections to packets with the same target, with the packets
-         being originally labeled via SECMARK.
-
-         If unsure, say 'N'.
-
-config IP_NF_CONNTRACK_EVENTS
-       bool "Connection tracking events (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && IP_NF_CONNTRACK
-       help
-         If this option is enabled, the connection tracking code will
-         provide a notifier chain that can be used by other kernel code
-         to get notified about changes in the connection tracking state.
-         
-         IF unsure, say `N'.
-
-config IP_NF_CONNTRACK_NETLINK
-       tristate 'Connection tracking netlink interface (EXPERIMENTAL)'
-       depends on EXPERIMENTAL && IP_NF_CONNTRACK && NETFILTER_NETLINK
-       depends on IP_NF_CONNTRACK!=y || NETFILTER_NETLINK!=m
-       depends on IP_NF_NAT=n || IP_NF_NAT
-       help
-         This option enables support for a netlink-based userspace interface
-
-
-config IP_NF_CT_PROTO_SCTP
-       tristate  'SCTP protocol connection tracking support (EXPERIMENTAL)'
-       depends on IP_NF_CONNTRACK && EXPERIMENTAL
-       help
-         With this option enabled, the connection tracking code will
-         be able to do state tracking on SCTP connections.
-
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/modules.txt>.  If unsure, say `N'.
-
-config IP_NF_FTP
-       tristate "FTP protocol support"
-       depends on IP_NF_CONNTRACK
-       help
-         Tracking FTP connections is problematic: special helpers are
-         required for tracking them, and doing masquerading and other forms
-         of Network Address Translation on them.
-
-         To compile it as a module, choose M here.  If unsure, say Y.
-
-config IP_NF_IRC
-       tristate "IRC protocol support"
-       depends on IP_NF_CONNTRACK
-       ---help---
-         There is a commonly-used extension to IRC called
-         Direct Client-to-Client Protocol (DCC).  This enables users to send
-         files to each other, and also chat to each other without the need
-         of a server.  DCC Sending is used anywhere you send files over IRC,
-         and DCC Chat is most commonly used by Eggdrop bots.  If you are
-         using NAT, this extension will enable you to send files and initiate
-         chats.  Note that you do NOT need this extension to get files or
-         have others initiate chats, or everything else in IRC.
-
-         To compile it as a module, choose M here.  If unsure, say Y.
-
-config IP_NF_NETBIOS_NS
-       tristate "NetBIOS name service protocol support (EXPERIMENTAL)"
-       depends on IP_NF_CONNTRACK && EXPERIMENTAL
-       help
-         NetBIOS name service requests are sent as broadcast messages from an
-         unprivileged port and responded to with unicast messages to the
-         same port. This make them hard to firewall properly because connection
-         tracking doesn't deal with broadcasts. This helper tracks locally
-         originating NetBIOS name service requests and the corresponding
-         responses. It relies on correct IP address configuration, specifically
-         netmask and broadcast address. When properly configured, the output
-         of "ip address show" should look similar to this:
-
-         $ ip -4 address show eth0
-         4: eth0: <BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast qlen 1000
-             inet 172.16.2.252/24 brd 172.16.2.255 scope global eth0
-         
-         To compile it as a module, choose M here.  If unsure, say N.
-
-config IP_NF_TFTP
-       tristate "TFTP protocol support"
-       depends on IP_NF_CONNTRACK
-       help
-         TFTP connection tracking helper, this is required depending
-         on how restrictive your ruleset is.
-         If you are using a tftp client behind -j SNAT or -j MASQUERADING
-         you will need this.
-
-         To compile it as a module, choose M here.  If unsure, say Y.
-
-config IP_NF_AMANDA
-       tristate "Amanda backup protocol support"
-       depends on IP_NF_CONNTRACK
-       select TEXTSEARCH
-       select TEXTSEARCH_KMP
-       help
-         If you are running the Amanda backup package <http://www.amanda.org/>
-         on this machine or machines that will be MASQUERADED through this
-         machine, then you may want to enable this feature.  This allows the
-         connection tracking and natting code to allow the sub-channels that
-         Amanda requires for communication of the backup data, messages and
-         index.
-
-         To compile it as a module, choose M here.  If unsure, say Y.
-
-config IP_NF_PPTP
-       tristate  'PPTP protocol support'
-       depends on IP_NF_CONNTRACK
-       help
-         This module adds support for PPTP (Point to Point Tunnelling
-         Protocol, RFC2637) connection tracking and NAT. 
-       
-         If you are running PPTP sessions over a stateful firewall or NAT
-         box, you may want to enable this feature.  
-       
-         Please note that not all PPTP modes of operation are supported yet.
-         For more info, read top of the file
-         net/ipv4/netfilter/ip_conntrack_pptp.c
-       
-         If you want to compile it as a module, say M here and read
-         Documentation/modules.txt.  If unsure, say `N'.
-
-config IP_NF_H323
-       tristate  'H.323 protocol support (EXPERIMENTAL)'
-       depends on IP_NF_CONNTRACK && EXPERIMENTAL
-       help
-         H.323 is a VoIP signalling protocol from ITU-T. As one of the most
-         important VoIP protocols, it is widely used by voice hardware and
-         software including voice gateways, IP phones, Netmeeting, OpenPhone,
-         Gnomemeeting, etc.
-
-         With this module you can support H.323 on a connection tracking/NAT
-         firewall.
-
-         This module supports RAS, Fast Start, H.245 Tunnelling, Call
-         Forwarding, RTP/RTCP and T.120 based audio, video, fax, chat,
-         whiteboard, file transfer, etc. For more information, please
-         visit http://nath323.sourceforge.net/.
-
-         If you want to compile it as a module, say 'M' here and read
-         Documentation/modules.txt.  If unsure, say 'N'.
-
-config IP_NF_SIP
-       tristate "SIP protocol support (EXPERIMENTAL)"
-       depends on IP_NF_CONNTRACK && EXPERIMENTAL
-       help
-         SIP is an application-layer control protocol that can establish,
-         modify, and terminate multimedia sessions (conferences) such as
-         Internet telephony calls. With the ip_conntrack_sip and
-         the ip_nat_sip modules you can support the protocol on a connection
-         tracking/NATing firewall.
-
-         To compile it as a module, choose M here.  If unsure, say Y.
-
 config IP_NF_QUEUE
        tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
        help
@@ -361,17 +179,6 @@ config IP_NF_TARGET_ULOG
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-# NAT + specific targets: ip_conntrack
-config IP_NF_NAT
-       tristate "Full NAT"
-       depends on IP_NF_IPTABLES && IP_NF_CONNTRACK
-       help
-         The Full NAT option allows masquerading, port forwarding and other
-         forms of full Network Address Port Translation.  It is controlled by
-         the `nat' table in iptables: see the man page for iptables(8).
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 # NAT + specific targets: nf_conntrack
 config NF_NAT
        tristate "Full NAT"
@@ -383,11 +190,6 @@ config NF_NAT
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP_NF_NAT_NEEDED
-       bool
-       depends on IP_NF_NAT
-       default y
-
 config NF_NAT_NEEDED
        bool
        depends on NF_NAT
@@ -395,7 +197,7 @@ config NF_NAT_NEEDED
 
 config IP_NF_TARGET_MASQUERADE
        tristate "MASQUERADE target support"
-       depends on (NF_NAT || IP_NF_NAT)
+       depends on NF_NAT
        help
          Masquerading is a special case of NAT: all outgoing connections are
          changed to seem to come from a particular interface's address, and
@@ -407,7 +209,7 @@ config IP_NF_TARGET_MASQUERADE
 
 config IP_NF_TARGET_REDIRECT
        tristate "REDIRECT target support"
-       depends on (NF_NAT || IP_NF_NAT)
+       depends on NF_NAT
        help
          REDIRECT is a special case of NAT: all incoming connections are
          mapped onto the incoming interface's address, causing the packets to
@@ -418,7 +220,7 @@ config IP_NF_TARGET_REDIRECT
 
 config IP_NF_TARGET_NETMAP
        tristate "NETMAP target support"
-       depends on (NF_NAT || IP_NF_NAT)
+       depends on NF_NAT
        help
          NETMAP is an implementation of static 1:1 NAT mapping of network
          addresses. It maps the network address part, while keeping the host
@@ -429,28 +231,13 @@ config IP_NF_TARGET_NETMAP
 
 config IP_NF_TARGET_SAME
        tristate "SAME target support"
-       depends on (NF_NAT || IP_NF_NAT)
+       depends on NF_NAT
        help
          This option adds a `SAME' target, which works like the standard SNAT
          target, but attempts to give clients the same IP for all connections.
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP_NF_NAT_SNMP_BASIC
-       tristate "Basic SNMP-ALG support (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && IP_NF_NAT
-       ---help---
-
-         This module implements an Application Layer Gateway (ALG) for
-         SNMP payloads.  In conjunction with NAT, it allows a network
-         management system to access multiple private networks with
-         conflicting addresses.  It works by modifying IP addresses
-         inside SNMP payloads to match IP-layer NAT mapping.
-
-         This is the "basic" form of SNMP-ALG, as described in RFC 2962
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config NF_NAT_SNMP_BASIC
        tristate "Basic SNMP-ALG support (EXPERIMENTAL)"
        depends on EXPERIMENTAL && NF_NAT
@@ -477,78 +264,37 @@ config NF_NAT_PROTO_GRE
        tristate
        depends on NF_NAT && NF_CT_PROTO_GRE
 
-config IP_NF_NAT_FTP
-       tristate
-       depends on IP_NF_IPTABLES && IP_NF_CONNTRACK && IP_NF_NAT
-       default IP_NF_NAT && IP_NF_FTP
-
 config NF_NAT_FTP
        tristate
        depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
        default NF_NAT && NF_CONNTRACK_FTP
 
-config IP_NF_NAT_IRC
-       tristate
-       depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n
-       default IP_NF_NAT if IP_NF_IRC=y
-       default m if IP_NF_IRC=m
-
 config NF_NAT_IRC
        tristate
        depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
        default NF_NAT && NF_CONNTRACK_IRC
 
-config IP_NF_NAT_TFTP
-       tristate
-       depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n
-       default IP_NF_NAT if IP_NF_TFTP=y
-       default m if IP_NF_TFTP=m
-
 config NF_NAT_TFTP
        tristate
        depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
        default NF_NAT && NF_CONNTRACK_TFTP
 
-config IP_NF_NAT_AMANDA
-       tristate
-       depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n
-       default IP_NF_NAT if IP_NF_AMANDA=y
-       default m if IP_NF_AMANDA=m
-
 config NF_NAT_AMANDA
        tristate
        depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
        default NF_NAT && NF_CONNTRACK_AMANDA
 
-config IP_NF_NAT_PPTP
-       tristate
-       depends on IP_NF_NAT!=n && IP_NF_PPTP!=n
-       default IP_NF_NAT if IP_NF_PPTP=y
-       default m if IP_NF_PPTP=m
-
 config NF_NAT_PPTP
        tristate
        depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
        default NF_NAT && NF_CONNTRACK_PPTP
        select NF_NAT_PROTO_GRE
 
-config IP_NF_NAT_H323
-       tristate
-       depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n
-       default IP_NF_NAT if IP_NF_H323=y
-       default m if IP_NF_H323=m
-
 config NF_NAT_H323
        tristate
        depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
        default NF_NAT && NF_CONNTRACK_H323
 
-config IP_NF_NAT_SIP
-       tristate
-       depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n
-       default IP_NF_NAT if IP_NF_SIP=y
-       default m if IP_NF_SIP=m
-
 config NF_NAT_SIP
        tristate
        depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
@@ -606,9 +352,8 @@ config IP_NF_TARGET_TTL
 config IP_NF_TARGET_CLUSTERIP
        tristate "CLUSTERIP target support (EXPERIMENTAL)"
        depends on IP_NF_MANGLE && EXPERIMENTAL
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
-       select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK
-       select NF_CONNTRACK_MARK if NF_CONNTRACK_IPV4
+       depends on NF_CONNTRACK_IPV4
+       select NF_CONNTRACK_MARK
        help
          The CLUSTERIP target allows you to build load-balancing clusters of
          network servers without having a dedicated load-balancing
index 6625ec6..409d273 100644 (file)
@@ -2,8 +2,6 @@
 # Makefile for the netfilter modules on top of IPv4.
 #
 
-# objects for the standalone - connection tracking / NAT
-ip_conntrack-objs      := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o
 # objects for l3 independent conntrack
 nf_conntrack_ipv4-objs  :=  nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
 ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y)
@@ -12,53 +10,14 @@ nf_conntrack_ipv4-objs      += nf_conntrack_l3proto_ipv4_compat.o
 endif
 endif
 
-ip_nat-objs    := ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o
-nf_nat-objs    := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
-ifneq ($(CONFIG_NF_NAT),)
+nf_nat-objs            := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
 iptable_nat-objs       := nf_nat_rule.o nf_nat_standalone.o
-else
-iptable_nat-objs       := ip_nat_rule.o ip_nat_standalone.o
-endif
-
-ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o
-ip_nat_pptp-objs       := ip_nat_helper_pptp.o ip_nat_proto_gre.o
-
-ip_conntrack_h323-objs := ip_conntrack_helper_h323.o ../../netfilter/nf_conntrack_h323_asn1.o
-ip_nat_h323-objs := ip_nat_helper_h323.o
 
 # connection tracking
-obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o
 obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
 
-obj-$(CONFIG_IP_NF_NAT) += ip_nat.o
 obj-$(CONFIG_NF_NAT) += nf_nat.o
 
-# conntrack netlink interface
-obj-$(CONFIG_IP_NF_CONNTRACK_NETLINK) += ip_conntrack_netlink.o
-
-
-# SCTP protocol connection tracking
-obj-$(CONFIG_IP_NF_CT_PROTO_SCTP) += ip_conntrack_proto_sctp.o
-
-# connection tracking helpers
-obj-$(CONFIG_IP_NF_H323) += ip_conntrack_h323.o
-obj-$(CONFIG_IP_NF_PPTP) += ip_conntrack_pptp.o
-obj-$(CONFIG_IP_NF_AMANDA) += ip_conntrack_amanda.o
-obj-$(CONFIG_IP_NF_TFTP) += ip_conntrack_tftp.o
-obj-$(CONFIG_IP_NF_FTP) += ip_conntrack_ftp.o
-obj-$(CONFIG_IP_NF_IRC) += ip_conntrack_irc.o
-obj-$(CONFIG_IP_NF_SIP) += ip_conntrack_sip.o
-obj-$(CONFIG_IP_NF_NETBIOS_NS) += ip_conntrack_netbios_ns.o
-
-# NAT helpers (ip_conntrack)
-obj-$(CONFIG_IP_NF_NAT_H323) += ip_nat_h323.o
-obj-$(CONFIG_IP_NF_NAT_PPTP) += ip_nat_pptp.o
-obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o
-obj-$(CONFIG_IP_NF_NAT_TFTP) += ip_nat_tftp.o
-obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o
-obj-$(CONFIG_IP_NF_NAT_IRC) += ip_nat_irc.o
-obj-$(CONFIG_IP_NF_NAT_SIP) += ip_nat_sip.o
-
 # NAT helpers (nf_conntrack)
 obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o
 obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o
@@ -78,7 +37,6 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 # the three instances of ip_tables
 obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
 obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
-obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 
@@ -100,7 +58,6 @@ obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
 obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
 obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
 obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
-obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o
 obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
index 57b0221..cae4121 100644 (file)
@@ -245,7 +245,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
        e = get_entry(table_base, private->hook_entry[hook]);
        back = get_entry(table_base, private->underflow[hook]);
 
-       arp = (*pskb)->nh.arph;
+       arp = arp_hdr(*pskb);
        do {
                if (arp_packet_match(arp, (*pskb)->dev, indev, outdev, &e->arp)) {
                        struct arpt_entry_target *t;
@@ -297,7 +297,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
                                                                     t->data);
 
                                /* Target might have changed stuff. */
-                               arp = (*pskb)->nh.arph;
+                               arp = arp_hdr(*pskb);
 
                                if (verdict == ARPT_CONTINUE)
                                        e = (void *)e + e->next_offset;
index 709db4d..6298d40 100644 (file)
@@ -30,35 +30,35 @@ target(struct sk_buff **pskb,
                *pskb = nskb;
        }
 
-       arp = (*pskb)->nh.arph;
-       arpptr = (*pskb)->nh.raw + sizeof(*arp);
+       arp = arp_hdr(*pskb);
+       arpptr = skb_network_header(*pskb) + sizeof(*arp);
        pln = arp->ar_pln;
        hln = arp->ar_hln;
        /* We assume that pln and hln were checked in the match */
        if (mangle->flags & ARPT_MANGLE_SDEV) {
                if (ARPT_DEV_ADDR_LEN_MAX < hln ||
-                  (arpptr + hln > (**pskb).tail))
+                  (arpptr + hln > skb_tail_pointer(*pskb)))
                        return NF_DROP;
                memcpy(arpptr, mangle->src_devaddr, hln);
        }
        arpptr += hln;
        if (mangle->flags & ARPT_MANGLE_SIP) {
                if (ARPT_MANGLE_ADDR_LEN_MAX < pln ||
-                  (arpptr + pln > (**pskb).tail))
+                  (arpptr + pln > skb_tail_pointer(*pskb)))
                        return NF_DROP;
                memcpy(arpptr, &mangle->u_s.src_ip, pln);
        }
        arpptr += pln;
        if (mangle->flags & ARPT_MANGLE_TDEV) {
                if (ARPT_DEV_ADDR_LEN_MAX < hln ||
-                  (arpptr + hln > (**pskb).tail))
+                  (arpptr + hln > skb_tail_pointer(*pskb)))
                        return NF_DROP;
                memcpy(arpptr, mangle->tgt_devaddr, hln);
        }
        arpptr += hln;
        if (mangle->flags & ARPT_MANGLE_TIP) {
                if (ARPT_MANGLE_ADDR_LEN_MAX < pln ||
-                  (arpptr + pln > (**pskb).tail))
+                  (arpptr + pln > skb_tail_pointer(*pskb)))
                        return NF_DROP;
                memcpy(arpptr, &mangle->u_t.tgt_ip, pln);
        }
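
The arpt_mangle hunk above switches from reading skb->tail directly to skb_tail_pointer(), which keeps working now that the tail field may be stored as an offset (sk_buff_data_t) instead of a pointer. A minimal bounds-check sketch in the same spirit (hypothetical helper name):

	#include <linux/skbuff.h>

	/* True if `len` bytes starting at `p` still lie inside the skb's
	 * linear data area. */
	static inline int example_fits_in_skb(const struct sk_buff *skb,
					      const unsigned char *p,
					      unsigned int len)
	{
		return p + len <= skb_tail_pointer(skb);
	}
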
diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c
deleted file mode 100644 (file)
index 4f561f5..0000000
--- a/net/ipv4/netfilter/ip_conntrack_amanda.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/* Amanda extension for IP connection tracking, Version 0.2
- * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca>
- * based on HW's ip_conntrack_irc.c as well as other modules
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- *
- *     Module load syntax:
- *     insmod ip_conntrack_amanda.o [master_timeout=n]
- *
- *     Where master_timeout is the timeout (in seconds) of the master
- *     connection (port 10080).  This defaults to 5 minutes but if
- *     your clients take longer than 5 minutes to do their work
- *     before getting back to the Amanda server, you can increase
- *     this value.
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/textsearch.h>
-#include <linux/skbuff.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_amanda.h>
-
-static unsigned int master_timeout = 300;
-static char *ts_algo = "kmp";
-
-MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
-MODULE_DESCRIPTION("Amanda connection tracking module");
-MODULE_LICENSE("GPL");
-module_param(master_timeout, uint, 0600);
-MODULE_PARM_DESC(master_timeout, "timeout for the master connection");
-module_param(ts_algo, charp, 0400);
-MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)");
-
-unsigned int (*ip_nat_amanda_hook)(struct sk_buff **pskb,
-                                  enum ip_conntrack_info ctinfo,
-                                  unsigned int matchoff,
-                                  unsigned int matchlen,
-                                  struct ip_conntrack_expect *exp);
-EXPORT_SYMBOL_GPL(ip_nat_amanda_hook);
-
-enum amanda_strings {
-       SEARCH_CONNECT,
-       SEARCH_NEWLINE,
-       SEARCH_DATA,
-       SEARCH_MESG,
-       SEARCH_INDEX,
-};
-
-static struct {
-       char                    *string;
-       size_t                  len;
-       struct ts_config        *ts;
-} search[] = {
-       [SEARCH_CONNECT] = {
-               .string = "CONNECT ",
-               .len    = 8,
-       },
-       [SEARCH_NEWLINE] = {
-               .string = "\n",
-               .len    = 1,
-       },
-       [SEARCH_DATA] = {
-               .string = "DATA ",
-               .len    = 5,
-       },
-       [SEARCH_MESG] = {
-               .string = "MESG ",
-               .len    = 5,
-       },
-       [SEARCH_INDEX] = {
-               .string = "INDEX ",
-               .len    = 6,
-       },
-};
-
-static int help(struct sk_buff **pskb,
-               struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
-{
-       struct ts_state ts;
-       struct ip_conntrack_expect *exp;
-       unsigned int dataoff, start, stop, off, i;
-       char pbuf[sizeof("65535")], *tmp;
-       u_int16_t port, len;
-       int ret = NF_ACCEPT;
-       typeof(ip_nat_amanda_hook) ip_nat_amanda;
-
-       /* Only look at packets from the Amanda server */
-       if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
-               return NF_ACCEPT;
-
-       /* increase the UDP timeout of the master connection as replies from
-        * Amanda clients to the server can be quite delayed */
-       ip_ct_refresh(ct, *pskb, master_timeout * HZ);
-
-       /* No data? */
-       dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
-       if (dataoff >= (*pskb)->len) {
-               if (net_ratelimit())
-                       printk("amanda_help: skblen = %u\n", (*pskb)->len);
-               return NF_ACCEPT;
-       }
-
-       memset(&ts, 0, sizeof(ts));
-       start = skb_find_text(*pskb, dataoff, (*pskb)->len,
-                             search[SEARCH_CONNECT].ts, &ts);
-       if (start == UINT_MAX)
-               goto out;
-       start += dataoff + search[SEARCH_CONNECT].len;
-
-       memset(&ts, 0, sizeof(ts));
-       stop = skb_find_text(*pskb, start, (*pskb)->len,
-                            search[SEARCH_NEWLINE].ts, &ts);
-       if (stop == UINT_MAX)
-               goto out;
-       stop += start;
-
-       for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) {
-               memset(&ts, 0, sizeof(ts));
-               off = skb_find_text(*pskb, start, stop, search[i].ts, &ts);
-               if (off == UINT_MAX)
-                       continue;
-               off += start + search[i].len;
-
-               len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off);
-               if (skb_copy_bits(*pskb, off, pbuf, len))
-                       break;
-               pbuf[len] = '\0';
-
-               port = simple_strtoul(pbuf, &tmp, 10);
-               len = tmp - pbuf;
-               if (port == 0 || len > 5)
-                       break;
-
-               exp = ip_conntrack_expect_alloc(ct);
-               if (exp == NULL) {
-                       ret = NF_DROP;
-                       goto out;
-               }
-
-               exp->expectfn = NULL;
-               exp->flags = 0;
-
-               exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
-               exp->tuple.src.u.tcp.port = 0;
-               exp->tuple.dst.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
-               exp->tuple.dst.protonum = IPPROTO_TCP;
-               exp->tuple.dst.u.tcp.port = htons(port);
-
-               exp->mask.src.ip = htonl(0xFFFFFFFF);
-               exp->mask.src.u.tcp.port = 0;
-               exp->mask.dst.ip = htonl(0xFFFFFFFF);
-               exp->mask.dst.protonum = 0xFF;
-               exp->mask.dst.u.tcp.port = htons(0xFFFF);
-
-               /* RCU read locked by nf_hook_slow */
-               ip_nat_amanda = rcu_dereference(ip_nat_amanda_hook);
-               if (ip_nat_amanda)
-                       ret = ip_nat_amanda(pskb, ctinfo, off - dataoff,
-                                           len, exp);
-               else if (ip_conntrack_expect_related(exp) != 0)
-                       ret = NF_DROP;
-               ip_conntrack_expect_put(exp);
-       }
-
-out:
-       return ret;
-}
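/*
 * Editorial sketch, not part of the original file: a minimal user-space
 * rendition of the parsing that help() above performs with skb_find_text()
 * and simple_strtoul().  The sample reply string is an assumed example of
 * the Amanda "CONNECT" reply format, shown only to illustrate the flow.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *reply = "CONNECT DATA 50002 MESG 50003 INDEX 50004\n";
	const char *keys[] = { "DATA ", "MESG ", "INDEX " };
	const char *start, *stop;
	unsigned int i;

	start = strstr(reply, "CONNECT ");	/* like SEARCH_CONNECT */
	if (!start)
		return 1;
	start += strlen("CONNECT ");
	stop = strchr(start, '\n');		/* like SEARCH_NEWLINE */
	if (!stop)
		return 1;

	for (i = 0; i < 3; i++) {
		const char *p = strstr(start, keys[i]);
		unsigned long port;

		if (!p || p >= stop)
			continue;
		port = strtoul(p + strlen(keys[i]), NULL, 10);
		/* the kernel helper builds one TCP expectation per port here */
		printf("%s-> expect TCP port %lu\n", keys[i], port);
	}
	return 0;
}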
-
-static struct ip_conntrack_helper amanda_helper = {
-       .max_expected = 3,
-       .timeout = 180,
-       .me = THIS_MODULE,
-       .help = help,
-       .name = "amanda",
-
-       .tuple = { .src = { .u = { .udp = {.port = __constant_htons(10080) } } },
-                  .dst = { .protonum = IPPROTO_UDP },
-       },
-       .mask = { .src = { .u = { 0xFFFF } },
-                .dst = { .protonum = 0xFF },
-       },
-};
-
-static void __exit ip_conntrack_amanda_fini(void)
-{
-       int i;
-
-       ip_conntrack_helper_unregister(&amanda_helper);
-       for (i = 0; i < ARRAY_SIZE(search); i++)
-               textsearch_destroy(search[i].ts);
-}
-
-static int __init ip_conntrack_amanda_init(void)
-{
-       int ret, i;
-
-       ret = -ENOMEM;
-       for (i = 0; i < ARRAY_SIZE(search); i++) {
-               search[i].ts = textsearch_prepare(ts_algo, search[i].string,
-                                                 search[i].len,
-                                                 GFP_KERNEL, TS_AUTOLOAD);
-               if (search[i].ts == NULL)
-                       goto err;
-       }
-       ret = ip_conntrack_helper_register(&amanda_helper);
-       if (ret < 0)
-               goto err;
-       return 0;
-
-err:
-       while (--i >= 0) {
-               if (search[i].ts)
-                       textsearch_destroy(search[i].ts);
-       }
-       return ret;
-}
-
-module_init(ip_conntrack_amanda_init);
-module_exit(ip_conntrack_amanda_fini);
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
deleted file mode 100644 (file)
index 23b99ae..0000000
+++ /dev/null
@@ -1,1550 +0,0 @@
-/* Connection state tracking for netfilter.  This is separated from,
-   but required by, the NAT layer; it can also be used by an iptables
-   extension. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
- *     - new API and handling of conntrack/nat helpers
- *     - now capable of multiple expectations for one master
- * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
- *     - add usage/reference counts to ip_conntrack_expect
- *     - export ip_conntrack[_expect]_{find_get,put} functions
- * */
-
-#include <linux/types.h>
-#include <linux/icmp.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/proc_fs.h>
-#include <linux/vmalloc.h>
-#include <net/checksum.h>
-#include <net/ip.h>
-#include <linux/stddef.h>
-#include <linux/sysctl.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/jhash.h>
-#include <linux/err.h>
-#include <linux/percpu.h>
-#include <linux/moduleparam.h>
-#include <linux/notifier.h>
-
-/* ip_conntrack_lock protects the main hash table, protocol/helper/expected
-   registrations, conntrack timers*/
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-
-#define IP_CONNTRACK_VERSION   "2.4"
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-DEFINE_RWLOCK(ip_conntrack_lock);
-
-/* ip_conntrack_standalone needs this */
-atomic_t ip_conntrack_count = ATOMIC_INIT(0);
-
-void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL;
-LIST_HEAD(ip_conntrack_expect_list);
-struct ip_conntrack_protocol *ip_ct_protos[MAX_IP_CT_PROTO] __read_mostly;
-static LIST_HEAD(helpers);
-unsigned int ip_conntrack_htable_size __read_mostly = 0;
-int ip_conntrack_max __read_mostly;
-struct list_head *ip_conntrack_hash __read_mostly;
-static struct kmem_cache *ip_conntrack_cachep __read_mostly;
-static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly;
-struct ip_conntrack ip_conntrack_untracked;
-unsigned int ip_ct_log_invalid __read_mostly;
-static LIST_HEAD(unconfirmed);
-static int ip_conntrack_vmalloc __read_mostly;
-
-static unsigned int ip_conntrack_next_id;
-static unsigned int ip_conntrack_expect_next_id;
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-ATOMIC_NOTIFIER_HEAD(ip_conntrack_chain);
-ATOMIC_NOTIFIER_HEAD(ip_conntrack_expect_chain);
-
-DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
-
-/* deliver cached events and clear cache entry - must be called with locally
- * disabled softirqs */
-static inline void
-__ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
-{
-       DEBUGP("ecache: delivering events for %p\n", ecache->ct);
-       if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
-               atomic_notifier_call_chain(&ip_conntrack_chain, ecache->events,
-                                   ecache->ct);
-       ecache->events = 0;
-       ip_conntrack_put(ecache->ct);
-       ecache->ct = NULL;
-}
-
-/* Deliver all cached events for a particular conntrack. This is called
- * by code prior to async packet handling or freeing the skb */
-void ip_ct_deliver_cached_events(const struct ip_conntrack *ct)
-{
-       struct ip_conntrack_ecache *ecache;
-
-       local_bh_disable();
-       ecache = &__get_cpu_var(ip_conntrack_ecache);
-       if (ecache->ct == ct)
-               __ip_ct_deliver_cached_events(ecache);
-       local_bh_enable();
-}
-
-void __ip_ct_event_cache_init(struct ip_conntrack *ct)
-{
-       struct ip_conntrack_ecache *ecache;
-
-       /* take care of delivering potentially old events */
-       ecache = &__get_cpu_var(ip_conntrack_ecache);
-       BUG_ON(ecache->ct == ct);
-       if (ecache->ct)
-               __ip_ct_deliver_cached_events(ecache);
-       /* initialize for this conntrack/packet */
-       ecache->ct = ct;
-       nf_conntrack_get(&ct->ct_general);
-}
-
-/* flush the event cache - touches other CPUs' data and must not be called
- * while packets are still passing through the code */
-static void ip_ct_event_cache_flush(void)
-{
-       struct ip_conntrack_ecache *ecache;
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               ecache = &per_cpu(ip_conntrack_ecache, cpu);
-               if (ecache->ct)
-                       ip_conntrack_put(ecache->ct);
-       }
-}
-#else
-static inline void ip_ct_event_cache_flush(void) {}
-#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
-
-DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
-
-static int ip_conntrack_hash_rnd_initted;
-static unsigned int ip_conntrack_hash_rnd;
-
-static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
-                           unsigned int size, unsigned int rnd)
-{
-       return (jhash_3words((__force u32)tuple->src.ip,
-                            ((__force u32)tuple->dst.ip ^ tuple->dst.protonum),
-                            (tuple->src.u.all | (tuple->dst.u.all << 16)),
-                            rnd) % size);
-}
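/*
 * Editorial sketch, not part of the original file: how a tuple is folded
 * into a hash bucket.  mix32() is a purely hypothetical stand-in mixer;
 * the code above uses jhash_3words() seeded with ip_conntrack_hash_rnd.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	/* toy mixer, NOT jhash - just enough to show the data flow */
	uint32_t h = seed ^ a;

	h = (h * 2654435761u) ^ b;
	h = (h * 2654435761u) ^ c;
	return h;
}

static uint32_t bucket_of(uint32_t src_ip, uint32_t dst_ip, uint8_t proto,
			  uint16_t sport, uint16_t dport,
			  uint32_t seed, uint32_t htable_size)
{
	/* same three words as __hash_conntrack() above */
	return mix32(src_ip, dst_ip ^ proto,
		     (uint32_t)sport | ((uint32_t)dport << 16),
		     seed) % htable_size;
}

int main(void)
{
	/* 192.0.2.1:1025 -> 198.51.100.7:80, TCP, 8192 buckets */
	printf("bucket %u\n", bucket_of(0xC0000201u, 0xC6336407u, 6,
					1025, 80, 0x12345678u, 8192));
	return 0;
}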
-
-static u_int32_t
-hash_conntrack(const struct ip_conntrack_tuple *tuple)
-{
-       return __hash_conntrack(tuple, ip_conntrack_htable_size,
-                               ip_conntrack_hash_rnd);
-}
-
-int
-ip_ct_get_tuple(const struct iphdr *iph,
-               const struct sk_buff *skb,
-               unsigned int dataoff,
-               struct ip_conntrack_tuple *tuple,
-               const struct ip_conntrack_protocol *protocol)
-{
-       /* Should never happen */
-       if (iph->frag_off & htons(IP_OFFSET)) {
-               printk("ip_conntrack_core: Frag of proto %u.\n",
-                      iph->protocol);
-               return 0;
-       }
-
-       tuple->src.ip = iph->saddr;
-       tuple->dst.ip = iph->daddr;
-       tuple->dst.protonum = iph->protocol;
-       tuple->dst.dir = IP_CT_DIR_ORIGINAL;
-
-       return protocol->pkt_to_tuple(skb, dataoff, tuple);
-}
-
-int
-ip_ct_invert_tuple(struct ip_conntrack_tuple *inverse,
-                  const struct ip_conntrack_tuple *orig,
-                  const struct ip_conntrack_protocol *protocol)
-{
-       inverse->src.ip = orig->dst.ip;
-       inverse->dst.ip = orig->src.ip;
-       inverse->dst.protonum = orig->dst.protonum;
-       inverse->dst.dir = !orig->dst.dir;
-
-       return protocol->invert_tuple(inverse, orig);
-}
-
-
-/* ip_conntrack_expect helper functions */
-void ip_ct_unlink_expect(struct ip_conntrack_expect *exp)
-{
-       IP_NF_ASSERT(!timer_pending(&exp->timeout));
-       list_del(&exp->list);
-       CONNTRACK_STAT_INC(expect_delete);
-       exp->master->expecting--;
-       ip_conntrack_expect_put(exp);
-}
-
-static void expectation_timed_out(unsigned long ul_expect)
-{
-       struct ip_conntrack_expect *exp = (void *)ul_expect;
-
-       write_lock_bh(&ip_conntrack_lock);
-       ip_ct_unlink_expect(exp);
-       write_unlock_bh(&ip_conntrack_lock);
-       ip_conntrack_expect_put(exp);
-}
-
-struct ip_conntrack_expect *
-__ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
-{
-       struct ip_conntrack_expect *i;
-
-       list_for_each_entry(i, &ip_conntrack_expect_list, list) {
-               if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
-                       return i;
-       }
-       return NULL;
-}
-
-/* Just find an expectation corresponding to a tuple. */
-struct ip_conntrack_expect *
-ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
-{
-       struct ip_conntrack_expect *i;
-
-       read_lock_bh(&ip_conntrack_lock);
-       i = __ip_conntrack_expect_find(tuple);
-       if (i)
-               atomic_inc(&i->use);
-       read_unlock_bh(&ip_conntrack_lock);
-
-       return i;
-}
-
-/* If an expectation for this connection is found, it is removed from the
- * global list and then returned. */
-static struct ip_conntrack_expect *
-find_expectation(const struct ip_conntrack_tuple *tuple)
-{
-       struct ip_conntrack_expect *i;
-
-       list_for_each_entry(i, &ip_conntrack_expect_list, list) {
-               /* If master is not in hash table yet (ie. packet hasn't left
-                  this machine yet), how can other end know about expected?
-                  Hence these are not the droids you are looking for (if
-                  master ct never got confirmed, we'd hold a reference to it
-                  and weird things would happen to future packets). */
-               if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
-                   && is_confirmed(i->master)) {
-                       if (i->flags & IP_CT_EXPECT_PERMANENT) {
-                               atomic_inc(&i->use);
-                               return i;
-                       } else if (del_timer(&i->timeout)) {
-                               ip_ct_unlink_expect(i);
-                               return i;
-                       }
-               }
-       }
-       return NULL;
-}
-
-/* delete all expectations for this conntrack */
-void ip_ct_remove_expectations(struct ip_conntrack *ct)
-{
-       struct ip_conntrack_expect *i, *tmp;
-
-       /* Optimization: most connections never expect any others. */
-       if (ct->expecting == 0)
-               return;
-
-       list_for_each_entry_safe(i, tmp, &ip_conntrack_expect_list, list) {
-               if (i->master == ct && del_timer(&i->timeout)) {
-                       ip_ct_unlink_expect(i);
-                       ip_conntrack_expect_put(i);
-               }
-       }
-}
-
-static void
-clean_from_lists(struct ip_conntrack *ct)
-{
-       DEBUGP("clean_from_lists(%p)\n", ct);
-       list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
-       list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list);
-
-       /* Destroy all pending expectations */
-       ip_ct_remove_expectations(ct);
-}
-
-static void
-destroy_conntrack(struct nf_conntrack *nfct)
-{
-       struct ip_conntrack *ct = (struct ip_conntrack *)nfct;
-       struct ip_conntrack_protocol *proto;
-       struct ip_conntrack_helper *helper;
-       typeof(ip_conntrack_destroyed) destroyed;
-
-       DEBUGP("destroy_conntrack(%p)\n", ct);
-       IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
-       IP_NF_ASSERT(!timer_pending(&ct->timeout));
-
-       ip_conntrack_event(IPCT_DESTROY, ct);
-       set_bit(IPS_DYING_BIT, &ct->status);
-
-       helper = ct->helper;
-       if (helper && helper->destroy)
-               helper->destroy(ct);
-
-       /* To make sure we don't get any weird locking issues here:
-        * destroy_conntrack() MUST NOT be called with a write lock
-        * to ip_conntrack_lock!!! -HW */
-       rcu_read_lock();
-       proto = __ip_conntrack_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
-       if (proto && proto->destroy)
-               proto->destroy(ct);
-
-       destroyed = rcu_dereference(ip_conntrack_destroyed);
-       if (destroyed)
-               destroyed(ct);
-
-       rcu_read_unlock();
-
-       write_lock_bh(&ip_conntrack_lock);
-       /* Expectations will have been removed in clean_from_lists,
-        * except that TFTP can create an expectation on the first packet,
-        * before the connection is in the list, so we need to clean up
-        * here, too. */
-       ip_ct_remove_expectations(ct);
-
-       /* We overload first tuple to link into unconfirmed list. */
-       if (!is_confirmed(ct)) {
-               BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
-               list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
-       }
-
-       CONNTRACK_STAT_INC(delete);
-       write_unlock_bh(&ip_conntrack_lock);
-
-       if (ct->master)
-               ip_conntrack_put(ct->master);
-
-       DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
-       ip_conntrack_free(ct);
-}
-
-static void death_by_timeout(unsigned long ul_conntrack)
-{
-       struct ip_conntrack *ct = (void *)ul_conntrack;
-
-       write_lock_bh(&ip_conntrack_lock);
-       /* Inside lock so preempt is disabled on module removal path.
-        * Otherwise we can get spurious warnings. */
-       CONNTRACK_STAT_INC(delete_list);
-       clean_from_lists(ct);
-       write_unlock_bh(&ip_conntrack_lock);
-       ip_conntrack_put(ct);
-}
-
-struct ip_conntrack_tuple_hash *
-__ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
-                   const struct ip_conntrack *ignored_conntrack)
-{
-       struct ip_conntrack_tuple_hash *h;
-       unsigned int hash = hash_conntrack(tuple);
-
-       list_for_each_entry(h, &ip_conntrack_hash[hash], list) {
-               if (tuplehash_to_ctrack(h) != ignored_conntrack &&
-                   ip_ct_tuple_equal(tuple, &h->tuple)) {
-                       CONNTRACK_STAT_INC(found);
-                       return h;
-               }
-               CONNTRACK_STAT_INC(searched);
-       }
-
-       return NULL;
-}
-
-/* Find a connection corresponding to a tuple. */
-struct ip_conntrack_tuple_hash *
-ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
-                     const struct ip_conntrack *ignored_conntrack)
-{
-       struct ip_conntrack_tuple_hash *h;
-
-       read_lock_bh(&ip_conntrack_lock);
-       h = __ip_conntrack_find(tuple, ignored_conntrack);
-       if (h)
-               atomic_inc(&tuplehash_to_ctrack(h)->ct_general.use);
-       read_unlock_bh(&ip_conntrack_lock);
-
-       return h;
-}
-
-static void __ip_conntrack_hash_insert(struct ip_conntrack *ct,
-                                       unsigned int hash,
-                                       unsigned int repl_hash)
-{
-       ct->id = ++ip_conntrack_next_id;
-       list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list,
-                &ip_conntrack_hash[hash]);
-       list_add(&ct->tuplehash[IP_CT_DIR_REPLY].list,
-                &ip_conntrack_hash[repl_hash]);
-}
-
-void ip_conntrack_hash_insert(struct ip_conntrack *ct)
-{
-       unsigned int hash, repl_hash;
-
-       hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-
-       write_lock_bh(&ip_conntrack_lock);
-       __ip_conntrack_hash_insert(ct, hash, repl_hash);
-       write_unlock_bh(&ip_conntrack_lock);
-}
-
-/* Confirm a connection given skb; places it in hash table */
-int
-__ip_conntrack_confirm(struct sk_buff **pskb)
-{
-       unsigned int hash, repl_hash;
-       struct ip_conntrack_tuple_hash *h;
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-
-       ct = ip_conntrack_get(*pskb, &ctinfo);
-
-       /* ipt_REJECT uses ip_conntrack_attach to attach related
-          ICMP/TCP RST packets in other direction.  Actual packet
-          which created connection will be IP_CT_NEW or for an
-          expected connection, IP_CT_RELATED. */
-       if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
-               return NF_ACCEPT;
-
-       hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-
-       /* We're not in hash table, and we refuse to set up related
-          connections for unconfirmed conns.  But packet copies and
-          REJECT will give spurious warnings here. */
-       /* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
-
-       /* No external references means no one else could have
-          confirmed us. */
-       IP_NF_ASSERT(!is_confirmed(ct));
-       DEBUGP("Confirming conntrack %p\n", ct);
-
-       write_lock_bh(&ip_conntrack_lock);
-
-       /* See if there's one in the list already, including reverse:
-          NAT could have grabbed it without realizing, since we're
-          not in the hash.  If there is, we lost the race. */
-       list_for_each_entry(h, &ip_conntrack_hash[hash], list)
-               if (ip_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-                                     &h->tuple))
-                       goto out;
-       list_for_each_entry(h, &ip_conntrack_hash[repl_hash], list)
-               if (ip_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
-                                     &h->tuple))
-                       goto out;
-
-       /* Remove from unconfirmed list */
-       list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
-
-       __ip_conntrack_hash_insert(ct, hash, repl_hash);
-       /* Timer relative to confirmation time, not original
-          setting time, otherwise we'd get timer wrap in
-          weird delay cases. */
-       ct->timeout.expires += jiffies;
-       add_timer(&ct->timeout);
-       atomic_inc(&ct->ct_general.use);
-       set_bit(IPS_CONFIRMED_BIT, &ct->status);
-       CONNTRACK_STAT_INC(insert);
-       write_unlock_bh(&ip_conntrack_lock);
-       if (ct->helper)
-               ip_conntrack_event_cache(IPCT_HELPER, *pskb);
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-       if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
-           test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
-               ip_conntrack_event_cache(IPCT_NATINFO, *pskb);
-#endif
-       ip_conntrack_event_cache(master_ct(ct) ?
-                                IPCT_RELATED : IPCT_NEW, *pskb);
-
-       return NF_ACCEPT;
-
-out:
-       CONNTRACK_STAT_INC(insert_failed);
-       write_unlock_bh(&ip_conntrack_lock);
-       return NF_DROP;
-}
-
-/* Returns true if a connection corresponds to the tuple (required
-   for NAT). */
-int
-ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple,
-                        const struct ip_conntrack *ignored_conntrack)
-{
-       struct ip_conntrack_tuple_hash *h;
-
-       read_lock_bh(&ip_conntrack_lock);
-       h = __ip_conntrack_find(tuple, ignored_conntrack);
-       read_unlock_bh(&ip_conntrack_lock);
-
-       return h != NULL;
-}
-
-/* There's a small race here where we may free a just-assured
-   connection.  Too bad: we're in trouble anyway. */
-static int early_drop(struct list_head *chain)
-{
-       /* Traverse backwards: gives us oldest, which is roughly LRU */
-       struct ip_conntrack_tuple_hash *h;
-       struct ip_conntrack *ct = NULL, *tmp;
-       int dropped = 0;
-
-       read_lock_bh(&ip_conntrack_lock);
-       list_for_each_entry_reverse(h, chain, list) {
-               tmp = tuplehash_to_ctrack(h);
-               if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) {
-                       ct = tmp;
-                       atomic_inc(&ct->ct_general.use);
-                       break;
-               }
-       }
-       read_unlock_bh(&ip_conntrack_lock);
-
-       if (!ct)
-               return dropped;
-
-       if (del_timer(&ct->timeout)) {
-               death_by_timeout((unsigned long)ct);
-               dropped = 1;
-               CONNTRACK_STAT_INC_ATOMIC(early_drop);
-       }
-       ip_conntrack_put(ct);
-       return dropped;
-}
-
-static struct ip_conntrack_helper *
-__ip_conntrack_helper_find( const struct ip_conntrack_tuple *tuple)
-{
-       struct ip_conntrack_helper *h;
-
-       list_for_each_entry(h, &helpers, list) {
-               if (ip_ct_tuple_mask_cmp(tuple, &h->tuple, &h->mask))
-                       return h;
-       }
-       return NULL;
-}
-
-struct ip_conntrack_helper *
-ip_conntrack_helper_find_get( const struct ip_conntrack_tuple *tuple)
-{
-       struct ip_conntrack_helper *helper;
-
-       /* need ip_conntrack_lock to ensure that the helper exists until
-        * try_module_get() is called */
-       read_lock_bh(&ip_conntrack_lock);
-
-       helper = __ip_conntrack_helper_find(tuple);
-       if (helper) {
-               /* need to increase the module usage count to ensure the
-                * helper will not go away while the caller is e.g. busy
-                * putting a conntrack in the hash that uses the helper */
-               if (!try_module_get(helper->me))
-                       helper = NULL;
-       }
-
-       read_unlock_bh(&ip_conntrack_lock);
-
-       return helper;
-}
-
-void ip_conntrack_helper_put(struct ip_conntrack_helper *helper)
-{
-       module_put(helper->me);
-}
-
-struct ip_conntrack_protocol *
-__ip_conntrack_proto_find(u_int8_t protocol)
-{
-       return ip_ct_protos[protocol];
-}
-
-/* this is guaranteed to always return a valid protocol helper, since
- * it falls back to generic_protocol */
-struct ip_conntrack_protocol *
-ip_conntrack_proto_find_get(u_int8_t protocol)
-{
-       struct ip_conntrack_protocol *p;
-
-       rcu_read_lock();
-       p = __ip_conntrack_proto_find(protocol);
-       if (p) {
-               if (!try_module_get(p->me))
-                       p = &ip_conntrack_generic_protocol;
-       }
-       rcu_read_unlock();
-
-       return p;
-}
-
-void ip_conntrack_proto_put(struct ip_conntrack_protocol *p)
-{
-       module_put(p->me);
-}
-
-struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
-                                       struct ip_conntrack_tuple *repl)
-{
-       struct ip_conntrack *conntrack;
-
-       if (!ip_conntrack_hash_rnd_initted) {
-               get_random_bytes(&ip_conntrack_hash_rnd, 4);
-               ip_conntrack_hash_rnd_initted = 1;
-       }
-
-       /* We don't want any race condition at early drop stage */
-       atomic_inc(&ip_conntrack_count);
-
-       if (ip_conntrack_max
-           && atomic_read(&ip_conntrack_count) > ip_conntrack_max) {
-               unsigned int hash = hash_conntrack(orig);
-               /* Try dropping from this hash chain. */
-               if (!early_drop(&ip_conntrack_hash[hash])) {
-                       atomic_dec(&ip_conntrack_count);
-                       if (net_ratelimit())
-                               printk(KERN_WARNING
-                                      "ip_conntrack: table full, dropping"
-                                      " packet.\n");
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-
-       conntrack = kmem_cache_zalloc(ip_conntrack_cachep, GFP_ATOMIC);
-       if (!conntrack) {
-               DEBUGP("Can't allocate conntrack.\n");
-               atomic_dec(&ip_conntrack_count);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       atomic_set(&conntrack->ct_general.use, 1);
-       conntrack->ct_general.destroy = destroy_conntrack;
-       conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
-       conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
-       /* Don't set timer yet: wait for confirmation */
-       init_timer(&conntrack->timeout);
-       conntrack->timeout.data = (unsigned long)conntrack;
-       conntrack->timeout.function = death_by_timeout;
-
-       return conntrack;
-}
-
-void
-ip_conntrack_free(struct ip_conntrack *conntrack)
-{
-       atomic_dec(&ip_conntrack_count);
-       kmem_cache_free(ip_conntrack_cachep, conntrack);
-}
-
-/* Allocate a new conntrack: we return -ENOMEM if classification
- * failed due to stress.   Otherwise it really is unclassifiable */
-static struct ip_conntrack_tuple_hash *
-init_conntrack(struct ip_conntrack_tuple *tuple,
-              struct ip_conntrack_protocol *protocol,
-              struct sk_buff *skb)
-{
-       struct ip_conntrack *conntrack;
-       struct ip_conntrack_tuple repl_tuple;
-       struct ip_conntrack_expect *exp;
-
-       if (!ip_ct_invert_tuple(&repl_tuple, tuple, protocol)) {
-               DEBUGP("Can't invert tuple.\n");
-               return NULL;
-       }
-
-       conntrack = ip_conntrack_alloc(tuple, &repl_tuple);
-       if (conntrack == NULL || IS_ERR(conntrack))
-               return (struct ip_conntrack_tuple_hash *)conntrack;
-
-       if (!protocol->new(conntrack, skb)) {
-               ip_conntrack_free(conntrack);
-               return NULL;
-       }
-
-       write_lock_bh(&ip_conntrack_lock);
-       exp = find_expectation(tuple);
-
-       if (exp) {
-               DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
-                       conntrack, exp);
-               /* Welcome, Mr. Bond.  We've been expecting you... */
-               __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
-               conntrack->master = exp->master;
-#ifdef CONFIG_IP_NF_CONNTRACK_MARK
-               conntrack->mark = exp->master->mark;
-#endif
-#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
-    defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
-               /* this is ugly, but there is no other place to put it */
-               conntrack->nat.masq_index = exp->master->nat.masq_index;
-#endif
-#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK
-               conntrack->secmark = exp->master->secmark;
-#endif
-               nf_conntrack_get(&conntrack->master->ct_general);
-               CONNTRACK_STAT_INC(expect_new);
-       } else {
-               conntrack->helper = __ip_conntrack_helper_find(&repl_tuple);
-
-               CONNTRACK_STAT_INC(new);
-       }
-
-       /* Overload tuple linked list to put us in unconfirmed list. */
-       list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);
-
-       write_unlock_bh(&ip_conntrack_lock);
-
-       if (exp) {
-               if (exp->expectfn)
-                       exp->expectfn(conntrack, exp);
-               ip_conntrack_expect_put(exp);
-       }
-
-       return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
-}
-
-/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
-static inline struct ip_conntrack *
-resolve_normal_ct(struct sk_buff *skb,
-                 struct ip_conntrack_protocol *proto,
-                 int *set_reply,
-                 unsigned int hooknum,
-                 enum ip_conntrack_info *ctinfo)
-{
-       struct ip_conntrack_tuple tuple;
-       struct ip_conntrack_tuple_hash *h;
-       struct ip_conntrack *ct;
-
-       IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);
-
-       if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4,
-                               &tuple,proto))
-               return NULL;
-
-       /* look for tuple match */
-       h = ip_conntrack_find_get(&tuple, NULL);
-       if (!h) {
-               h = init_conntrack(&tuple, proto, skb);
-               if (!h)
-                       return NULL;
-               if (IS_ERR(h))
-                       return (void *)h;
-       }
-       ct = tuplehash_to_ctrack(h);
-
-       /* It exists; we have (non-exclusive) reference. */
-       if (DIRECTION(h) == IP_CT_DIR_REPLY) {
-               *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
-               /* Please set reply bit if this packet OK */
-               *set_reply = 1;
-       } else {
-               /* Once we've had two way comms, always ESTABLISHED. */
-               if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
-                       DEBUGP("ip_conntrack_in: normal packet for %p\n",
-                              ct);
-                       *ctinfo = IP_CT_ESTABLISHED;
-               } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
-                       DEBUGP("ip_conntrack_in: related packet for %p\n",
-                              ct);
-                       *ctinfo = IP_CT_RELATED;
-               } else {
-                       DEBUGP("ip_conntrack_in: new packet for %p\n",
-                              ct);
-                       *ctinfo = IP_CT_NEW;
-               }
-               *set_reply = 0;
-       }
-       skb->nfct = &ct->ct_general;
-       skb->nfctinfo = *ctinfo;
-       return ct;
-}
-
-/* Netfilter hook itself. */
-unsigned int ip_conntrack_in(unsigned int hooknum,
-                            struct sk_buff **pskb,
-                            const struct net_device *in,
-                            const struct net_device *out,
-                            int (*okfn)(struct sk_buff *))
-{
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-       struct ip_conntrack_protocol *proto;
-       int set_reply = 0;
-       int ret;
-
-       /* Previously seen (loopback or untracked)?  Ignore. */
-       if ((*pskb)->nfct) {
-               CONNTRACK_STAT_INC_ATOMIC(ignore);
-               return NF_ACCEPT;
-       }
-
-       /* Should never happen */
-       if ((*pskb)->nh.iph->frag_off & htons(IP_OFFSET)) {
-               if (net_ratelimit()) {
-                       printk(KERN_ERR "ip_conntrack_in: Frag of proto %u (hook=%u)\n",
-                              (*pskb)->nh.iph->protocol, hooknum);
-               }
-               return NF_DROP;
-       }
-
-/* Doesn't cover locally-generated broadcast, so not worth it. */
-#if 0
-       /* Ignore broadcast: no `connection'. */
-       if ((*pskb)->pkt_type == PACKET_BROADCAST) {
-               printk("Broadcast packet!\n");
-               return NF_ACCEPT;
-       } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
-                  == htonl(0x000000FF)) {
-               printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",
-                      NIPQUAD((*pskb)->nh.iph->saddr),
-                      NIPQUAD((*pskb)->nh.iph->daddr),
-                      (*pskb)->sk, (*pskb)->pkt_type);
-       }
-#endif
-
-       /* rcu_read_lock()ed by nf_hook_slow */
-       proto = __ip_conntrack_proto_find((*pskb)->nh.iph->protocol);
-
-       /* It may be a special packet, error, unclean...
-        * the inverse of the return code tells the netfilter
-        * core what to do with the packet. */
-       if (proto->error != NULL
-           && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) {
-               CONNTRACK_STAT_INC_ATOMIC(error);
-               CONNTRACK_STAT_INC_ATOMIC(invalid);
-               return -ret;
-       }
-
-       if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo))) {
-               /* Not valid part of a connection */
-               CONNTRACK_STAT_INC_ATOMIC(invalid);
-               return NF_ACCEPT;
-       }
-
-       if (IS_ERR(ct)) {
-               /* Too stressed to deal. */
-               CONNTRACK_STAT_INC_ATOMIC(drop);
-               return NF_DROP;
-       }
-
-       IP_NF_ASSERT((*pskb)->nfct);
-
-       ret = proto->packet(ct, *pskb, ctinfo);
-       if (ret < 0) {
-               /* Invalid: inverse of the return code tells
-                * the netfilter core what to do*/
-               nf_conntrack_put((*pskb)->nfct);
-               (*pskb)->nfct = NULL;
-               CONNTRACK_STAT_INC_ATOMIC(invalid);
-               return -ret;
-       }
-
-       if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
-               ip_conntrack_event_cache(IPCT_STATUS, *pskb);
-
-       return ret;
-}
-
-int invert_tuplepr(struct ip_conntrack_tuple *inverse,
-                  const struct ip_conntrack_tuple *orig)
-{
-       struct ip_conntrack_protocol *proto;
-       int ret;
-
-       rcu_read_lock();
-       proto = __ip_conntrack_proto_find(orig->dst.protonum);
-       ret = ip_ct_invert_tuple(inverse, orig, proto);
-       rcu_read_unlock();
-
-       return ret;
-}
-
-/* Would two expected things clash? */
-static inline int expect_clash(const struct ip_conntrack_expect *a,
-                              const struct ip_conntrack_expect *b)
-{
-       /* Part covered by intersection of masks must be unequal,
-          otherwise they clash */
-       struct ip_conntrack_tuple intersect_mask
-               = { { a->mask.src.ip & b->mask.src.ip,
-                     { a->mask.src.u.all & b->mask.src.u.all } },
-                   { a->mask.dst.ip & b->mask.dst.ip,
-                     { a->mask.dst.u.all & b->mask.dst.u.all },
-                     a->mask.dst.protonum & b->mask.dst.protonum } };
-
-       return ip_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
-}
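/*
 * Editorial sketch, not part of the original file: the clash test above on
 * a simplified tuple that carries only an address and a port.  Two
 * expectations clash when their tuples agree on every bit that both masks
 * care about.  All values below are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct mini_tuple {
	uint32_t ip;
	uint16_t port;
};

static int masked_equal(const struct mini_tuple *a, const struct mini_tuple *b,
			const struct mini_tuple *mask)
{
	return ((a->ip ^ b->ip) & mask->ip) == 0 &&
	       ((a->port ^ b->port) & mask->port) == 0;
}

int main(void)
{
	/* expectation A: exact host 192.0.2.1, any port */
	struct mini_tuple ta = { 0xC0000201u, 0 };
	struct mini_tuple ma = { 0xFFFFFFFFu, 0x0000 };
	/* expectation B: exact host 192.0.2.1, exact port 20 */
	struct mini_tuple tb = { 0xC0000201u, 20 };
	struct mini_tuple mb = { 0xFFFFFFFFu, 0xFFFF };
	/* intersection of the two masks, as in expect_clash() */
	struct mini_tuple inter = { ma.ip & mb.ip, ma.port & mb.port };

	printf("clash: %d\n", masked_equal(&ta, &tb, &inter)); /* prints 1 */
	return 0;
}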
-
-static inline int expect_matches(const struct ip_conntrack_expect *a,
-                                const struct ip_conntrack_expect *b)
-{
-       return a->master == b->master
-               && ip_ct_tuple_equal(&a->tuple, &b->tuple)
-               && ip_ct_tuple_equal(&a->mask, &b->mask);
-}
-
-/* Generally a bad idea to call this: could have matched already. */
-void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp)
-{
-       struct ip_conntrack_expect *i;
-
-       write_lock_bh(&ip_conntrack_lock);
-       /* choose the oldest expectation to evict */
-       list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) {
-               if (expect_matches(i, exp) && del_timer(&i->timeout)) {
-                       ip_ct_unlink_expect(i);
-                       write_unlock_bh(&ip_conntrack_lock);
-                       ip_conntrack_expect_put(i);
-                       return;
-               }
-       }
-       write_unlock_bh(&ip_conntrack_lock);
-}
-
-/* We don't increase the master conntrack refcount for unfulfilled
- * expectations. During conntrack destruction, the expectations are
- * always killed before the conntrack itself. */
-struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me)
-{
-       struct ip_conntrack_expect *new;
-
-       new = kmem_cache_alloc(ip_conntrack_expect_cachep, GFP_ATOMIC);
-       if (!new) {
-               DEBUGP("expect_related: OOM allocating expect\n");
-               return NULL;
-       }
-       new->master = me;
-       atomic_set(&new->use, 1);
-       return new;
-}
-
-void ip_conntrack_expect_put(struct ip_conntrack_expect *exp)
-{
-       if (atomic_dec_and_test(&exp->use))
-               kmem_cache_free(ip_conntrack_expect_cachep, exp);
-}
-
-static void ip_conntrack_expect_insert(struct ip_conntrack_expect *exp)
-{
-       atomic_inc(&exp->use);
-       exp->master->expecting++;
-       list_add(&exp->list, &ip_conntrack_expect_list);
-
-       init_timer(&exp->timeout);
-       exp->timeout.data = (unsigned long)exp;
-       exp->timeout.function = expectation_timed_out;
-       exp->timeout.expires = jiffies + exp->master->helper->timeout * HZ;
-       add_timer(&exp->timeout);
-
-       exp->id = ++ip_conntrack_expect_next_id;
-       atomic_inc(&exp->use);
-       CONNTRACK_STAT_INC(expect_create);
-}
-
-/* Race with expectations being used means we could have none to find; OK. */
-static void evict_oldest_expect(struct ip_conntrack *master)
-{
-       struct ip_conntrack_expect *i;
-
-       list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) {
-               if (i->master == master) {
-                       if (del_timer(&i->timeout)) {
-                               ip_ct_unlink_expect(i);
-                               ip_conntrack_expect_put(i);
-                       }
-                       break;
-               }
-       }
-}
-
-static inline int refresh_timer(struct ip_conntrack_expect *i)
-{
-       if (!del_timer(&i->timeout))
-               return 0;
-
-       i->timeout.expires = jiffies + i->master->helper->timeout*HZ;
-       add_timer(&i->timeout);
-       return 1;
-}
-
-int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
-{
-       struct ip_conntrack_expect *i;
-       int ret;
-
-       DEBUGP("ip_conntrack_expect_related %p\n", expect);
-       DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple);
-       DEBUGP("mask:  "); DUMP_TUPLE(&expect->mask);
-
-       write_lock_bh(&ip_conntrack_lock);
-       list_for_each_entry(i, &ip_conntrack_expect_list, list) {
-               if (expect_matches(i, expect)) {
-                       /* Refresh timer: if it's dying, ignore.. */
-                       if (refresh_timer(i)) {
-                               ret = 0;
-                               goto out;
-                       }
-               } else if (expect_clash(i, expect)) {
-                       ret = -EBUSY;
-                       goto out;
-               }
-       }
-
-       /* Will be over limit? */
-       if (expect->master->helper->max_expected &&
-           expect->master->expecting >= expect->master->helper->max_expected)
-               evict_oldest_expect(expect->master);
-
-       ip_conntrack_expect_insert(expect);
-       ip_conntrack_expect_event(IPEXP_NEW, expect);
-       ret = 0;
-out:
-       write_unlock_bh(&ip_conntrack_lock);
-       return ret;
-}
-
-/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
-   implicitly racy: see __ip_conntrack_confirm */
-void ip_conntrack_alter_reply(struct ip_conntrack *conntrack,
-                             const struct ip_conntrack_tuple *newreply)
-{
-       write_lock_bh(&ip_conntrack_lock);
-       /* Should be unconfirmed, so not in hash table yet */
-       IP_NF_ASSERT(!is_confirmed(conntrack));
-
-       DEBUGP("Altering reply tuple of %p to ", conntrack);
-       DUMP_TUPLE(newreply);
-
-       conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
-       if (!conntrack->master && conntrack->expecting == 0)
-               conntrack->helper = __ip_conntrack_helper_find(newreply);
-       write_unlock_bh(&ip_conntrack_lock);
-}
-
-int ip_conntrack_helper_register(struct ip_conntrack_helper *me)
-{
-       BUG_ON(me->timeout == 0);
-       write_lock_bh(&ip_conntrack_lock);
-       list_add(&me->list, &helpers);
-       write_unlock_bh(&ip_conntrack_lock);
-
-       return 0;
-}
-
-struct ip_conntrack_helper *
-__ip_conntrack_helper_find_byname(const char *name)
-{
-       struct ip_conntrack_helper *h;
-
-       list_for_each_entry(h, &helpers, list) {
-               if (!strcmp(h->name, name))
-                       return h;
-       }
-
-       return NULL;
-}
-
-static inline void unhelp(struct ip_conntrack_tuple_hash *i,
-                         const struct ip_conntrack_helper *me)
-{
-       if (tuplehash_to_ctrack(i)->helper == me) {
-               ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i));
-               tuplehash_to_ctrack(i)->helper = NULL;
-       }
-}
-
-void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
-{
-       unsigned int i;
-       struct ip_conntrack_tuple_hash *h;
-       struct ip_conntrack_expect *exp, *tmp;
-
-       /* Need write lock here, to delete helper. */
-       write_lock_bh(&ip_conntrack_lock);
-       list_del(&me->list);
-
-       /* Get rid of expectations */
-       list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, list) {
-               if (exp->master->helper == me && del_timer(&exp->timeout)) {
-                       ip_ct_unlink_expect(exp);
-                       ip_conntrack_expect_put(exp);
-               }
-       }
-       /* Get rid of expecteds, set helpers to NULL. */
-       list_for_each_entry(h, &unconfirmed, list)
-               unhelp(h, me);
-       for (i = 0; i < ip_conntrack_htable_size; i++) {
-               list_for_each_entry(h, &ip_conntrack_hash[i], list)
-                       unhelp(h, me);
-       }
-       write_unlock_bh(&ip_conntrack_lock);
-
-       /* Someone could still be looking at the helper in a bh. */
-       synchronize_net();
-}
-
-/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
-void __ip_ct_refresh_acct(struct ip_conntrack *ct,
-                       enum ip_conntrack_info ctinfo,
-                       const struct sk_buff *skb,
-                       unsigned long extra_jiffies,
-                       int do_acct)
-{
-       int event = 0;
-
-       IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct);
-       IP_NF_ASSERT(skb);
-
-       write_lock_bh(&ip_conntrack_lock);
-
-       /* Only update if this is not a fixed timeout */
-       if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
-               write_unlock_bh(&ip_conntrack_lock);
-               return;
-       }
-
-       /* If not in hash table, timer will not be active yet */
-       if (!is_confirmed(ct)) {
-               ct->timeout.expires = extra_jiffies;
-               event = IPCT_REFRESH;
-       } else {
-               /* Need del_timer for race avoidance (may already be dying). */
-               if (del_timer(&ct->timeout)) {
-                       ct->timeout.expires = jiffies + extra_jiffies;
-                       add_timer(&ct->timeout);
-                       event = IPCT_REFRESH;
-               }
-       }
-
-#ifdef CONFIG_IP_NF_CT_ACCT
-       if (do_acct) {
-               ct->counters[CTINFO2DIR(ctinfo)].packets++;
-               ct->counters[CTINFO2DIR(ctinfo)].bytes +=
-                                               ntohs(skb->nh.iph->tot_len);
-               if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
-                   || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
-                       event |= IPCT_COUNTER_FILLING;
-       }
-#endif
-
-       write_unlock_bh(&ip_conntrack_lock);
-
-       /* must be unlocked when calling event cache */
-       if (event)
-               ip_conntrack_event_cache(event, skb);
-}
-
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-/* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
- * in ip_conntrack_core, since we don't want the protocols to autoload
- * or depend on ctnetlink */
-int ip_ct_port_tuple_to_nfattr(struct sk_buff *skb,
-                              const struct ip_conntrack_tuple *tuple)
-{
-       NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(__be16),
-               &tuple->src.u.tcp.port);
-       NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(__be16),
-               &tuple->dst.u.tcp.port);
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-int ip_ct_port_nfattr_to_tuple(struct nfattr *tb[],
-                              struct ip_conntrack_tuple *t)
-{
-       if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1])
-               return -EINVAL;
-
-       t->src.u.tcp.port =
-               *(__be16 *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
-       t->dst.u.tcp.port =
-               *(__be16 *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);
-
-       return 0;
-}
-#endif
-
-/* Returns new sk_buff, or NULL */
-struct sk_buff *
-ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
-{
-       skb_orphan(skb);
-
-       local_bh_disable();
-       skb = ip_defrag(skb, user);
-       local_bh_enable();
-
-       if (skb)
-               ip_send_check(skb->nh.iph);
-       return skb;
-}
-
-/* Used by ipt_REJECT. */
-static void ip_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
-{
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-
-       /* This ICMP is in reverse direction to the packet which caused it */
-       ct = ip_conntrack_get(skb, &ctinfo);
-
-       if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
-               ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
-       else
-               ctinfo = IP_CT_RELATED;
-
-       /* Attach to new skbuff, and increment count */
-       nskb->nfct = &ct->ct_general;
-       nskb->nfctinfo = ctinfo;
-       nf_conntrack_get(nskb->nfct);
-}
-
-/* Bring out ya dead! */
-static struct ip_conntrack *
-get_next_corpse(int (*iter)(struct ip_conntrack *i, void *data),
-               void *data, unsigned int *bucket)
-{
-       struct ip_conntrack_tuple_hash *h;
-       struct ip_conntrack *ct;
-
-       write_lock_bh(&ip_conntrack_lock);
-       for (; *bucket < ip_conntrack_htable_size; (*bucket)++) {
-               list_for_each_entry(h, &ip_conntrack_hash[*bucket], list) {
-                       ct = tuplehash_to_ctrack(h);
-                       if (iter(ct, data))
-                               goto found;
-               }
-       }
-       list_for_each_entry(h, &unconfirmed, list) {
-               ct = tuplehash_to_ctrack(h);
-               if (iter(ct, data))
-                       set_bit(IPS_DYING_BIT, &ct->status);
-       }
-       write_unlock_bh(&ip_conntrack_lock);
-       return NULL;
-
-found:
-       atomic_inc(&ct->ct_general.use);
-       write_unlock_bh(&ip_conntrack_lock);
-       return ct;
-}
-
-void
-ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *), void *data)
-{
-       struct ip_conntrack *ct;
-       unsigned int bucket = 0;
-
-       while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
-               /* Time to push up daisies... */
-               if (del_timer(&ct->timeout))
-                       death_by_timeout((unsigned long)ct);
-               /* ... else the timer will get him soon. */
-
-               ip_conntrack_put(ct);
-       }
-}
-
-/* Fast function for those who don't want to parse /proc (and I don't
-   blame them). */
-/* Reversing the socket's dst/src point of view gives us the reply
-   mapping. */
-static int
-getorigdst(struct sock *sk, int optval, void __user *user, int *len)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       struct ip_conntrack_tuple_hash *h;
-       struct ip_conntrack_tuple tuple;
-
-       IP_CT_TUPLE_U_BLANK(&tuple);
-       tuple.src.ip = inet->rcv_saddr;
-       tuple.src.u.tcp.port = inet->sport;
-       tuple.dst.ip = inet->daddr;
-       tuple.dst.u.tcp.port = inet->dport;
-       tuple.dst.protonum = IPPROTO_TCP;
-
-       /* We only do TCP at the moment: is there a better way? */
-       if (strcmp(sk->sk_prot->name, "TCP")) {
-               DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n");
-               return -ENOPROTOOPT;
-       }
-
-       if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
-               DEBUGP("SO_ORIGINAL_DST: len %u not %u\n",
-                      *len, sizeof(struct sockaddr_in));
-               return -EINVAL;
-       }
-
-       h = ip_conntrack_find_get(&tuple, NULL);
-       if (h) {
-               struct sockaddr_in sin;
-               struct ip_conntrack *ct = tuplehash_to_ctrack(h);
-
-               sin.sin_family = AF_INET;
-               sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL]
-                       .tuple.dst.u.tcp.port;
-               sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
-                       .tuple.dst.ip;
-               memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
-
-               DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
-                      NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
-               ip_conntrack_put(ct);
-               if (copy_to_user(user, &sin, sizeof(sin)) != 0)
-                       return -EFAULT;
-               else
-                       return 0;
-       }
-       DEBUGP("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n",
-              NIPQUAD(tuple.src.ip), ntohs(tuple.src.u.tcp.port),
-              NIPQUAD(tuple.dst.ip), ntohs(tuple.dst.u.tcp.port));
-       return -ENOENT;
-}
-
-static struct nf_sockopt_ops so_getorigdst = {
-       .pf             = PF_INET,
-       .get_optmin     = SO_ORIGINAL_DST,
-       .get_optmax     = SO_ORIGINAL_DST+1,
-       .get            = &getorigdst,
-};
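/*
 * Editorial sketch, not part of the original file: how a user-space
 * transparent proxy would consume the SO_ORIGINAL_DST option implemented
 * by getorigdst() above.  'client_fd' is assumed to be a connected TCP
 * socket that was redirected to the proxy; this fragment is meant to be
 * called from the proxy's accept loop rather than run on its own.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef SO_ORIGINAL_DST
#define SO_ORIGINAL_DST 80	/* value from linux/netfilter_ipv4.h */
#endif

static int print_original_dst(int client_fd)
{
	struct sockaddr_in orig;
	socklen_t len = sizeof(orig);
	char buf[INET_ADDRSTRLEN];

	memset(&orig, 0, sizeof(orig));
	if (getsockopt(client_fd, SOL_IP, SO_ORIGINAL_DST, &orig, &len) < 0) {
		perror("SO_ORIGINAL_DST");
		return -1;
	}
	printf("original destination: %s:%u\n",
	       inet_ntop(AF_INET, &orig.sin_addr, buf, sizeof(buf)),
	       ntohs(orig.sin_port));
	return 0;
}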
-
-static int kill_all(struct ip_conntrack *i, void *data)
-{
-       return 1;
-}
-
-void ip_conntrack_flush(void)
-{
-       ip_ct_iterate_cleanup(kill_all, NULL);
-}
-
-static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size)
-{
-       if (vmalloced)
-               vfree(hash);
-       else
-               free_pages((unsigned long)hash,
-                          get_order(sizeof(struct list_head) * size));
-}
-
-/* Mishearing the voices in his head, our hero wonders how he's
-   supposed to kill the mall. */
-void ip_conntrack_cleanup(void)
-{
-       rcu_assign_pointer(ip_ct_attach, NULL);
-
-       /* This makes sure all current packets have passed through
-          netfilter framework.  Roll on, two-stage module
-          delete... */
-       synchronize_net();
-
-       ip_ct_event_cache_flush();
- i_see_dead_people:
-       ip_conntrack_flush();
-       if (atomic_read(&ip_conntrack_count) != 0) {
-               schedule();
-               goto i_see_dead_people;
-       }
-       /* wait until all references to ip_conntrack_untracked are dropped */
-       while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
-               schedule();
-
-       kmem_cache_destroy(ip_conntrack_cachep);
-       kmem_cache_destroy(ip_conntrack_expect_cachep);
-       free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
-                           ip_conntrack_htable_size);
-       nf_unregister_sockopt(&so_getorigdst);
-}
-
-static struct list_head *alloc_hashtable(int size, int *vmalloced)
-{
-       struct list_head *hash;
-       unsigned int i;
-
-       *vmalloced = 0;
-       hash = (void*)__get_free_pages(GFP_KERNEL,
-                                      get_order(sizeof(struct list_head)
-                                                * size));
-       if (!hash) {
-               *vmalloced = 1;
-               printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n");
-               hash = vmalloc(sizeof(struct list_head) * size);
-       }
-
-       if (hash)
-               for (i = 0; i < size; i++)
-                       INIT_LIST_HEAD(&hash[i]);
-
-       return hash;
-}
-
-static int set_hashsize(const char *val, struct kernel_param *kp)
-{
-       int i, bucket, hashsize, vmalloced;
-       int old_vmalloced, old_size;
-       int rnd;
-       struct list_head *hash, *old_hash;
-       struct ip_conntrack_tuple_hash *h;
-
-       /* On boot, we can set this without any fancy locking. */
-       if (!ip_conntrack_htable_size)
-               return param_set_int(val, kp);
-
-       hashsize = simple_strtol(val, NULL, 0);
-       if (!hashsize)
-               return -EINVAL;
-
-       hash = alloc_hashtable(hashsize, &vmalloced);
-       if (!hash)
-               return -ENOMEM;
-
-       /* We have to rehash for the new table anyway, so we can also
-        * use a new random seed */
-       get_random_bytes(&rnd, 4);
-
-       write_lock_bh(&ip_conntrack_lock);
-       for (i = 0; i < ip_conntrack_htable_size; i++) {
-               while (!list_empty(&ip_conntrack_hash[i])) {
-                       h = list_entry(ip_conntrack_hash[i].next,
-                                      struct ip_conntrack_tuple_hash, list);
-                       list_del(&h->list);
-                       bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
-                       list_add_tail(&h->list, &hash[bucket]);
-               }
-       }
-       old_size = ip_conntrack_htable_size;
-       old_vmalloced = ip_conntrack_vmalloc;
-       old_hash = ip_conntrack_hash;
-
-       ip_conntrack_htable_size = hashsize;
-       ip_conntrack_vmalloc = vmalloced;
-       ip_conntrack_hash = hash;
-       ip_conntrack_hash_rnd = rnd;
-       write_unlock_bh(&ip_conntrack_lock);
-
-       free_conntrack_hash(old_hash, old_vmalloced, old_size);
-       return 0;
-}
-
-module_param_call(hashsize, set_hashsize, param_get_uint,
-                 &ip_conntrack_htable_size, 0600);
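/*
 * Editorial note, not part of the original file: because the parameter is
 * registered writable (mode 0600), the hash table can be resized at run
 * time, e.g. (sysfs path assumed, depending on how the module is named):
 *
 *     echo 16384 > /sys/module/ip_conntrack/parameters/hashsize
 *
 * set_hashsize() then allocates the new table, rehashes every entry under
 * the write lock with a fresh random seed, and frees the old table.
 */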
-
-int __init ip_conntrack_init(void)
-{
-       unsigned int i;
-       int ret;
-
-       /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
-        * machine has 256 buckets.  >= 1GB machines have 8192 buckets. */
-       if (!ip_conntrack_htable_size) {
-               ip_conntrack_htable_size
-                       = (((num_physpages << PAGE_SHIFT) / 16384)
-                          / sizeof(struct list_head));
-               if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
-                       ip_conntrack_htable_size = 8192;
-               if (ip_conntrack_htable_size < 16)
-                       ip_conntrack_htable_size = 16;
-       }
-       ip_conntrack_max = 8 * ip_conntrack_htable_size;
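	/*
	 * Editorial note, not part of the original file: worked example of
	 * the sizing heuristic above, assuming 4 KB pages.  A 32 MB machine
	 * has num_physpages = 8192, so (8192 << 12) / 16384 = 2048 bytes of
	 * table, and 2048 / sizeof(struct list_head) = 2048 / 8 = 256
	 * buckets, giving ip_conntrack_max = 8 * 256 = 2048 connections.
	 * At 1 GB and above the bucket count is clamped to 8192, so
	 * ip_conntrack_max = 65536.
	 */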
-
-       printk("ip_conntrack version %s (%u buckets, %d max)"
-              " - %Zd bytes per conntrack\n", IP_CONNTRACK_VERSION,
-              ip_conntrack_htable_size, ip_conntrack_max,
-              sizeof(struct ip_conntrack));
-
-       ret = nf_register_sockopt(&so_getorigdst);
-       if (ret != 0) {
-               printk(KERN_ERR "Unable to register netfilter socket option\n");
-               return ret;
-       }
-
-       ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size,
-                                           &ip_conntrack_vmalloc);
-       if (!ip_conntrack_hash) {
-               printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
-               goto err_unreg_sockopt;
-       }
-
-       ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
-                                               sizeof(struct ip_conntrack), 0,
-                                               0, NULL, NULL);
-       if (!ip_conntrack_cachep) {
-               printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
-               goto err_free_hash;
-       }
-
-       ip_conntrack_expect_cachep = kmem_cache_create("ip_conntrack_expect",
-                                       sizeof(struct ip_conntrack_expect),
-                                       0, 0, NULL, NULL);
-       if (!ip_conntrack_expect_cachep) {
-               printk(KERN_ERR "Unable to create ip_expect slab cache\n");
-               goto err_free_conntrack_slab;
-       }
-
-       /* Don't NEED lock here, but good form anyway. */
-       write_lock_bh(&ip_conntrack_lock);
-       for (i = 0; i < MAX_IP_CT_PROTO; i++)
-               rcu_assign_pointer(ip_ct_protos[i], &ip_conntrack_generic_protocol);
-       /* Sew in builtin protocols. */
-       rcu_assign_pointer(ip_ct_protos[IPPROTO_TCP], &ip_conntrack_protocol_tcp);
-       rcu_assign_pointer(ip_ct_protos[IPPROTO_UDP], &ip_conntrack_protocol_udp);
-       rcu_assign_pointer(ip_ct_protos[IPPROTO_ICMP], &ip_conntrack_protocol_icmp);
-       write_unlock_bh(&ip_conntrack_lock);
-
-       /* For use by ipt_REJECT */
-       rcu_assign_pointer(ip_ct_attach, ip_conntrack_attach);
-
-       /* Set up fake conntrack:
-           - to never be deleted, not in any hashes */
-       atomic_set(&ip_conntrack_untracked.ct_general.use, 1);
-       /*  - and make it look like a confirmed connection */
-       set_bit(IPS_CONFIRMED_BIT, &ip_conntrack_untracked.status);
-
-       return ret;
-
-err_free_conntrack_slab:
-       kmem_cache_destroy(ip_conntrack_cachep);
-err_free_hash:
-       free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
-                           ip_conntrack_htable_size);
-err_unreg_sockopt:
-       nf_unregister_sockopt(&so_getorigdst);
-
-       return -ENOMEM;
-}
diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c
deleted file mode 100644 (file)
index 1faa68a..0000000
+++ /dev/null
@@ -1,520 +0,0 @@
-/* FTP extension for IP connection tracking. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <linux/ctype.h>
-#include <net/checksum.h>
-#include <net/tcp.h>
-
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
-#include <linux/moduleparam.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
-MODULE_DESCRIPTION("ftp connection tracking helper");
-
-/* This is slow, but it's simple. --RR */
-static char *ftp_buffer;
-static DEFINE_SPINLOCK(ip_ftp_lock);
-
-#define MAX_PORTS 8
-static unsigned short ports[MAX_PORTS];
-static int ports_c;
-module_param_array(ports, ushort, &ports_c, 0400);
-
-static int loose;
-module_param(loose, bool, 0600);
-
-unsigned int (*ip_nat_ftp_hook)(struct sk_buff **pskb,
-                               enum ip_conntrack_info ctinfo,
-                               enum ip_ct_ftp_type type,
-                               unsigned int matchoff,
-                               unsigned int matchlen,
-                               struct ip_conntrack_expect *exp,
-                               u32 *seq);
-EXPORT_SYMBOL_GPL(ip_nat_ftp_hook);
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-static int try_rfc959(const char *, size_t, u_int32_t [], char);
-static int try_eprt(const char *, size_t, u_int32_t [], char);
-static int try_epsv_response(const char *, size_t, u_int32_t [], char);
-
-static const struct ftp_search {
-       const char *pattern;
-       size_t plen;
-       char skip;
-       char term;
-       enum ip_ct_ftp_type ftptype;
-       int (*getnum)(const char *, size_t, u_int32_t[], char);
-} search[IP_CT_DIR_MAX][2] = {
-       [IP_CT_DIR_ORIGINAL] = {
-               {
-                       .pattern        =  "PORT",
-                       .plen           = sizeof("PORT") - 1,
-                       .skip           = ' ',
-                       .term           = '\r',
-                       .ftptype        = IP_CT_FTP_PORT,
-                       .getnum         = try_rfc959,
-               },
-               {
-                       .pattern        = "EPRT",
-                       .plen           = sizeof("EPRT") - 1,
-                       .skip           = ' ',
-                       .term           = '\r',
-                       .ftptype        = IP_CT_FTP_EPRT,
-                       .getnum         = try_eprt,
-               },
-       },
-       [IP_CT_DIR_REPLY] = {
-               {
-                       .pattern        = "227 ",
-                       .plen           = sizeof("227 ") - 1,
-                       .skip           = '(',
-                       .term           = ')',
-                       .ftptype        = IP_CT_FTP_PASV,
-                       .getnum         = try_rfc959,
-               },
-               {
-                       .pattern        = "229 ",
-                       .plen           = sizeof("229 ") - 1,
-                       .skip           = '(',
-                       .term           = ')',
-                       .ftptype        = IP_CT_FTP_EPSV,
-                       .getnum         = try_epsv_response,
-               },
-       },
-};
-
-static int try_number(const char *data, size_t dlen, u_int32_t array[],
-                     int array_size, char sep, char term)
-{
-       u_int32_t i, len;
-
-       memset(array, 0, sizeof(array[0])*array_size);
-
-       /* Keep data pointing at next char. */
-       for (i = 0, len = 0; len < dlen && i < array_size; len++, data++) {
-               if (*data >= '0' && *data <= '9') {
-                       array[i] = array[i]*10 + *data - '0';
-               }
-               else if (*data == sep)
-                       i++;
-               else {
-                       /* Unexpected character; true if it's the
-                          terminator and we're finished. */
-                       if (*data == term && i == array_size - 1)
-                               return len;
-
-                       DEBUGP("Char %u (got %u nums) `%u' unexpected\n",
-                              len, i, *data);
-                       return 0;
-               }
-       }
-       DEBUGP("Failed to fill %u numbers separated by %c\n", array_size, sep);
-
-       return 0;
-}
-
-/* Returns 0, or length of numbers: 192,168,1,1,5,6 */
-static int try_rfc959(const char *data, size_t dlen, u_int32_t array[6],
-                      char term)
-{
-       return try_number(data, dlen, array, 6, ',', term);
-}
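/*
 * Illustration (hedged sketch; rfc959_to_addr is a hypothetical name,
 * not part of this file): the six numbers that try_rfc959() collects
 * are the RFC 959 host-port form h1,h2,h3,h4,p1,p2, and they map to
 * an address and port as follows.
 */
static inline void rfc959_to_addr(const u_int32_t a[6],
                                  u_int32_t *ip, u_int16_t *port)
{
        /* e.g. 192,168,1,1,5,6 -> 192.168.1.1, port 5*256 + 6 = 1286 */
        *ip   = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3];
        *port = (a[4] << 8) | a[5];
}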
-
-/* Grab port: number up to delimiter */
-static int get_port(const char *data, int start, size_t dlen, char delim,
-                   u_int32_t array[2])
-{
-       u_int16_t port = 0;
-       int i;
-
-       for (i = start; i < dlen; i++) {
-               /* Finished? */
-               if (data[i] == delim) {
-                       if (port == 0)
-                               break;
-                       array[0] = port >> 8;
-                       array[1] = port;
-                       return i + 1;
-               }
-               else if (data[i] >= '0' && data[i] <= '9')
-                       port = port*10 + data[i] - '0';
-               else /* Some other crap */
-                       break;
-       }
-       return 0;
-}
-
-/* Returns 0, or length of numbers: |1|132.235.1.2|6275| */
-static int try_eprt(const char *data, size_t dlen, u_int32_t array[6],
-                   char term)
-{
-       char delim;
-       int length;
-
-       /* First character is delimiter, then "1" for IPv4, then
-          delimiter again. */
-       if (dlen <= 3) return 0;
-       delim = data[0];
-       if (isdigit(delim) || delim < 33 || delim > 126
-           || data[1] != '1' || data[2] != delim)
-               return 0;
-
-       DEBUGP("EPRT: Got |1|!\n");
-       /* Now we have IP address. */
-       length = try_number(data + 3, dlen - 3, array, 4, '.', delim);
-       if (length == 0)
-               return 0;
-
-       DEBUGP("EPRT: Got IP address!\n");
-       /* Start offset includes initial "|1|", and trailing delimiter */
-       return get_port(data, 3 + length + 1, dlen, delim, array+4);
-}
-
-/* Returns 0, or length of numbers: |||6446| */
-static int try_epsv_response(const char *data, size_t dlen, u_int32_t array[6],
-                            char term)
-{
-       char delim;
-
-       /* Three delimiters. */
-       if (dlen <= 3) return 0;
-       delim = data[0];
-       if (isdigit(delim) || delim < 33 || delim > 126
-           || data[1] != delim || data[2] != delim)
-               return 0;
-
-       return get_port(data, 3, dlen, delim, array+4);
-}
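/*
 * Illustration (hedged sketch; eprt_epsv_example is a hypothetical
 * name, not part of this file): both extended forms use an arbitrary
 * printable, non-digit delimiter.  Feeding the example strings from
 * the comments above through the parsers looks like this.
 */
static inline int eprt_epsv_example(u_int32_t array[6])
{
        static const char eprt[] = "|1|132.235.1.2|6275|";
        static const char epsv[] = "|||6446|";

        /* EPRT also carries the IP: array[0..3] become 132, 235, 1, 2 */
        if (!try_eprt(eprt, sizeof(eprt) - 1, array, '\r'))
                return 0;
        /* EPSV carries only the port, stored via array[4]/array[5] */
        return try_epsv_response(epsv, sizeof(epsv) - 1, array, '\r');
}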
-
-/* Return 1 for a full match, 0 for no match (accept), -1 for a partial match. */
-static int find_pattern(const char *data, size_t dlen,
-                       const char *pattern, size_t plen,
-                       char skip, char term,
-                       unsigned int *numoff,
-                       unsigned int *numlen,
-                       u_int32_t array[6],
-                       int (*getnum)(const char *, size_t, u_int32_t[], char))
-{
-       size_t i;
-
-       DEBUGP("find_pattern `%s': dlen = %u\n", pattern, dlen);
-       if (dlen == 0)
-               return 0;
-
-       if (dlen <= plen) {
-               /* Short packet: try for partial? */
-               if (strnicmp(data, pattern, dlen) == 0)
-                       return -1;
-               else return 0;
-       }
-
-       if (strnicmp(data, pattern, plen) != 0) {
-#if 0
-               size_t i;
-
-               DEBUGP("ftp: string mismatch\n");
-               for (i = 0; i < plen; i++) {
-                       DEBUGP("ftp:char %u `%c'(%u) vs `%c'(%u)\n",
-                               i, data[i], data[i],
-                               pattern[i], pattern[i]);
-               }
-#endif
-               return 0;
-       }
-
-       DEBUGP("Pattern matches!\n");
-       /* Now we've found the constant string, try to skip
-          to the 'skip' character */
-       for (i = plen; data[i] != skip; i++)
-               if (i == dlen - 1) return -1;
-
-       /* Skip over the last character */
-       i++;
-
-       DEBUGP("Skipped up to `%c'!\n", skip);
-
-       *numoff = i;
-       *numlen = getnum(data + i, dlen - i, array, term);
-       if (!*numlen)
-               return -1;
-
-       DEBUGP("Match succeeded!\n");
-       return 1;
-}
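/*
 * Illustration (hedged sketch; pasv_reply_example is a hypothetical
 * name, not part of this file): a worked example of the machinery
 * above.  find_pattern() matches "227 ", skips forward to '(', and
 * lets try_rfc959() read the six numbers up to ')', so the call below
 * returns 1 with array = {192, 168, 1, 1, 5, 6}.
 */
static inline int pasv_reply_example(u_int32_t array[6])
{
        static const char reply[] =
                "227 Entering Passive Mode (192,168,1,1,5,6).\r\n";
        unsigned int off, len;

        return find_pattern(reply, sizeof(reply) - 1,
                            "227 ", sizeof("227 ") - 1, '(', ')',
                            &off, &len, array, try_rfc959);
}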
-
-/* Look up to see if we're just after a \n. */
-static int find_nl_seq(u32 seq, const struct ip_ct_ftp_master *info, int dir)
-{
-       unsigned int i;
-
-       for (i = 0; i < info->seq_aft_nl_num[dir]; i++)
-               if (info->seq_aft_nl[dir][i] == seq)
-                       return 1;
-       return 0;
-}
-
-/* We don't update if it's older than what we have. */
-static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir,
-                         struct sk_buff *skb)
-{
-       unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;
-
-       /* Look for oldest: if we find exact match, we're done. */
-       for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
-               if (info->seq_aft_nl[dir][i] == nl_seq)
-                       return;
-
-               if (oldest == info->seq_aft_nl_num[dir]
-                   || before(info->seq_aft_nl[dir][i], oldest))
-                       oldest = i;
-       }
-
-       if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
-               info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
-               ip_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb);
-       } else if (oldest != NUM_SEQ_TO_REMEMBER) {
-               info->seq_aft_nl[dir][oldest] = nl_seq;
-               ip_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb);
-       }
-}
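/*
 * Illustration (hedged sketch; seq_after_newline is a hypothetical
 * name, not part of this file): what update_nl_seq() records is the
 * sequence number of the byte following a packet that ends in '\n',
 * i.e. where the next FTP line will start.  help() below only parses
 * a packet whose sequence number matches one of these positions.
 */
static inline u32 seq_after_newline(u32 tcp_seq, unsigned int datalen)
{
        /* e.g. seq 1000 with 20 bytes ending in '\n' -> next line at 1020 */
        return tcp_seq + datalen;
}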
-
-static int help(struct sk_buff **pskb,
-               struct ip_conntrack *ct,
-               enum ip_conntrack_info ctinfo)
-{
-       unsigned int dataoff, datalen;
-       struct tcphdr _tcph, *th;
-       char *fb_ptr;
-       int ret;
-       u32 seq, array[6] = { 0 };
-       int dir = CTINFO2DIR(ctinfo);
-       unsigned int matchlen, matchoff;
-       struct ip_ct_ftp_master *ct_ftp_info = &ct->help.ct_ftp_info;
-       struct ip_conntrack_expect *exp;
-       unsigned int i;
-       int found = 0, ends_in_nl;
-       typeof(ip_nat_ftp_hook) ip_nat_ftp;
-
-       /* Until there's been traffic both ways, don't look in packets. */
-       if (ctinfo != IP_CT_ESTABLISHED
-           && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
-               DEBUGP("ftp: Conntrackinfo = %u\n", ctinfo);
-               return NF_ACCEPT;
-       }
-
-       th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl*4,
-                               sizeof(_tcph), &_tcph);
-       if (th == NULL)
-               return NF_ACCEPT;
-
-       dataoff = (*pskb)->nh.iph->ihl*4 + th->doff*4;
-       /* No data? */
-       if (dataoff >= (*pskb)->len) {
-               DEBUGP("ftp: pskblen = %u\n", (*pskb)->len);
-               return NF_ACCEPT;
-       }
-       datalen = (*pskb)->len - dataoff;
-
-       spin_lock_bh(&ip_ftp_lock);
-       fb_ptr = skb_header_pointer(*pskb, dataoff,
-                                   (*pskb)->len - dataoff, ftp_buffer);
-       BUG_ON(fb_ptr == NULL);
-
-       ends_in_nl = (fb_ptr[datalen - 1] == '\n');
-       seq = ntohl(th->seq) + datalen;
-
-       /* Look up to see if we're just after a \n. */
-       if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
-               /* Now if this ends in \n, update ftp info. */
-               DEBUGP("ip_conntrack_ftp_help: seq %u is not right after a newline"
-                      " (%u positions recorded)\n",
-                      ntohl(th->seq), ct_ftp_info->seq_aft_nl_num[dir]);
-               ret = NF_ACCEPT;
-               goto out_update_nl;
-       }
-
-       /* Initialize IP array to expected address (it's not mentioned
-          in EPSV responses) */
-       array[0] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 24) & 0xFF;
-       array[1] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 16) & 0xFF;
-       array[2] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 8) & 0xFF;
-       array[3] = ntohl(ct->tuplehash[dir].tuple.src.ip) & 0xFF;
-
-       for (i = 0; i < ARRAY_SIZE(search[dir]); i++) {
-               found = find_pattern(fb_ptr, (*pskb)->len - dataoff,
-                                    search[dir][i].pattern,
-                                    search[dir][i].plen,
-                                    search[dir][i].skip,
-                                    search[dir][i].term,
-                                    &matchoff, &matchlen,
-                                    array,
-                                    search[dir][i].getnum);
-               if (found) break;
-       }
-       if (found == -1) {
-               /* We don't usually drop packets.  After all, this is
-                  connection tracking, not packet filtering.
-                  However, it is necessary for accurate tracking in
-                  this case. */
-               if (net_ratelimit())
-                       printk("conntrack_ftp: partial %s %u+%u\n",
-                              search[dir][i].pattern,
-                              ntohl(th->seq), datalen);
-               ret = NF_DROP;
-               goto out;
-       } else if (found == 0) { /* No match */
-               ret = NF_ACCEPT;
-               goto out_update_nl;
-       }
-
-       DEBUGP("conntrack_ftp: match `%s' (%u bytes at %u)\n",
-              fb_ptr + matchoff, matchlen, ntohl(th->seq) + matchoff);
-
-       /* Allocate expectation which will be inserted */
-       exp = ip_conntrack_expect_alloc(ct);
-       if (exp == NULL) {
-               ret = NF_DROP;
-               goto out;
-       }
-
-       /* We refer to the reverse direction ("!dir") tuples here,
-        * because we're expecting something in the other direction.
-        * Doesn't matter unless NAT is happening.  */
-       exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
-
-       if (htonl((array[0] << 24) | (array[1] << 16) | (array[2] << 8) | array[3])
-           != ct->tuplehash[dir].tuple.src.ip) {
-               /* Enrico Scholz's passive FTP to partially RNAT'd ftp
-                  server: it really wants us to connect to a
-                  different IP address.  Simply don't record it for
-                  NAT. */
-               DEBUGP("conntrack_ftp: NOT RECORDING: %u,%u,%u,%u != %u.%u.%u.%u\n",
-                      array[0], array[1], array[2], array[3],
-                      NIPQUAD(ct->tuplehash[dir].tuple.src.ip));
-
-               /* Thanks to Cristiano Lincoln Mattos
-                  <lincoln@cesar.org.br> for reporting this potential
-                  problem (DMZ machines opening holes to internal
-                  networks, or the packet filter itself). */
-               if (!loose) {
-                       ret = NF_ACCEPT;
-                       goto out_put_expect;
-               }
-               exp->tuple.dst.ip = htonl((array[0] << 24) | (array[1] << 16)
-                                        | (array[2] << 8) | array[3]);
-       }
-
-       exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip;
-       exp->tuple.dst.u.tcp.port = htons(array[4] << 8 | array[5]);
-       exp->tuple.src.u.tcp.port = 0; /* Don't care. */
-       exp->tuple.dst.protonum = IPPROTO_TCP;
-       exp->mask = ((struct ip_conntrack_tuple)
-               { { htonl(0xFFFFFFFF), { 0 } },
-                 { htonl(0xFFFFFFFF), { .tcp = { htons(0xFFFF) } }, 0xFF }});
-
-       exp->expectfn = NULL;
-       exp->flags = 0;
-
-       /* Now, NAT might want to mangle the packet, and register the
-        * (possibly changed) expectation itself. */
-       ip_nat_ftp = rcu_dereference(ip_nat_ftp_hook);
-       if (ip_nat_ftp)
-               ret = ip_nat_ftp(pskb, ctinfo, search[dir][i].ftptype,
-                                matchoff, matchlen, exp, &seq);
-       else {
-               /* Can't expect this?  Best to drop packet now. */
-               if (ip_conntrack_expect_related(exp) != 0)
-                       ret = NF_DROP;
-               else
-                       ret = NF_ACCEPT;
-       }
-
-out_put_expect:
-       ip_conntrack_expect_put(exp);
-
-out_update_nl:
-       /* Now if this ends in \n, update ftp info.  Seq may have been
-        * adjusted by NAT code. */
-       if (ends_in_nl)
-               update_nl_seq(seq, ct_ftp_info,dir, *pskb);
- out:
-       spin_unlock_bh(&ip_ftp_lock);
-       return ret;
-}
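/*
 * Illustration (hedged sketch with reduced, hypothetical types; not
 * part of this file): the mask built in help() above means "source
 * port: don't care, everything else: exact".  Conceptually, an
 * expectation matches a new connection by comparing each field under
 * the mask, along these lines.
 */
struct toy_tuple {
        u_int32_t src_ip, dst_ip;
        u_int16_t dst_port;
        u_int8_t  protonum;
};

static inline int toy_masked_cmp(const struct toy_tuple *t,
                                 const struct toy_tuple *exp,
                                 const struct toy_tuple *mask)
{
        return ((t->src_ip   ^ exp->src_ip)   & mask->src_ip)   == 0 &&
               ((t->dst_ip   ^ exp->dst_ip)   & mask->dst_ip)   == 0 &&
               ((t->dst_port ^ exp->dst_port) & mask->dst_port) == 0 &&
               ((t->protonum ^ exp->protonum) & mask->protonum) == 0;
}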
-
-static struct ip_conntrack_helper ftp[MAX_PORTS];
-static char ftp_names[MAX_PORTS][sizeof("ftp-65535")];
-
-/* Not __exit: called from init() */
-static void ip_conntrack_ftp_fini(void)
-{
-       int i;
-       for (i = 0; i < ports_c; i++) {
-               DEBUGP("ip_ct_ftp: unregistering helper for port %d\n",
-                               ports[i]);
-               ip_conntrack_helper_unregister(&ftp[i]);
-       }
-
-       kfree(ftp_buffer);
-}
-
-static int __init ip_conntrack_ftp_init(void)
-{
-       int i, ret;
-       char *tmpname;
-
-       ftp_buffer = kmalloc(65536, GFP_KERNEL);
-       if (!ftp_buffer)
-               return -ENOMEM;
-
-       if (ports_c == 0)
-               ports[ports_c++] = FTP_PORT;
-
-       for (i = 0; i < ports_c; i++) {
-               ftp[i].tuple.src.u.tcp.port = htons(ports[i]);
-               ftp[i].tuple.dst.protonum = IPPROTO_TCP;
-               ftp[i].mask.src.u.tcp.port = htons(0xFFFF);
-               ftp[i].mask.dst.protonum = 0xFF;
-               ftp[i].max_expected = 1;
-               ftp[i].timeout = 5 * 60; /* 5 minutes */
-               ftp[i].me = THIS_MODULE;
-               ftp[i].help = help;
-
-               tmpname = &ftp_names[i][0];
-               if (ports[i] == FTP_PORT)
-                       sprintf(tmpname, "ftp");
-               else
-                       sprintf(tmpname, "ftp-%d", ports[i]);
-               ftp[i].name = tmpname;
-
-               DEBUGP("ip_ct_ftp: registering helper for port %d\n",
-                               ports[i]);
-               ret = ip_conntrack_helper_register(&ftp[i]);
-
-               if (ret) {
-                       ip_conntrack_ftp_fini();
-                       return ret;
-               }
-       }
-       return 0;
-}
-
-module_init(ip_conntrack_ftp_init);
-module_exit(ip_conntrack_ftp_fini);
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
deleted file mode 100644 (file)
index 53eb365..0000000
+++ /dev/null
@@ -1,1841 +0,0 @@
-/*
- * H.323 connection tracking helper
- *
- * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
- *
- * This source code is licensed under General Public License version 2.
- *
- * Based on the 'brute force' H.323 connection tracking module by
- * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
- *
- * For more information, please see http://nath323.sourceforge.net/
- */
-
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <net/tcp.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
-#include <linux/netfilter_ipv4/ip_conntrack_h323.h>
-#include <linux/moduleparam.h>
-#include <linux/ctype.h>
-#include <linux/inet.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-/* Parameters */
-static unsigned int default_rrq_ttl = 300;
-module_param(default_rrq_ttl, uint, 0600);
-MODULE_PARM_DESC(default_rrq_ttl, "use this TTL if it's missing in RRQ");
-
-static int gkrouted_only = 1;
-module_param(gkrouted_only, int, 0600);
-MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
-
-static int callforward_filter = 1;
-module_param(callforward_filter, bool, 0600);
-MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
-                                    "if both endpoints are on different sides "
-                                    "(determined by routing information)");
-
-/* Hooks for NAT */
-int (*set_h245_addr_hook) (struct sk_buff ** pskb,
-                          unsigned char **data, int dataoff,
-                          H245_TransportAddress * addr,
-                          __be32 ip, u_int16_t port);
-int (*set_h225_addr_hook) (struct sk_buff ** pskb,
-                          unsigned char **data, int dataoff,
-                          TransportAddress * addr,
-                          __be32 ip, u_int16_t port);
-int (*set_sig_addr_hook) (struct sk_buff ** pskb,
-                         struct ip_conntrack * ct,
-                         enum ip_conntrack_info ctinfo,
-                         unsigned char **data,
-                         TransportAddress * addr, int count);
-int (*set_ras_addr_hook) (struct sk_buff ** pskb,
-                         struct ip_conntrack * ct,
-                         enum ip_conntrack_info ctinfo,
-                         unsigned char **data,
-                         TransportAddress * addr, int count);
-int (*nat_rtp_rtcp_hook) (struct sk_buff ** pskb,
-                         struct ip_conntrack * ct,
-                         enum ip_conntrack_info ctinfo,
-                         unsigned char **data, int dataoff,
-                         H245_TransportAddress * addr,
-                         u_int16_t port, u_int16_t rtp_port,
-                         struct ip_conntrack_expect * rtp_exp,
-                         struct ip_conntrack_expect * rtcp_exp);
-int (*nat_t120_hook) (struct sk_buff ** pskb,
-                     struct ip_conntrack * ct,
-                     enum ip_conntrack_info ctinfo,
-                     unsigned char **data, int dataoff,
-                     H245_TransportAddress * addr, u_int16_t port,
-                     struct ip_conntrack_expect * exp);
-int (*nat_h245_hook) (struct sk_buff ** pskb,
-                     struct ip_conntrack * ct,
-                     enum ip_conntrack_info ctinfo,
-                     unsigned char **data, int dataoff,
-                     TransportAddress * addr, u_int16_t port,
-                     struct ip_conntrack_expect * exp);
-int (*nat_callforwarding_hook) (struct sk_buff ** pskb,
-                               struct ip_conntrack * ct,
-                               enum ip_conntrack_info ctinfo,
-                               unsigned char **data, int dataoff,
-                               TransportAddress * addr, u_int16_t port,
-                               struct ip_conntrack_expect * exp);
-int (*nat_q931_hook) (struct sk_buff ** pskb,
-                     struct ip_conntrack * ct,
-                     enum ip_conntrack_info ctinfo,
-                     unsigned char **data, TransportAddress * addr, int idx,
-                     u_int16_t port, struct ip_conntrack_expect * exp);
-
-
-static DEFINE_SPINLOCK(ip_h323_lock);
-static char *h323_buffer;
-
-/****************************************************************************/
-static int get_tpkt_data(struct sk_buff **pskb, struct ip_conntrack *ct,
-                        enum ip_conntrack_info ctinfo,
-                        unsigned char **data, int *datalen, int *dataoff)
-{
-       struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
-       int dir = CTINFO2DIR(ctinfo);
-       struct tcphdr _tcph, *th;
-       int tcpdatalen;
-       int tcpdataoff;
-       unsigned char *tpkt;
-       int tpktlen;
-       int tpktoff;
-
-       /* Get TCP header */
-       th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4,
-                               sizeof(_tcph), &_tcph);
-       if (th == NULL)
-               return 0;
-
-       /* Get TCP data offset */
-       tcpdataoff = (*pskb)->nh.iph->ihl * 4 + th->doff * 4;
-
-       /* Get TCP data length */
-       tcpdatalen = (*pskb)->len - tcpdataoff;
-       if (tcpdatalen <= 0)    /* No TCP data */
-               goto clear_out;
-
-       if (*data == NULL) {    /* first TPKT */
-               /* Get first TPKT pointer */
-               tpkt = skb_header_pointer(*pskb, tcpdataoff, tcpdatalen,
-                                         h323_buffer);
-               BUG_ON(tpkt == NULL);
-
-               /* Validate TPKT identifier */
-               if (tcpdatalen < 4 || tpkt[0] != 0x03 || tpkt[1] != 0) {
-                       /* Netmeeting sends TPKT header and data separately */
-                       if (info->tpkt_len[dir] > 0) {
-                               DEBUGP("ip_ct_h323: previous packet "
-                                      "indicated separate TPKT data of %hu "
-                                      "bytes\n", info->tpkt_len[dir]);
-                               if (info->tpkt_len[dir] <= tcpdatalen) {
-                                       /* Yes, there was a TPKT header
-                                        * received */
-                                       *data = tpkt;
-                                       *datalen = info->tpkt_len[dir];
-                                       *dataoff = 0;
-                                       goto out;
-                               }
-
-                               /* Fragmented TPKT */
-                               if (net_ratelimit())
-                                       printk("ip_ct_h323: "
-                                              "fragmented TPKT\n");
-                               goto clear_out;
-                       }
-
-                       /* It is not even a TPKT */
-                       return 0;
-               }
-               tpktoff = 0;
-       } else {                /* Next TPKT */
-               tpktoff = *dataoff + *datalen;
-               tcpdatalen -= tpktoff;
-               if (tcpdatalen <= 4)    /* No more TPKT */
-                       goto clear_out;
-               tpkt = *data + *datalen;
-
-               /* Validate TPKT identifier */
-               if (tpkt[0] != 0x03 || tpkt[1] != 0)
-                       goto clear_out;
-       }
-
-       /* Validate TPKT length */
-       tpktlen = tpkt[2] * 256 + tpkt[3];
-       if (tpktlen < 4)
-               goto clear_out;
-       if (tpktlen > tcpdatalen) {
-               if (tcpdatalen == 4) {  /* Separate TPKT header */
-                       /* Netmeeting sends TPKT header and data separately */
-                       DEBUGP("ip_ct_h323: separate TPKT header indicates "
-                              "there will be TPKT data of %hu bytes\n",
-                              tpktlen - 4);
-                       info->tpkt_len[dir] = tpktlen - 4;
-                       return 0;
-               }
-
-               if (net_ratelimit())
-                       printk("ip_ct_h323: incomplete TPKT (fragmented?)\n");
-               goto clear_out;
-       }
-
-       /* This is the encapsulated data */
-       *data = tpkt + 4;
-       *datalen = tpktlen - 4;
-       *dataoff = tpktoff + 4;
-
-      out:
-       /* Clear TPKT length */
-       info->tpkt_len[dir] = 0;
-       return 1;
-
-      clear_out:
-       info->tpkt_len[dir] = 0;
-       return 0;
-}
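/*
 * Illustration (hedged sketch; tpkt_payload_len is a hypothetical
 * name, not part of this file): H.225/H.245 messages travel over TCP
 * inside TPKT frames: one byte 0x03, one byte 0x00, then a 16-bit
 * big-endian length that includes the 4-byte header itself.  Ignoring
 * the split-header handling above, the framing check reduces to:
 */
static inline int tpkt_payload_len(const unsigned char *p, int avail)
{
        int tpktlen;

        if (avail < 4 || p[0] != 0x03 || p[1] != 0x00)
                return -1;                      /* not a TPKT header */
        tpktlen = (p[2] << 8) | p[3];           /* whole frame length */
        if (tpktlen < 4 || tpktlen > avail)
                return -1;                      /* malformed or split */
        return tpktlen - 4;                     /* encapsulated bytes */
}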
-
-/****************************************************************************/
-static int get_h245_addr(unsigned char *data, H245_TransportAddress * addr,
-                        __be32 * ip, u_int16_t * port)
-{
-       unsigned char *p;
-
-       if (addr->choice != eH245_TransportAddress_unicastAddress ||
-           addr->unicastAddress.choice != eUnicastAddress_iPAddress)
-               return 0;
-
-       p = data + addr->unicastAddress.iPAddress.network;
-       *ip = htonl((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | (p[3]));
-       *port = (p[4] << 8) | (p[5]);
-
-       return 1;
-}
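/*
 * Illustration (hedged sketch; h245_addr_bytes is a hypothetical name,
 * not part of this file): after ASN.1 decoding, the unicast address is
 * six raw bytes, a 4-byte IPv4 address followed by a big-endian 16-bit
 * port, exactly the shifts used above.  For the bytes
 * c0 a8 01 02 18 83 this yields 192.168.1.2, port 0x1883 = 6275.
 */
static inline void h245_addr_bytes(const unsigned char p[6],
                                   __be32 *ip, u_int16_t *port)
{
        *ip   = htonl((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
        *port = (p[4] << 8) | p[5];
}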
-
-/****************************************************************************/
-static int expect_rtp_rtcp(struct sk_buff **pskb, struct ip_conntrack *ct,
-                          enum ip_conntrack_info ctinfo,
-                          unsigned char **data, int dataoff,
-                          H245_TransportAddress * addr)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       int ret = 0;
-       __be32 ip;
-       u_int16_t port;
-       u_int16_t rtp_port;
-       struct ip_conntrack_expect *rtp_exp;
-       struct ip_conntrack_expect *rtcp_exp;
-       typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp;
-
-       /* Read RTP or RTCP address */
-       if (!get_h245_addr(*data, addr, &ip, &port) ||
-           ip != ct->tuplehash[dir].tuple.src.ip || port == 0)
-               return 0;
-
-       /* RTP port is even */
-       rtp_port = port & (~1);
-
-       /* Create expect for RTP */
-       if ((rtp_exp = ip_conntrack_expect_alloc(ct)) == NULL)
-               return -1;
-       rtp_exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip;
-       rtp_exp->tuple.src.u.udp.port = 0;
-       rtp_exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
-       rtp_exp->tuple.dst.u.udp.port = htons(rtp_port);
-       rtp_exp->tuple.dst.protonum = IPPROTO_UDP;
-       rtp_exp->mask.src.ip = htonl(0xFFFFFFFF);
-       rtp_exp->mask.src.u.udp.port = 0;
-       rtp_exp->mask.dst.ip = htonl(0xFFFFFFFF);
-       rtp_exp->mask.dst.u.udp.port = htons(0xFFFF);
-       rtp_exp->mask.dst.protonum = 0xFF;
-       rtp_exp->flags = 0;
-
-       /* Create expect for RTCP */
-       if ((rtcp_exp = ip_conntrack_expect_alloc(ct)) == NULL) {
-               ip_conntrack_expect_put(rtp_exp);
-               return -1;
-       }
-       rtcp_exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip;
-       rtcp_exp->tuple.src.u.udp.port = 0;
-       rtcp_exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
-       rtcp_exp->tuple.dst.u.udp.port = htons(rtp_port + 1);
-       rtcp_exp->tuple.dst.protonum = IPPROTO_UDP;
-       rtcp_exp->mask.src.ip = htonl(0xFFFFFFFF);
-       rtcp_exp->mask.src.u.udp.port = 0;
-       rtcp_exp->mask.dst.ip = htonl(0xFFFFFFFF);
-       rtcp_exp->mask.dst.u.udp.port = htons(0xFFFF);
-       rtcp_exp->mask.dst.protonum = 0xFF;
-       rtcp_exp->flags = 0;
-
-       if (ct->tuplehash[dir].tuple.src.ip !=
-           ct->tuplehash[!dir].tuple.dst.ip &&
-           (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook))) {
-               /* NAT needed */
-               ret = nat_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
-                                  addr, port, rtp_port, rtp_exp, rtcp_exp);
-       } else {                /* Conntrack only */
-               rtp_exp->expectfn = NULL;
-               rtcp_exp->expectfn = NULL;
-
-               if (ip_conntrack_expect_related(rtp_exp) == 0) {
-                       if (ip_conntrack_expect_related(rtcp_exp) == 0) {
-                               DEBUGP("ip_ct_h323: expect RTP "
-                                      "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                                      NIPQUAD(rtp_exp->tuple.src.ip),
-                                      ntohs(rtp_exp->tuple.src.u.udp.port),
-                                      NIPQUAD(rtp_exp->tuple.dst.ip),
-                                      ntohs(rtp_exp->tuple.dst.u.udp.port));
-                               DEBUGP("ip_ct_h323: expect RTCP "
-                                      "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                                      NIPQUAD(rtcp_exp->tuple.src.ip),
-                                      ntohs(rtcp_exp->tuple.src.u.udp.port),
-                                      NIPQUAD(rtcp_exp->tuple.dst.ip),
-                                      ntohs(rtcp_exp->tuple.dst.u.udp.port));
-                       } else {
-                               ip_conntrack_unexpect_related(rtp_exp);
-                               ret = -1;
-                       }
-               } else
-                       ret = -1;
-       }
-
-       ip_conntrack_expect_put(rtp_exp);
-       ip_conntrack_expect_put(rtcp_exp);
-
-       return ret;
-}
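/*
 * Illustration (hedged sketch; rtp_rtcp_pair is a hypothetical name,
 * not part of this file): RTP uses the even port of a pair and RTCP
 * the odd port directly above it, which is why a single announced
 * media port turns into the two expectations set up above.
 */
static inline void rtp_rtcp_pair(u_int16_t announced,
                                 u_int16_t *rtp, u_int16_t *rtcp)
{
        *rtp  = announced & ~1;         /* round down to the even RTP port */
        *rtcp = *rtp + 1;               /* RTCP sits on the next (odd) port */
}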
-
-/****************************************************************************/
-static int expect_t120(struct sk_buff **pskb,
-                      struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, int dataoff,
-                      H245_TransportAddress * addr)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       int ret = 0;
-       __be32 ip;
-       u_int16_t port;
-       struct ip_conntrack_expect *exp = NULL;
-       typeof(nat_t120_hook) nat_t120;
-
-       /* Read T.120 address */
-       if (!get_h245_addr(*data, addr, &ip, &port) ||
-           ip != ct->tuplehash[dir].tuple.src.ip || port == 0)
-               return 0;
-
-       /* Create expect for T.120 connections */
-       if ((exp = ip_conntrack_expect_alloc(ct)) == NULL)
-               return -1;
-       exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip;
-       exp->tuple.src.u.tcp.port = 0;
-       exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
-       exp->tuple.dst.u.tcp.port = htons(port);
-       exp->tuple.dst.protonum = IPPROTO_TCP;
-       exp->mask.src.ip = htonl(0xFFFFFFFF);
-       exp->mask.src.u.tcp.port = 0;
-       exp->mask.dst.ip = htonl(0xFFFFFFFF);
-       exp->mask.dst.u.tcp.port = htons(0xFFFF);
-       exp->mask.dst.protonum = 0xFF;
-       exp->flags = IP_CT_EXPECT_PERMANENT;    /* Accept multiple channels */
-
-       if (ct->tuplehash[dir].tuple.src.ip !=
-           ct->tuplehash[!dir].tuple.dst.ip &&
-           (nat_t120 = rcu_dereference(nat_t120_hook))) {
-               /* NAT needed */
-               ret = nat_t120(pskb, ct, ctinfo, data, dataoff, addr,
-                              port, exp);
-       } else {                /* Conntrack only */
-               exp->expectfn = NULL;
-               if (ip_conntrack_expect_related(exp) == 0) {
-                       DEBUGP("ip_ct_h323: expect T.120 "
-                              "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                              NIPQUAD(exp->tuple.src.ip),
-                              ntohs(exp->tuple.src.u.tcp.port),
-                              NIPQUAD(exp->tuple.dst.ip),
-                              ntohs(exp->tuple.dst.u.tcp.port));
-               } else
-                       ret = -1;
-       }
-
-       ip_conntrack_expect_put(exp);
-
-       return ret;
-}
-
-/****************************************************************************/
-static int process_h245_channel(struct sk_buff **pskb,
-                               struct ip_conntrack *ct,
-                               enum ip_conntrack_info ctinfo,
-                               unsigned char **data, int dataoff,
-                               H2250LogicalChannelParameters * channel)
-{
-       int ret;
-
-       if (channel->options & eH2250LogicalChannelParameters_mediaChannel) {
-               /* RTP */
-               ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
-                                     &channel->mediaChannel);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if (channel->
-           options & eH2250LogicalChannelParameters_mediaControlChannel) {
-               /* RTCP */
-               ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
-                                     &channel->mediaControlChannel);
-               if (ret < 0)
-                       return -1;
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_olc(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, int dataoff,
-                      OpenLogicalChannel * olc)
-{
-       int ret;
-
-       DEBUGP("ip_ct_h323: OpenLogicalChannel\n");
-
-       if (olc->forwardLogicalChannelParameters.multiplexParameters.choice ==
-           eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)
-       {
-               ret = process_h245_channel(pskb, ct, ctinfo, data, dataoff,
-                                          &olc->
-                                          forwardLogicalChannelParameters.
-                                          multiplexParameters.
-                                          h2250LogicalChannelParameters);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if ((olc->options &
-            eOpenLogicalChannel_reverseLogicalChannelParameters) &&
-           (olc->reverseLogicalChannelParameters.options &
-            eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters)
-           && (olc->reverseLogicalChannelParameters.multiplexParameters.
-               choice ==
-               eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
-       {
-               ret =
-                   process_h245_channel(pskb, ct, ctinfo, data, dataoff,
-                                        &olc->
-                                        reverseLogicalChannelParameters.
-                                        multiplexParameters.
-                                        h2250LogicalChannelParameters);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if ((olc->options & eOpenLogicalChannel_separateStack) &&
-           olc->forwardLogicalChannelParameters.dataType.choice ==
-           eDataType_data &&
-           olc->forwardLogicalChannelParameters.dataType.data.application.
-           choice == eDataApplicationCapability_application_t120 &&
-           olc->forwardLogicalChannelParameters.dataType.data.application.
-           t120.choice == eDataProtocolCapability_separateLANStack &&
-           olc->separateStack.networkAddress.choice ==
-           eNetworkAccessParameters_networkAddress_localAreaAddress) {
-               ret = expect_t120(pskb, ct, ctinfo, data, dataoff,
-                                 &olc->separateStack.networkAddress.
-                                 localAreaAddress);
-               if (ret < 0)
-                       return -1;
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_olca(struct sk_buff **pskb, struct ip_conntrack *ct,
-                       enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff,
-                       OpenLogicalChannelAck * olca)
-{
-       H2250LogicalChannelAckParameters *ack;
-       int ret;
-
-       DEBUGP("ip_ct_h323: OpenLogicalChannelAck\n");
-
-       if ((olca->options &
-            eOpenLogicalChannelAck_reverseLogicalChannelParameters) &&
-           (olca->reverseLogicalChannelParameters.options &
-            eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters)
-           && (olca->reverseLogicalChannelParameters.multiplexParameters.
-               choice ==
-               eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
-       {
-               ret = process_h245_channel(pskb, ct, ctinfo, data, dataoff,
-                                          &olca->
-                                          reverseLogicalChannelParameters.
-                                          multiplexParameters.
-                                          h2250LogicalChannelParameters);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if ((olca->options &
-            eOpenLogicalChannelAck_forwardMultiplexAckParameters) &&
-           (olca->forwardMultiplexAckParameters.choice ==
-            eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters))
-       {
-               ack = &olca->forwardMultiplexAckParameters.
-                   h2250LogicalChannelAckParameters;
-               if (ack->options &
-                   eH2250LogicalChannelAckParameters_mediaChannel) {
-                       /* RTP */
-                       ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
-                                             &ack->mediaChannel);
-                       if (ret < 0)
-                               return -1;
-               }
-
-               if (ack->options &
-                   eH2250LogicalChannelAckParameters_mediaControlChannel) {
-                       /* RTCP */
-                       ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
-                                             &ack->mediaControlChannel);
-                       if (ret < 0)
-                               return -1;
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_h245(struct sk_buff **pskb, struct ip_conntrack *ct,
-                       enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff,
-                       MultimediaSystemControlMessage * mscm)
-{
-       switch (mscm->choice) {
-       case eMultimediaSystemControlMessage_request:
-               if (mscm->request.choice ==
-                   eRequestMessage_openLogicalChannel) {
-                       return process_olc(pskb, ct, ctinfo, data, dataoff,
-                                          &mscm->request.openLogicalChannel);
-               }
-               DEBUGP("ip_ct_h323: H.245 Request %d\n",
-                      mscm->request.choice);
-               break;
-       case eMultimediaSystemControlMessage_response:
-               if (mscm->response.choice ==
-                   eResponseMessage_openLogicalChannelAck) {
-                       return process_olca(pskb, ct, ctinfo, data, dataoff,
-                                           &mscm->response.
-                                           openLogicalChannelAck);
-               }
-               DEBUGP("ip_ct_h323: H.245 Response %d\n",
-                      mscm->response.choice);
-               break;
-       default:
-               DEBUGP("ip_ct_h323: H.245 signal %d\n", mscm->choice);
-               break;
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int h245_help(struct sk_buff **pskb, struct ip_conntrack *ct,
-                    enum ip_conntrack_info ctinfo)
-{
-       static MultimediaSystemControlMessage mscm;
-       unsigned char *data = NULL;
-       int datalen;
-       int dataoff;
-       int ret;
-
-       /* Until there's been traffic both ways, don't look in packets. */
-       if (ctinfo != IP_CT_ESTABLISHED
-           && ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
-               return NF_ACCEPT;
-       }
-       DEBUGP("ip_ct_h245: skblen = %u\n", (*pskb)->len);
-
-       spin_lock_bh(&ip_h323_lock);
-
-       /* Process each TPKT */
-       while (get_tpkt_data(pskb, ct, ctinfo, &data, &datalen, &dataoff)) {
-               DEBUGP("ip_ct_h245: TPKT %u.%u.%u.%u->%u.%u.%u.%u, len=%d\n",
-                      NIPQUAD((*pskb)->nh.iph->saddr),
-                      NIPQUAD((*pskb)->nh.iph->daddr), datalen);
-
-               /* Decode H.245 signal */
-               ret = DecodeMultimediaSystemControlMessage(data, datalen,
-                                                          &mscm);
-               if (ret < 0) {
-                       if (net_ratelimit())
-                               printk("ip_ct_h245: decoding error: %s\n",
-                                      ret == H323_ERROR_BOUND ?
-                                      "out of bound" : "out of range");
-                       /* We don't drop when decoding error */
-                       break;
-               }
-
-               /* Process H.245 signal */
-               if (process_h245(pskb, ct, ctinfo, &data, dataoff, &mscm) < 0)
-                       goto drop;
-       }
-
-       spin_unlock_bh(&ip_h323_lock);
-       return NF_ACCEPT;
-
-      drop:
-       spin_unlock_bh(&ip_h323_lock);
-       if (net_ratelimit())
-               printk("ip_ct_h245: packet dropped\n");
-       return NF_DROP;
-}
-
-/****************************************************************************/
-static struct ip_conntrack_helper ip_conntrack_helper_h245 = {
-       .name = "H.245",
-       .me = THIS_MODULE,
-       .max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */ ,
-       .timeout = 240,
-       .tuple = {.dst = {.protonum = IPPROTO_TCP}},
-       .mask = {.src = {.u = {0xFFFF}},
-                .dst = {.protonum = 0xFF}},
-       .help = h245_help
-};
-
-/****************************************************************************/
-void ip_conntrack_h245_expect(struct ip_conntrack *new,
-                             struct ip_conntrack_expect *this)
-{
-       write_lock_bh(&ip_conntrack_lock);
-       new->helper = &ip_conntrack_helper_h245;
-       write_unlock_bh(&ip_conntrack_lock);
-}
-
-/****************************************************************************/
-int get_h225_addr(unsigned char *data, TransportAddress * addr,
-                 __be32 * ip, u_int16_t * port)
-{
-       unsigned char *p;
-
-       if (addr->choice != eTransportAddress_ipAddress)
-               return 0;
-
-       p = data + addr->ipAddress.ip;
-       *ip = htonl((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | (p[3]));
-       *port = (p[4] << 8) | (p[5]);
-
-       return 1;
-}
-
-/****************************************************************************/
-static int expect_h245(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, int dataoff,
-                      TransportAddress * addr)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       int ret = 0;
-       __be32 ip;
-       u_int16_t port;
-       struct ip_conntrack_expect *exp = NULL;
-       typeof(nat_h245_hook) nat_h245;
-
-       /* Read h245Address */
-       if (!get_h225_addr(*data, addr, &ip, &port) ||
-           ip != ct->tuplehash[dir].tuple.src.ip || port == 0)
-               return 0;
-
-       /* Create expect for h245 connection */
-       if ((exp = ip_conntrack_expect_alloc(ct)) == NULL)
-               return -1;
-       exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip;
-       exp->tuple.src.u.tcp.port = 0;
-       exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
-       exp->tuple.dst.u.tcp.port = htons(port);
-       exp->tuple.dst.protonum = IPPROTO_TCP;
-       exp->mask.src.ip = htonl(0xFFFFFFFF);
-       exp->mask.src.u.tcp.port = 0;
-       exp->mask.dst.ip = htonl(0xFFFFFFFF);
-       exp->mask.dst.u.tcp.port = htons(0xFFFF);
-       exp->mask.dst.protonum = 0xFF;
-       exp->flags = 0;
-
-       if (ct->tuplehash[dir].tuple.src.ip !=
-           ct->tuplehash[!dir].tuple.dst.ip &&
-           (nat_h245 = rcu_dereference(nat_h245_hook))) {
-               /* NAT needed */
-               ret = nat_h245(pskb, ct, ctinfo, data, dataoff, addr,
-                              port, exp);
-       } else {                /* Conntrack only */
-               exp->expectfn = ip_conntrack_h245_expect;
-
-               if (ip_conntrack_expect_related(exp) == 0) {
-                       DEBUGP("ip_ct_q931: expect H.245 "
-                              "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                              NIPQUAD(exp->tuple.src.ip),
-                              ntohs(exp->tuple.src.u.tcp.port),
-                              NIPQUAD(exp->tuple.dst.ip),
-                              ntohs(exp->tuple.dst.u.tcp.port));
-               } else
-                       ret = -1;
-       }
-
-       ip_conntrack_expect_put(exp);
-
-       return ret;
-}
-
-/* Forward declaration */
-void ip_conntrack_q931_expect(struct ip_conntrack *new,
-                             struct ip_conntrack_expect *this);
-
-/****************************************************************************/
-static int expect_callforwarding(struct sk_buff **pskb,
-                                struct ip_conntrack *ct,
-                                enum ip_conntrack_info ctinfo,
-                                unsigned char **data, int dataoff,
-                                TransportAddress * addr)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       int ret = 0;
-       __be32 ip;
-       u_int16_t port;
-       struct ip_conntrack_expect *exp = NULL;
-       typeof(nat_callforwarding_hook) nat_callforwarding;
-
-       /* Read alternativeAddress */
-       if (!get_h225_addr(*data, addr, &ip, &port) || port == 0)
-               return 0;
-
-       /* If the calling party is on the same side as the forwarded-to
-        * party, there is no need to track the second call */
-       if (callforward_filter) {
-               struct rtable *rt1, *rt2;
-               struct flowi fl1 = {
-                       .fl4_dst = ip,
-               };
-               struct flowi fl2 = {
-                       .fl4_dst = ct->tuplehash[!dir].tuple.src.ip,
-               };
-
-               if (ip_route_output_key(&rt1, &fl1) == 0) {
-                       if (ip_route_output_key(&rt2, &fl2) == 0) {
-                               if (rt1->rt_gateway == rt2->rt_gateway &&
-                                   rt1->u.dst.dev  == rt2->u.dst.dev)
-                                       ret = 1;
-                               dst_release(&rt2->u.dst);
-                       }
-                       dst_release(&rt1->u.dst);
-               }
-               if (ret) {
-                       DEBUGP("ip_ct_q931: Call Forwarding not tracked\n");
-                       return 0;
-               }
-       }
-
-       /* Create expect for the second call leg */
-       if ((exp = ip_conntrack_expect_alloc(ct)) == NULL)
-               return -1;
-       exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip;
-       exp->tuple.src.u.tcp.port = 0;
-       exp->tuple.dst.ip = ip;
-       exp->tuple.dst.u.tcp.port = htons(port);
-       exp->tuple.dst.protonum = IPPROTO_TCP;
-       exp->mask.src.ip = htonl(0xFFFFFFFF);
-       exp->mask.src.u.tcp.port = 0;
-       exp->mask.dst.ip = htonl(0xFFFFFFFF);
-       exp->mask.dst.u.tcp.port = htons(0xFFFF);
-       exp->mask.dst.protonum = 0xFF;
-       exp->flags = 0;
-
-       if (ct->tuplehash[dir].tuple.src.ip !=
-           ct->tuplehash[!dir].tuple.dst.ip &&
-           (nat_callforwarding = rcu_dereference(nat_callforwarding_hook))) {
-               /* Need NAT */
-               ret = nat_callforwarding(pskb, ct, ctinfo, data, dataoff,
-                                        addr, port, exp);
-       } else {                /* Conntrack only */
-               exp->expectfn = ip_conntrack_q931_expect;
-
-               if (ip_conntrack_expect_related(exp) == 0) {
-                       DEBUGP("ip_ct_q931: expect Call Forwarding "
-                              "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                              NIPQUAD(exp->tuple.src.ip),
-                              ntohs(exp->tuple.src.u.tcp.port),
-                              NIPQUAD(exp->tuple.dst.ip),
-                              ntohs(exp->tuple.dst.u.tcp.port));
-               } else
-                       ret = -1;
-       }
-
-       ip_conntrack_expect_put(exp);
-
-       return ret;
-}
-
-/****************************************************************************/
-static int process_setup(struct sk_buff **pskb, struct ip_conntrack *ct,
-                        enum ip_conntrack_info ctinfo,
-                        unsigned char **data, int dataoff,
-                        Setup_UUIE * setup)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       int ret;
-       int i;
-       __be32 ip;
-       u_int16_t port;
-       typeof(set_h225_addr_hook) set_h225_addr;
-
-       DEBUGP("ip_ct_q931: Setup\n");
-
-       if (setup->options & eSetup_UUIE_h245Address) {
-               ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
-                                 &setup->h245Address);
-               if (ret < 0)
-                       return -1;
-       }
-
-       set_h225_addr = rcu_dereference(set_h225_addr_hook);
-
-       if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
-           (set_h225_addr) &&
-           get_h225_addr(*data, &setup->destCallSignalAddress, &ip, &port) &&
-           ip != ct->tuplehash[!dir].tuple.src.ip) {
-               DEBUGP("ip_ct_q931: set destCallSignalAddress "
-                      "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                      NIPQUAD(ip), port,
-                      NIPQUAD(ct->tuplehash[!dir].tuple.src.ip),
-                      ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
-               ret = set_h225_addr(pskb, data, dataoff,
-                                   &setup->destCallSignalAddress,
-                                   ct->tuplehash[!dir].tuple.src.ip,
-                                   ntohs(ct->tuplehash[!dir].tuple.src.
-                                         u.tcp.port));
-               if (ret < 0)
-                       return -1;
-       }
-
-       if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
-           (set_h225_addr) &&
-           get_h225_addr(*data, &setup->sourceCallSignalAddress, &ip, &port)
-           && ip != ct->tuplehash[!dir].tuple.dst.ip) {
-               DEBUGP("ip_ct_q931: set sourceCallSignalAddress "
-                      "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                      NIPQUAD(ip), port,
-                      NIPQUAD(ct->tuplehash[!dir].tuple.dst.ip),
-                      ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
-               ret = set_h225_addr(pskb, data, dataoff,
-                                   &setup->sourceCallSignalAddress,
-                                   ct->tuplehash[!dir].tuple.dst.ip,
-                                   ntohs(ct->tuplehash[!dir].tuple.dst.
-                                         u.tcp.port));
-               if (ret < 0)
-                       return -1;
-       }
-
-       if (setup->options & eSetup_UUIE_fastStart) {
-               for (i = 0; i < setup->fastStart.count; i++) {
-                       ret = process_olc(pskb, ct, ctinfo, data, dataoff,
-                                         &setup->fastStart.item[i]);
-                       if (ret < 0)
-                               return -1;
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_callproceeding(struct sk_buff **pskb,
-                                 struct ip_conntrack *ct,
-                                 enum ip_conntrack_info ctinfo,
-                                 unsigned char **data, int dataoff,
-                                 CallProceeding_UUIE * callproc)
-{
-       int ret;
-       int i;
-
-       DEBUGP("ip_ct_q931: CallProceeding\n");
-
-       if (callproc->options & eCallProceeding_UUIE_h245Address) {
-               ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
-                                 &callproc->h245Address);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if (callproc->options & eCallProceeding_UUIE_fastStart) {
-               for (i = 0; i < callproc->fastStart.count; i++) {
-                       ret = process_olc(pskb, ct, ctinfo, data, dataoff,
-                                         &callproc->fastStart.item[i]);
-                       if (ret < 0)
-                               return -1;
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_connect(struct sk_buff **pskb, struct ip_conntrack *ct,
-                          enum ip_conntrack_info ctinfo,
-                          unsigned char **data, int dataoff,
-                          Connect_UUIE * connect)
-{
-       int ret;
-       int i;
-
-       DEBUGP("ip_ct_q931: Connect\n");
-
-       if (connect->options & eConnect_UUIE_h245Address) {
-               ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
-                                 &connect->h245Address);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if (connect->options & eConnect_UUIE_fastStart) {
-               for (i = 0; i < connect->fastStart.count; i++) {
-                       ret = process_olc(pskb, ct, ctinfo, data, dataoff,
-                                         &connect->fastStart.item[i]);
-                       if (ret < 0)
-                               return -1;
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_alerting(struct sk_buff **pskb, struct ip_conntrack *ct,
-                           enum ip_conntrack_info ctinfo,
-                           unsigned char **data, int dataoff,
-                           Alerting_UUIE * alert)
-{
-       int ret;
-       int i;
-
-       DEBUGP("ip_ct_q931: Alerting\n");
-
-       if (alert->options & eAlerting_UUIE_h245Address) {
-               ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
-                                 &alert->h245Address);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if (alert->options & eAlerting_UUIE_fastStart) {
-               for (i = 0; i < alert->fastStart.count; i++) {
-                       ret = process_olc(pskb, ct, ctinfo, data, dataoff,
-                                         &alert->fastStart.item[i]);
-                       if (ret < 0)
-                               return -1;
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_information(struct sk_buff **pskb,
-                              struct ip_conntrack *ct,
-                              enum ip_conntrack_info ctinfo,
-                              unsigned char **data, int dataoff,
-                              Information_UUIE * info)
-{
-       int ret;
-       int i;
-
-       DEBUGP("ip_ct_q931: Information\n");
-
-       if (info->options & eInformation_UUIE_fastStart) {
-               for (i = 0; i < info->fastStart.count; i++) {
-                       ret = process_olc(pskb, ct, ctinfo, data, dataoff,
-                                         &info->fastStart.item[i]);
-                       if (ret < 0)
-                               return -1;
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_facility(struct sk_buff **pskb, struct ip_conntrack *ct,
-                           enum ip_conntrack_info ctinfo,
-                           unsigned char **data, int dataoff,
-                           Facility_UUIE * facility)
-{
-       int ret;
-       int i;
-
-       DEBUGP("ip_ct_q931: Facility\n");
-
-       if (facility->reason.choice == eFacilityReason_callForwarded) {
-               if (facility->options & eFacility_UUIE_alternativeAddress)
-                       return expect_callforwarding(pskb, ct, ctinfo, data,
-                                                    dataoff,
-                                                    &facility->
-                                                    alternativeAddress);
-               return 0;
-       }
-
-       if (facility->options & eFacility_UUIE_h245Address) {
-               ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
-                                 &facility->h245Address);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if (facility->options & eFacility_UUIE_fastStart) {
-               for (i = 0; i < facility->fastStart.count; i++) {
-                       ret = process_olc(pskb, ct, ctinfo, data, dataoff,
-                                         &facility->fastStart.item[i]);
-                       if (ret < 0)
-                               return -1;
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_progress(struct sk_buff **pskb, struct ip_conntrack *ct,
-                           enum ip_conntrack_info ctinfo,
-                           unsigned char **data, int dataoff,
-                           Progress_UUIE * progress)
-{
-       int ret;
-       int i;
-
-       DEBUGP("ip_ct_q931: Progress\n");
-
-       if (progress->options & eProgress_UUIE_h245Address) {
-               ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
-                                 &progress->h245Address);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if (progress->options & eProgress_UUIE_fastStart) {
-               for (i = 0; i < progress->fastStart.count; i++) {
-                       ret = process_olc(pskb, ct, ctinfo, data, dataoff,
-                                         &progress->fastStart.item[i]);
-                       if (ret < 0)
-                               return -1;
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_q931(struct sk_buff **pskb, struct ip_conntrack *ct,
-                       enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff, Q931 * q931)
-{
-       H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu;
-       int i;
-       int ret = 0;
-
-       switch (pdu->h323_message_body.choice) {
-       case eH323_UU_PDU_h323_message_body_setup:
-               ret = process_setup(pskb, ct, ctinfo, data, dataoff,
-                                   &pdu->h323_message_body.setup);
-               break;
-       case eH323_UU_PDU_h323_message_body_callProceeding:
-               ret = process_callproceeding(pskb, ct, ctinfo, data, dataoff,
-                                            &pdu->h323_message_body.
-                                            callProceeding);
-               break;
-       case eH323_UU_PDU_h323_message_body_connect:
-               ret = process_connect(pskb, ct, ctinfo, data, dataoff,
-                                     &pdu->h323_message_body.connect);
-               break;
-       case eH323_UU_PDU_h323_message_body_alerting:
-               ret = process_alerting(pskb, ct, ctinfo, data, dataoff,
-                                      &pdu->h323_message_body.alerting);
-               break;
-       case eH323_UU_PDU_h323_message_body_information:
-               ret = process_information(pskb, ct, ctinfo, data, dataoff,
-                                         &pdu->h323_message_body.
-                                         information);
-               break;
-       case eH323_UU_PDU_h323_message_body_facility:
-               ret = process_facility(pskb, ct, ctinfo, data, dataoff,
-                                      &pdu->h323_message_body.facility);
-               break;
-       case eH323_UU_PDU_h323_message_body_progress:
-               ret = process_progress(pskb, ct, ctinfo, data, dataoff,
-                                      &pdu->h323_message_body.progress);
-               break;
-       default:
-               DEBUGP("ip_ct_q931: Q.931 signal %d\n",
-                      pdu->h323_message_body.choice);
-               break;
-       }
-
-       if (ret < 0)
-               return -1;
-
-       if (pdu->options & eH323_UU_PDU_h245Control) {
-               for (i = 0; i < pdu->h245Control.count; i++) {
-                       ret = process_h245(pskb, ct, ctinfo, data, dataoff,
-                                          &pdu->h245Control.item[i]);
-                       if (ret < 0)
-                               return -1;
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int q931_help(struct sk_buff **pskb, struct ip_conntrack *ct,
-                    enum ip_conntrack_info ctinfo)
-{
-       static Q931 q931;
-       unsigned char *data = NULL;
-       int datalen;
-       int dataoff;
-       int ret;
-
-       /* Until there's been traffic both ways, don't look in packets. */
-       if (ctinfo != IP_CT_ESTABLISHED
-           && ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
-               return NF_ACCEPT;
-       }
-       DEBUGP("ip_ct_q931: skblen = %u\n", (*pskb)->len);
-
-       spin_lock_bh(&ip_h323_lock);
-
-       /* Process each TPKT */
-       while (get_tpkt_data(pskb, ct, ctinfo, &data, &datalen, &dataoff)) {
-               DEBUGP("ip_ct_q931: TPKT %u.%u.%u.%u->%u.%u.%u.%u, len=%d\n",
-                      NIPQUAD((*pskb)->nh.iph->saddr),
-                      NIPQUAD((*pskb)->nh.iph->daddr), datalen);
-
-               /* Decode Q.931 signal */
-               ret = DecodeQ931(data, datalen, &q931);
-               if (ret < 0) {
-                       if (net_ratelimit())
-                               printk("ip_ct_q931: decoding error: %s\n",
-                                      ret == H323_ERROR_BOUND ?
-                                      "out of bound" : "out of range");
-                       /* We don't drop on a decoding error */
-                       break;
-               }
-
-               /* Process Q.931 signal */
-               if (process_q931(pskb, ct, ctinfo, &data, dataoff, &q931) < 0)
-                       goto drop;
-       }
-
-       spin_unlock_bh(&ip_h323_lock);
-       return NF_ACCEPT;
-
-      drop:
-       spin_unlock_bh(&ip_h323_lock);
-       if (net_ratelimit())
-               printk("ip_ct_q931: packet dropped\n");
-       return NF_DROP;
-}
-
-/****************************************************************************/
-static struct ip_conntrack_helper ip_conntrack_helper_q931 = {
-       .name = "Q.931",
-       .me = THIS_MODULE,
-       .max_expected = H323_RTP_CHANNEL_MAX * 4 + 4 /* T.120 and H.245 */ ,
-       .timeout = 240,
-       .tuple = {.src = {.u = {.tcp = {.port = __constant_htons(Q931_PORT)}}},
-                 .dst = {.protonum = IPPROTO_TCP}},
-       .mask = {.src = {.u = {0xFFFF}},
-                .dst = {.protonum = 0xFF}},
-       .help = q931_help
-};
-
-/****************************************************************************/
-void ip_conntrack_q931_expect(struct ip_conntrack *new,
-                             struct ip_conntrack_expect *this)
-{
-       write_lock_bh(&ip_conntrack_lock);
-       new->helper = &ip_conntrack_helper_q931;
-       write_unlock_bh(&ip_conntrack_lock);
-}
-
-/****************************************************************************/
-static unsigned char *get_udp_data(struct sk_buff **pskb, int *datalen)
-{
-       struct udphdr _uh, *uh;
-       int dataoff;
-
-       uh = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4, sizeof(_uh),
-                               &_uh);
-       if (uh == NULL)
-               return NULL;
-       dataoff = (*pskb)->nh.iph->ihl * 4 + sizeof(_uh);
-       if (dataoff >= (*pskb)->len)
-               return NULL;
-       *datalen = (*pskb)->len - dataoff;
-       return skb_header_pointer(*pskb, dataoff, *datalen, h323_buffer);
-}
-
-/****************************************************************************/
-static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct,
-                                              __be32 ip, u_int16_t port)
-{
-       struct ip_conntrack_expect *exp;
-       struct ip_conntrack_tuple tuple;
-
-       tuple.src.ip = 0;
-       tuple.src.u.tcp.port = 0;
-       tuple.dst.ip = ip;
-       tuple.dst.u.tcp.port = htons(port);
-       tuple.dst.protonum = IPPROTO_TCP;
-
-       exp = __ip_conntrack_expect_find(&tuple);
-       if (exp && exp->master == ct)
-               return exp;
-       return NULL;
-}
-
-/****************************************************************************/
-static int set_expect_timeout(struct ip_conntrack_expect *exp,
-                             unsigned timeout)
-{
-       if (!exp || !del_timer(&exp->timeout))
-               return 0;
-
-       exp->timeout.expires = jiffies + timeout * HZ;
-       add_timer(&exp->timeout);
-
-       return 1;
-}
-
-/****************************************************************************/
-static int expect_q931(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data,
-                      TransportAddress * addr, int count)
-{
-       struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
-       int dir = CTINFO2DIR(ctinfo);
-       int ret = 0;
-       int i;
-       __be32 ip;
-       u_int16_t port;
-       struct ip_conntrack_expect *exp;
-       typeof(nat_q931_hook) nat_q931;
-
-       /* Look for the first related address */
-       for (i = 0; i < count; i++) {
-               if (get_h225_addr(*data, &addr[i], &ip, &port) &&
-                   ip == ct->tuplehash[dir].tuple.src.ip && port != 0)
-                       break;
-       }
-
-       if (i >= count)         /* Not found */
-               return 0;
-
-       /* Create expect for Q.931 */
-       if ((exp = ip_conntrack_expect_alloc(ct)) == NULL)
-               return -1;
-       exp->tuple.src.ip = gkrouted_only ?     /* only accept calls from GK? */
-           ct->tuplehash[!dir].tuple.src.ip : 0;
-       exp->tuple.src.u.tcp.port = 0;
-       exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
-       exp->tuple.dst.u.tcp.port = htons(port);
-       exp->tuple.dst.protonum = IPPROTO_TCP;
-       exp->mask.src.ip = gkrouted_only ? htonl(0xFFFFFFFF) : 0;
-       exp->mask.src.u.tcp.port = 0;
-       exp->mask.dst.ip = htonl(0xFFFFFFFF);
-       exp->mask.dst.u.tcp.port = htons(0xFFFF);
-       exp->mask.dst.protonum = 0xFF;
-       exp->flags = IP_CT_EXPECT_PERMANENT;    /* Accept multiple calls */
-
-       nat_q931 = rcu_dereference(nat_q931_hook);
-       if (nat_q931) { /* Need NAT */
-               ret = nat_q931(pskb, ct, ctinfo, data, addr, i, port, exp);
-       } else {                /* Conntrack only */
-               exp->expectfn = ip_conntrack_q931_expect;
-
-               if (ip_conntrack_expect_related(exp) == 0) {
-                       DEBUGP("ip_ct_ras: expect Q.931 "
-                              "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                              NIPQUAD(exp->tuple.src.ip),
-                              ntohs(exp->tuple.src.u.tcp.port),
-                              NIPQUAD(exp->tuple.dst.ip),
-                              ntohs(exp->tuple.dst.u.tcp.port));
-
-                       /* Save port for looking up expect in processing RCF */
-                       info->sig_port[dir] = port;
-               } else
-                       ret = -1;
-       }
-
-       ip_conntrack_expect_put(exp);
-
-       return ret;
-}
-
-/****************************************************************************/
-static int process_grq(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, GatekeeperRequest * grq)
-{
-       typeof(set_ras_addr_hook) set_ras_addr;
-
-       DEBUGP("ip_ct_ras: GRQ\n");
-
-       set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr)       /* NATed */
-               return set_ras_addr(pskb, ct, ctinfo, data,
-                                   &grq->rasAddress, 1);
-       return 0;
-}
-
-/* Declare before using */
-static void ip_conntrack_ras_expect(struct ip_conntrack *new,
-                                   struct ip_conntrack_expect *this);
-
-/****************************************************************************/
-static int process_gcf(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, GatekeeperConfirm * gcf)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       int ret = 0;
-       __be32 ip;
-       u_int16_t port;
-       struct ip_conntrack_expect *exp;
-
-       DEBUGP("ip_ct_ras: GCF\n");
-
-       if (!get_h225_addr(*data, &gcf->rasAddress, &ip, &port))
-               return 0;
-
-       /* Registration port is the same as discovery port */
-       if (ip == ct->tuplehash[dir].tuple.src.ip &&
-           port == ntohs(ct->tuplehash[dir].tuple.src.u.udp.port))
-               return 0;
-
-       /* Avoid RAS expectation loops. A GCF is never expected. */
-       if (test_bit(IPS_EXPECTED_BIT, &ct->status))
-               return 0;
-
-       /* Need new expect */
-       if ((exp = ip_conntrack_expect_alloc(ct)) == NULL)
-               return -1;
-       exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip;
-       exp->tuple.src.u.tcp.port = 0;
-       exp->tuple.dst.ip = ip;
-       exp->tuple.dst.u.tcp.port = htons(port);
-       exp->tuple.dst.protonum = IPPROTO_UDP;
-       exp->mask.src.ip = htonl(0xFFFFFFFF);
-       exp->mask.src.u.tcp.port = 0;
-       exp->mask.dst.ip = htonl(0xFFFFFFFF);
-       exp->mask.dst.u.tcp.port = htons(0xFFFF);
-       exp->mask.dst.protonum = 0xFF;
-       exp->flags = 0;
-       exp->expectfn = ip_conntrack_ras_expect;
-       if (ip_conntrack_expect_related(exp) == 0) {
-               DEBUGP("ip_ct_ras: expect RAS "
-                      "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                      NIPQUAD(exp->tuple.src.ip),
-                      ntohs(exp->tuple.src.u.tcp.port),
-                      NIPQUAD(exp->tuple.dst.ip),
-                      ntohs(exp->tuple.dst.u.tcp.port));
-       } else
-               ret = -1;
-
-       ip_conntrack_expect_put(exp);
-
-       return ret;
-}
-
-/****************************************************************************/
-static int process_rrq(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, RegistrationRequest * rrq)
-{
-       struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
-       int ret;
-       typeof(set_ras_addr_hook) set_ras_addr;
-
-       DEBUGP("ip_ct_ras: RRQ\n");
-
-       ret = expect_q931(pskb, ct, ctinfo, data,
-                         rrq->callSignalAddress.item,
-                         rrq->callSignalAddress.count);
-       if (ret < 0)
-               return -1;
-
-       set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr) {
-               ret = set_ras_addr(pskb, ct, ctinfo, data,
-                                  rrq->rasAddress.item,
-                                  rrq->rasAddress.count);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if (rrq->options & eRegistrationRequest_timeToLive) {
-               DEBUGP("ip_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive);
-               info->timeout = rrq->timeToLive;
-       } else
-               info->timeout = default_rrq_ttl;
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_rcf(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, RegistrationConfirm * rcf)
-{
-       struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
-       int dir = CTINFO2DIR(ctinfo);
-       int ret;
-       struct ip_conntrack_expect *exp;
-       typeof(set_sig_addr_hook) set_sig_addr;
-
-       DEBUGP("ip_ct_ras: RCF\n");
-
-       set_sig_addr = rcu_dereference(set_sig_addr_hook);
-       if (set_sig_addr) {
-               ret = set_sig_addr(pskb, ct, ctinfo, data,
-                                  rcf->callSignalAddress.item,
-                                  rcf->callSignalAddress.count);
-               if (ret < 0)
-                       return -1;
-       }
-
-       if (rcf->options & eRegistrationConfirm_timeToLive) {
-               DEBUGP("ip_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive);
-               info->timeout = rcf->timeToLive;
-       }
-
-       if (info->timeout > 0) {
-               DEBUGP
-                   ("ip_ct_ras: set RAS connection timeout to %u seconds\n",
-                    info->timeout);
-               ip_ct_refresh(ct, *pskb, info->timeout * HZ);
-
-               /* Set expect timeout */
-               read_lock_bh(&ip_conntrack_lock);
-               exp = find_expect(ct, ct->tuplehash[dir].tuple.dst.ip,
-                                 info->sig_port[!dir]);
-               if (exp) {
-                       DEBUGP("ip_ct_ras: set Q.931 expect "
-                              "(%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu) "
-                              "timeout to %u seconds\n",
-                              NIPQUAD(exp->tuple.src.ip),
-                              ntohs(exp->tuple.src.u.tcp.port),
-                              NIPQUAD(exp->tuple.dst.ip),
-                              ntohs(exp->tuple.dst.u.tcp.port),
-                              info->timeout);
-                       set_expect_timeout(exp, info->timeout);
-               }
-               read_unlock_bh(&ip_conntrack_lock);
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_urq(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, UnregistrationRequest * urq)
-{
-       struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
-       int dir = CTINFO2DIR(ctinfo);
-       int ret;
-       typeof(set_sig_addr_hook) set_sig_addr;
-
-       DEBUGP("ip_ct_ras: URQ\n");
-
-       set_sig_addr = rcu_dereference(set_sig_addr_hook);
-       if (set_sig_addr) {
-               ret = set_sig_addr(pskb, ct, ctinfo, data,
-                                  urq->callSignalAddress.item,
-                                  urq->callSignalAddress.count);
-               if (ret < 0)
-                       return -1;
-       }
-
-       /* Clear old expect */
-       ip_ct_remove_expectations(ct);
-       info->sig_port[dir] = 0;
-       info->sig_port[!dir] = 0;
-
-       /* Give it 30 seconds for UCF or URJ */
-       ip_ct_refresh(ct, *pskb, 30 * HZ);
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_arq(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, AdmissionRequest * arq)
-{
-       struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
-       int dir = CTINFO2DIR(ctinfo);
-       __be32 ip;
-       u_int16_t port;
-       typeof(set_h225_addr_hook) set_h225_addr;
-
-       DEBUGP("ip_ct_ras: ARQ\n");
-
-       set_h225_addr = rcu_dereference(set_h225_addr_hook);
-       if ((arq->options & eAdmissionRequest_destCallSignalAddress) &&
-           get_h225_addr(*data, &arq->destCallSignalAddress, &ip, &port) &&
-           ip == ct->tuplehash[dir].tuple.src.ip &&
-           port == info->sig_port[dir] && set_h225_addr) {
-               /* Answering ARQ */
-               return set_h225_addr(pskb, data, 0,
-                                    &arq->destCallSignalAddress,
-                                    ct->tuplehash[!dir].tuple.dst.ip,
-                                    info->sig_port[!dir]);
-       }
-
-       if ((arq->options & eAdmissionRequest_srcCallSignalAddress) &&
-           get_h225_addr(*data, &arq->srcCallSignalAddress, &ip, &port) &&
-           ip == ct->tuplehash[dir].tuple.src.ip && set_h225_addr) {
-               /* Calling ARQ */
-               return set_h225_addr(pskb, data, 0,
-                                    &arq->srcCallSignalAddress,
-                                    ct->tuplehash[!dir].tuple.dst.ip,
-                                    port);
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_acf(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, AdmissionConfirm * acf)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       int ret = 0;
-       __be32 ip;
-       u_int16_t port;
-       struct ip_conntrack_expect *exp;
-       typeof(set_sig_addr_hook) set_sig_addr;
-
-       DEBUGP("ip_ct_ras: ACF\n");
-
-       if (!get_h225_addr(*data, &acf->destCallSignalAddress, &ip, &port))
-               return 0;
-
-       if (ip == ct->tuplehash[dir].tuple.dst.ip) {    /* Answering ACF */
-               set_sig_addr = rcu_dereference(set_sig_addr_hook);
-               if (set_sig_addr)
-                       return set_sig_addr(pskb, ct, ctinfo, data,
-                                           &acf->destCallSignalAddress, 1);
-               return 0;
-       }
-
-       /* Need new expect */
-       if ((exp = ip_conntrack_expect_alloc(ct)) == NULL)
-               return -1;
-       exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip;
-       exp->tuple.src.u.tcp.port = 0;
-       exp->tuple.dst.ip = ip;
-       exp->tuple.dst.u.tcp.port = htons(port);
-       exp->tuple.dst.protonum = IPPROTO_TCP;
-       exp->mask.src.ip = htonl(0xFFFFFFFF);
-       exp->mask.src.u.tcp.port = 0;
-       exp->mask.dst.ip = htonl(0xFFFFFFFF);
-       exp->mask.dst.u.tcp.port = htons(0xFFFF);
-       exp->mask.dst.protonum = 0xFF;
-       exp->flags = IP_CT_EXPECT_PERMANENT;
-       exp->expectfn = ip_conntrack_q931_expect;
-
-       if (ip_conntrack_expect_related(exp) == 0) {
-               DEBUGP("ip_ct_ras: expect Q.931 "
-                      "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                      NIPQUAD(exp->tuple.src.ip),
-                      ntohs(exp->tuple.src.u.tcp.port),
-                      NIPQUAD(exp->tuple.dst.ip),
-                      ntohs(exp->tuple.dst.u.tcp.port));
-       } else
-               ret = -1;
-
-       ip_conntrack_expect_put(exp);
-
-       return ret;
-}
-
-/****************************************************************************/
-static int process_lrq(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, LocationRequest * lrq)
-{
-       typeof(set_ras_addr_hook) set_ras_addr;
-
-       DEBUGP("ip_ct_ras: LRQ\n");
-
-       set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr)
-               return set_ras_addr(pskb, ct, ctinfo, data,
-                                   &lrq->replyAddress, 1);
-       return 0;
-}
-
-/****************************************************************************/
-static int process_lcf(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, LocationConfirm * lcf)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       int ret = 0;
-       __be32 ip;
-       u_int16_t port;
-       struct ip_conntrack_expect *exp = NULL;
-
-       DEBUGP("ip_ct_ras: LCF\n");
-
-       if (!get_h225_addr(*data, &lcf->callSignalAddress, &ip, &port))
-               return 0;
-
-       /* Need new expect for call signal */
-       if ((exp = ip_conntrack_expect_alloc(ct)) == NULL)
-               return -1;
-       exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip;
-       exp->tuple.src.u.tcp.port = 0;
-       exp->tuple.dst.ip = ip;
-       exp->tuple.dst.u.tcp.port = htons(port);
-       exp->tuple.dst.protonum = IPPROTO_TCP;
-       exp->mask.src.ip = htonl(0xFFFFFFFF);
-       exp->mask.src.u.tcp.port = 0;
-       exp->mask.dst.ip = htonl(0xFFFFFFFF);
-       exp->mask.dst.u.tcp.port = htons(0xFFFF);
-       exp->mask.dst.protonum = 0xFF;
-       exp->flags = IP_CT_EXPECT_PERMANENT;
-       exp->expectfn = ip_conntrack_q931_expect;
-
-       if (ip_conntrack_expect_related(exp) == 0) {
-               DEBUGP("ip_ct_ras: expect Q.931 "
-                      "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                      NIPQUAD(exp->tuple.src.ip),
-                      ntohs(exp->tuple.src.u.tcp.port),
-                      NIPQUAD(exp->tuple.dst.ip),
-                      ntohs(exp->tuple.dst.u.tcp.port));
-       } else
-               ret = -1;
-
-       ip_conntrack_expect_put(exp);
-
-       /* Ignore rasAddress */
-
-       return ret;
-}
-
-/****************************************************************************/
-static int process_irr(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, InfoRequestResponse * irr)
-{
-       int ret;
-       typeof(set_ras_addr_hook) set_ras_addr;
-       typeof(set_sig_addr_hook) set_sig_addr;
-
-       DEBUGP("ip_ct_ras: IRR\n");
-
-       set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr) {
-               ret = set_ras_addr(pskb, ct, ctinfo, data,
-                                  &irr->rasAddress, 1);
-               if (ret < 0)
-                       return -1;
-       }
-
-       set_sig_addr = rcu_dereference(set_sig_addr_hook);
-       if (set_sig_addr) {
-               ret = set_sig_addr(pskb, ct, ctinfo, data,
-                                  irr->callSignalAddress.item,
-                                  irr->callSignalAddress.count);
-               if (ret < 0)
-                       return -1;
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int process_ras(struct sk_buff **pskb, struct ip_conntrack *ct,
-                      enum ip_conntrack_info ctinfo,
-                      unsigned char **data, RasMessage * ras)
-{
-       switch (ras->choice) {
-       case eRasMessage_gatekeeperRequest:
-               return process_grq(pskb, ct, ctinfo, data,
-                                  &ras->gatekeeperRequest);
-       case eRasMessage_gatekeeperConfirm:
-               return process_gcf(pskb, ct, ctinfo, data,
-                                  &ras->gatekeeperConfirm);
-       case eRasMessage_registrationRequest:
-               return process_rrq(pskb, ct, ctinfo, data,
-                                  &ras->registrationRequest);
-       case eRasMessage_registrationConfirm:
-               return process_rcf(pskb, ct, ctinfo, data,
-                                  &ras->registrationConfirm);
-       case eRasMessage_unregistrationRequest:
-               return process_urq(pskb, ct, ctinfo, data,
-                                  &ras->unregistrationRequest);
-       case eRasMessage_admissionRequest:
-               return process_arq(pskb, ct, ctinfo, data,
-                                  &ras->admissionRequest);
-       case eRasMessage_admissionConfirm:
-               return process_acf(pskb, ct, ctinfo, data,
-                                  &ras->admissionConfirm);
-       case eRasMessage_locationRequest:
-               return process_lrq(pskb, ct, ctinfo, data,
-                                  &ras->locationRequest);
-       case eRasMessage_locationConfirm:
-               return process_lcf(pskb, ct, ctinfo, data,
-                                  &ras->locationConfirm);
-       case eRasMessage_infoRequestResponse:
-               return process_irr(pskb, ct, ctinfo, data,
-                                  &ras->infoRequestResponse);
-       default:
-               DEBUGP("ip_ct_ras: RAS message %d\n", ras->choice);
-               break;
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int ras_help(struct sk_buff **pskb, struct ip_conntrack *ct,
-                   enum ip_conntrack_info ctinfo)
-{
-       static RasMessage ras;
-       unsigned char *data;
-       int datalen = 0;
-       int ret;
-
-       DEBUGP("ip_ct_ras: skblen = %u\n", (*pskb)->len);
-
-       spin_lock_bh(&ip_h323_lock);
-
-       /* Get UDP data */
-       data = get_udp_data(pskb, &datalen);
-       if (data == NULL)
-               goto accept;
-       DEBUGP("ip_ct_ras: RAS message %u.%u.%u.%u->%u.%u.%u.%u, len=%d\n",
-              NIPQUAD((*pskb)->nh.iph->saddr),
-              NIPQUAD((*pskb)->nh.iph->daddr), datalen);
-
-       /* Decode RAS message */
-       ret = DecodeRasMessage(data, datalen, &ras);
-       if (ret < 0) {
-               if (net_ratelimit())
-                       printk("ip_ct_ras: decoding error: %s\n",
-                              ret == H323_ERROR_BOUND ?
-                              "out of bound" : "out of range");
-               goto accept;
-       }
-
-       /* Process RAS message */
-       if (process_ras(pskb, ct, ctinfo, &data, &ras) < 0)
-               goto drop;
-
-      accept:
-       spin_unlock_bh(&ip_h323_lock);
-       return NF_ACCEPT;
-
-      drop:
-       spin_unlock_bh(&ip_h323_lock);
-       if (net_ratelimit())
-               printk("ip_ct_ras: packet dropped\n");
-       return NF_DROP;
-}
-
-/****************************************************************************/
-static struct ip_conntrack_helper ip_conntrack_helper_ras = {
-       .name = "RAS",
-       .me = THIS_MODULE,
-       .max_expected = 32,
-       .timeout = 240,
-       .tuple = {.src = {.u = {.tcp = {.port = __constant_htons(RAS_PORT)}}},
-                 .dst = {.protonum = IPPROTO_UDP}},
-       .mask = {.src = {.u = {0xFFFE}},
-                .dst = {.protonum = 0xFF}},
-       .help = ras_help,
-};
-
-/****************************************************************************/
-static void ip_conntrack_ras_expect(struct ip_conntrack *new,
-                                   struct ip_conntrack_expect *this)
-{
-       write_lock_bh(&ip_conntrack_lock);
-       new->helper = &ip_conntrack_helper_ras;
-       write_unlock_bh(&ip_conntrack_lock);
-}
-
-/****************************************************************************/
-/* Not __exit - called from init() */
-static void fini(void)
-{
-       ip_conntrack_helper_unregister(&ip_conntrack_helper_ras);
-       ip_conntrack_helper_unregister(&ip_conntrack_helper_q931);
-       kfree(h323_buffer);
-       DEBUGP("ip_ct_h323: fini\n");
-}
-
-/****************************************************************************/
-static int __init init(void)
-{
-       int ret;
-
-       h323_buffer = kmalloc(65536, GFP_KERNEL);
-       if (!h323_buffer)
-               return -ENOMEM;
-       if ((ret = ip_conntrack_helper_register(&ip_conntrack_helper_q931)) ||
-           (ret = ip_conntrack_helper_register(&ip_conntrack_helper_ras))) {
-               fini();
-               return ret;
-       }
-       DEBUGP("ip_ct_h323: init success\n");
-       return 0;
-}
-
-/****************************************************************************/
-module_init(init);
-module_exit(fini);
-
-EXPORT_SYMBOL_GPL(get_h225_addr);
-EXPORT_SYMBOL_GPL(ip_conntrack_h245_expect);
-EXPORT_SYMBOL_GPL(ip_conntrack_q931_expect);
-EXPORT_SYMBOL_GPL(set_h245_addr_hook);
-EXPORT_SYMBOL_GPL(set_h225_addr_hook);
-EXPORT_SYMBOL_GPL(set_sig_addr_hook);
-EXPORT_SYMBOL_GPL(set_ras_addr_hook);
-EXPORT_SYMBOL_GPL(nat_rtp_rtcp_hook);
-EXPORT_SYMBOL_GPL(nat_t120_hook);
-EXPORT_SYMBOL_GPL(nat_h245_hook);
-EXPORT_SYMBOL_GPL(nat_callforwarding_hook);
-EXPORT_SYMBOL_GPL(nat_q931_hook);
-
-MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
-MODULE_DESCRIPTION("H.323 connection tracking helper");
-MODULE_LICENSE("GPL");
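The helper above never calls its NAT counterpart directly; it publishes function-pointer hooks (nat_q931_hook, set_h225_addr_hook and friends, exported just above) and reads them with rcu_dereference() before each use. Below is a minimal sketch of the other side of that contract, i.e. how a separate module would typically install and clear such a hook. The pointer name sample_nat_hook and its simplified signature are placeholders for illustration, not the real ip_nat_h323 interface.

/* Sketch only: the RCU publish/clear pattern assumed by the
 * rcu_dereference() calls in the helper above.  sample_nat_hook and
 * sample_nat_fn are hypothetical placeholders, not the real hooks. */
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

static int (*sample_nat_hook)(struct sk_buff **pskb) __read_mostly;

static int sample_nat_fn(struct sk_buff **pskb)
{
        /* a real hook would rewrite the embedded addresses here */
        return 0;
}

static int __init sample_nat_init(void)
{
        /* publish the hook; the helper picks it up via rcu_dereference() */
        rcu_assign_pointer(sample_nat_hook, sample_nat_fn);
        return 0;
}

static void __exit sample_nat_fini(void)
{
        /* clear the hook, then wait for any reader still inside it */
        rcu_assign_pointer(sample_nat_hook, NULL);
        synchronize_rcu();
}

module_init(sample_nat_init);
module_exit(sample_nat_fini);
MODULE_LICENSE("GPL");

Keeping the NAT logic behind an RCU hook lets the conntrack helper load and run without the NAT module present, which is what the "Conntrack only" branches in expect_q931() and friends handle.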
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
deleted file mode 100644 (file)
index 2b760c5..0000000
+++ /dev/null
@@ -1,684 +0,0 @@
-/*
- * ip_conntrack_pptp.c - Version 3.1
- *
- * Connection tracking support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a protocol for creating virtual private networks.
- * It is a specification defined by Microsoft and some vendors
- * working with Microsoft.  PPTP is built on top of a modified
- * version of the Internet Generic Routing Encapsulation Protocol.
- * GRE is defined in RFC 1701 and RFC 1702.  Documentation of
- * PPTP can be found in RFC 2637.
- *
- * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- * Limitations:
- *      - We blindly assume that control connections are always
- *        established in PNS->PAC direction.  This is a violation
- *        of RFC 2637
- *      - We can only support a single call within each session
- *
- * TODO:
- *      - testing of incoming PPTP calls
- *
- * Changes:
- *     2002-02-05 - Version 1.3
- *       - Call ip_conntrack_unexpect_related() from
- *         pptp_destroy_siblings() to destroy expectations in case
- *         CALL_DISCONNECT_NOTIFY or tcp fin packet was seen
- *         (Philip Craig <philipc@snapgear.com>)
- *       - Add Version information at module loadtime
- *     2002-02-10 - Version 1.6
- *       - move to C99 style initializers
- *       - remove second expectation if first arrives
- *     2004-10-22 - Version 2.0
- *       - merge Mandrake's 2.6.x port with recent 2.6.x API changes
- *       - fix lots of linear skb assumptions from Mandrake's port
- *     2005-06-10 - Version 2.1
- *       - use ip_conntrack_expect_free() instead of kfree() on the
- *         expect's (which are from the slab for quite some time)
- *     2005-06-10 - Version 3.0
- *       - port helper to post-2.6.11 API changes,
- *         funded by Oxcoda NetBox Blue (http://www.netboxblue.com/)
- *     2005-07-30 - Version 3.1
- *       - port helper to 2.6.13 API changes
- *
- */
-
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-#include <net/tcp.h>
-
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
-
-#define IP_CT_PPTP_VERSION "3.1"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP");
-
-static DEFINE_SPINLOCK(ip_pptp_lock);
-
-int
-(*ip_nat_pptp_hook_outbound)(struct sk_buff **pskb,
-                         struct ip_conntrack *ct,
-                         enum ip_conntrack_info ctinfo,
-                         struct PptpControlHeader *ctlh,
-                         union pptp_ctrl_union *pptpReq);
-
-int
-(*ip_nat_pptp_hook_inbound)(struct sk_buff **pskb,
-                         struct ip_conntrack *ct,
-                         enum ip_conntrack_info ctinfo,
-                         struct PptpControlHeader *ctlh,
-                         union pptp_ctrl_union *pptpReq);
-
-void
-(*ip_nat_pptp_hook_exp_gre)(struct ip_conntrack_expect *expect_orig,
-                           struct ip_conntrack_expect *expect_reply);
-
-void
-(*ip_nat_pptp_hook_expectfn)(struct ip_conntrack *ct,
-                            struct ip_conntrack_expect *exp);
-
-#if 0
-/* PptpControlMessageType names */
-const char *pptp_msg_name[] = {
-       "UNKNOWN_MESSAGE",
-       "START_SESSION_REQUEST",
-       "START_SESSION_REPLY",
-       "STOP_SESSION_REQUEST",
-       "STOP_SESSION_REPLY",
-       "ECHO_REQUEST",
-       "ECHO_REPLY",
-       "OUT_CALL_REQUEST",
-       "OUT_CALL_REPLY",
-       "IN_CALL_REQUEST",
-       "IN_CALL_REPLY",
-       "IN_CALL_CONNECT",
-       "CALL_CLEAR_REQUEST",
-       "CALL_DISCONNECT_NOTIFY",
-       "WAN_ERROR_NOTIFY",
-       "SET_LINK_INFO"
-};
-EXPORT_SYMBOL(pptp_msg_name);
-#define DEBUGP(format, args...)        printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
-#else
-#define DEBUGP(format, args...)
-#endif
-
-#define SECS *HZ
-#define MINS * 60 SECS
-#define HOURS * 60 MINS
-
-#define PPTP_GRE_TIMEOUT               (10 MINS)
-#define PPTP_GRE_STREAM_TIMEOUT        (5 HOURS)
-
-static void pptp_expectfn(struct ip_conntrack *ct,
-                        struct ip_conntrack_expect *exp)
-{
-       typeof(ip_nat_pptp_hook_expectfn) ip_nat_pptp_expectfn;
-
-       DEBUGP("increasing timeouts\n");
-
-       /* increase timeout of GRE data channel conntrack entry */
-       ct->proto.gre.timeout = PPTP_GRE_TIMEOUT;
-       ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT;
-
-       /* Can you see how rusty this code is, compared with the pre-2.6.11
-        * one? That's what happened to my shiny newnat of 2002 ;( -HW */
-
-       rcu_read_lock();
-       ip_nat_pptp_expectfn = rcu_dereference(ip_nat_pptp_hook_expectfn);
-       if (!ip_nat_pptp_expectfn) {
-               struct ip_conntrack_tuple inv_t;
-               struct ip_conntrack_expect *exp_other;
-
-               /* obviously this tuple inversion only works until you do NAT */
-               invert_tuplepr(&inv_t, &exp->tuple);
-               DEBUGP("trying to unexpect other dir: ");
-               DUMP_TUPLE(&inv_t);
-
-               exp_other = ip_conntrack_expect_find_get(&inv_t);
-               if (exp_other) {
-                       /* delete other expectation.  */
-                       DEBUGP("found\n");
-                       ip_conntrack_unexpect_related(exp_other);
-                       ip_conntrack_expect_put(exp_other);
-               } else {
-                       DEBUGP("not found\n");
-               }
-       } else {
-               /* we need more than simple inversion */
-               ip_nat_pptp_expectfn(ct, exp);
-       }
-       rcu_read_unlock();
-}
-
-static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t)
-{
-       struct ip_conntrack_tuple_hash *h;
-       struct ip_conntrack_expect *exp;
-
-       DEBUGP("trying to timeout ct or exp for tuple ");
-       DUMP_TUPLE(t);
-
-       h = ip_conntrack_find_get(t, NULL);
-       if (h)  {
-               struct ip_conntrack *sibling = tuplehash_to_ctrack(h);
-               DEBUGP("setting timeout of conntrack %p to 0\n", sibling);
-               sibling->proto.gre.timeout = 0;
-               sibling->proto.gre.stream_timeout = 0;
-               if (del_timer(&sibling->timeout))
-                       sibling->timeout.function((unsigned long)sibling);
-               ip_conntrack_put(sibling);
-               return 1;
-       } else {
-               exp = ip_conntrack_expect_find_get(t);
-               if (exp) {
-                       DEBUGP("unexpect_related of expect %p\n", exp);
-                       ip_conntrack_unexpect_related(exp);
-                       ip_conntrack_expect_put(exp);
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-
-/* timeout GRE data connections */
-static void pptp_destroy_siblings(struct ip_conntrack *ct)
-{
-       struct ip_conntrack_tuple t;
-
-       ip_ct_gre_keymap_destroy(ct);
-       /* Since ct->sibling_list has literally rusted away in 2.6.11,
-        * we now need another way to find out about our sibling
-        * conntrack and expects... -HW */
-
-       /* try original (pns->pac) tuple */
-       memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t));
-       t.dst.protonum = IPPROTO_GRE;
-       t.src.u.gre.key = ct->help.ct_pptp_info.pns_call_id;
-       t.dst.u.gre.key = ct->help.ct_pptp_info.pac_call_id;
-
-       if (!destroy_sibling_or_exp(&t))
-               DEBUGP("failed to timeout original pns->pac ct/exp\n");
-
-       /* try reply (pac->pns) tuple */
-       memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
-       t.dst.protonum = IPPROTO_GRE;
-       t.src.u.gre.key = ct->help.ct_pptp_info.pac_call_id;
-       t.dst.u.gre.key = ct->help.ct_pptp_info.pns_call_id;
-
-       if (!destroy_sibling_or_exp(&t))
-               DEBUGP("failed to timeout reply pac->pns ct/exp\n");
-}
-
-/* expect GRE connections (PNS->PAC and PAC->PNS direction) */
-static inline int
-exp_gre(struct ip_conntrack *ct,
-       __be16 callid,
-       __be16 peer_callid)
-{
-       struct ip_conntrack_expect *exp_orig, *exp_reply;
-       int ret = 1;
-       typeof(ip_nat_pptp_hook_exp_gre) ip_nat_pptp_exp_gre;
-
-       exp_orig = ip_conntrack_expect_alloc(ct);
-       if (exp_orig == NULL)
-               goto out;
-
-       exp_reply = ip_conntrack_expect_alloc(ct);
-       if (exp_reply == NULL)
-               goto out_put_orig;
-
-       /* original direction, PNS->PAC */
-       exp_orig->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
-       exp_orig->tuple.src.u.gre.key = peer_callid;
-       exp_orig->tuple.dst.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
-       exp_orig->tuple.dst.u.gre.key = callid;
-       exp_orig->tuple.dst.protonum = IPPROTO_GRE;
-
-       exp_orig->mask.src.ip = htonl(0xffffffff);
-       exp_orig->mask.src.u.all = 0;
-       exp_orig->mask.dst.u.gre.key = htons(0xffff);
-       exp_orig->mask.dst.ip = htonl(0xffffffff);
-       exp_orig->mask.dst.protonum = 0xff;
-
-       exp_orig->master = ct;
-       exp_orig->expectfn = pptp_expectfn;
-       exp_orig->flags = 0;
-
-       /* both expectations are identical apart from tuple */
-       memcpy(exp_reply, exp_orig, sizeof(*exp_reply));
-
-       /* reply direction, PAC->PNS */
-       exp_reply->tuple.src.ip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip;
-       exp_reply->tuple.src.u.gre.key = callid;
-       exp_reply->tuple.dst.ip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
-       exp_reply->tuple.dst.u.gre.key = peer_callid;
-       exp_reply->tuple.dst.protonum = IPPROTO_GRE;
-
-       ip_nat_pptp_exp_gre = rcu_dereference(ip_nat_pptp_hook_exp_gre);
-       if (ip_nat_pptp_exp_gre)
-               ip_nat_pptp_exp_gre(exp_orig, exp_reply);
-       if (ip_conntrack_expect_related(exp_orig) != 0)
-               goto out_put_both;
-       if (ip_conntrack_expect_related(exp_reply) != 0)
-               goto out_unexpect_orig;
-
-       /* Add GRE keymap entries */
-       if (ip_ct_gre_keymap_add(ct, &exp_orig->tuple, 0) != 0)
-               goto out_unexpect_both;
-       if (ip_ct_gre_keymap_add(ct, &exp_reply->tuple, 1) != 0) {
-               ip_ct_gre_keymap_destroy(ct);
-               goto out_unexpect_both;
-       }
-       ret = 0;
-
-out_put_both:
-       ip_conntrack_expect_put(exp_reply);
-out_put_orig:
-       ip_conntrack_expect_put(exp_orig);
-out:
-       return ret;
-
-out_unexpect_both:
-       ip_conntrack_unexpect_related(exp_reply);
-out_unexpect_orig:
-       ip_conntrack_unexpect_related(exp_orig);
-       goto out_put_both;
-}
-
-static inline int
-pptp_inbound_pkt(struct sk_buff **pskb,
-                struct PptpControlHeader *ctlh,
-                union pptp_ctrl_union *pptpReq,
-                unsigned int reqlen,
-                struct ip_conntrack *ct,
-                enum ip_conntrack_info ctinfo)
-{
-       struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
-       u_int16_t msg;
-       __be16 cid = 0, pcid = 0;
-       typeof(ip_nat_pptp_hook_inbound) ip_nat_pptp_inbound;
-
-       msg = ntohs(ctlh->messageType);
-       DEBUGP("inbound control message %s\n", pptp_msg_name[msg]);
-
-       switch (msg) {
-       case PPTP_START_SESSION_REPLY:
-               /* server confirms new control session */
-               if (info->sstate < PPTP_SESSION_REQUESTED)
-                       goto invalid;
-               if (pptpReq->srep.resultCode == PPTP_START_OK)
-                       info->sstate = PPTP_SESSION_CONFIRMED;
-               else
-                       info->sstate = PPTP_SESSION_ERROR;
-               break;
-
-       case PPTP_STOP_SESSION_REPLY:
-               /* server confirms end of control session */
-               if (info->sstate > PPTP_SESSION_STOPREQ)
-                       goto invalid;
-               if (pptpReq->strep.resultCode == PPTP_STOP_OK)
-                       info->sstate = PPTP_SESSION_NONE;
-               else
-                       info->sstate = PPTP_SESSION_ERROR;
-               break;
-
-       case PPTP_OUT_CALL_REPLY:
-               /* server accepted call, we now expect GRE frames */
-               if (info->sstate != PPTP_SESSION_CONFIRMED)
-                       goto invalid;
-               if (info->cstate != PPTP_CALL_OUT_REQ &&
-                   info->cstate != PPTP_CALL_OUT_CONF)
-                       goto invalid;
-
-               cid = pptpReq->ocack.callID;
-               pcid = pptpReq->ocack.peersCallID;
-               if (info->pns_call_id != pcid)
-                       goto invalid;
-               DEBUGP("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
-                       ntohs(cid), ntohs(pcid));
-
-               if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {
-                       info->cstate = PPTP_CALL_OUT_CONF;
-                       info->pac_call_id = cid;
-                       exp_gre(ct, cid, pcid);
-               } else
-                       info->cstate = PPTP_CALL_NONE;
-               break;
-
-       case PPTP_IN_CALL_REQUEST:
-               /* server tells us about incoming call request */
-               if (info->sstate != PPTP_SESSION_CONFIRMED)
-                       goto invalid;
-
-               cid = pptpReq->icreq.callID;
-               DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
-               info->cstate = PPTP_CALL_IN_REQ;
-               info->pac_call_id = cid;
-               break;
-
-       case PPTP_IN_CALL_CONNECT:
-               /* server tells us about incoming call established */
-               if (info->sstate != PPTP_SESSION_CONFIRMED)
-                       goto invalid;
-               if (info->cstate != PPTP_CALL_IN_REP &&
-                   info->cstate != PPTP_CALL_IN_CONF)
-                       goto invalid;
-
-               pcid = pptpReq->iccon.peersCallID;
-               cid = info->pac_call_id;
-
-               if (info->pns_call_id != pcid)
-                       goto invalid;
-
-               DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
-               info->cstate = PPTP_CALL_IN_CONF;
-
-               /* we expect a GRE connection from PAC to PNS */
-               exp_gre(ct, cid, pcid);
-               break;
-
-       case PPTP_CALL_DISCONNECT_NOTIFY:
-               /* server confirms disconnect */
-               cid = pptpReq->disc.callID;
-               DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
-               info->cstate = PPTP_CALL_NONE;
-
-               /* untrack this call id, unexpect GRE packets */
-               pptp_destroy_siblings(ct);
-               break;
-
-       case PPTP_WAN_ERROR_NOTIFY:
-       case PPTP_ECHO_REQUEST:
-       case PPTP_ECHO_REPLY:
-               /* I don't have to explain these ;) */
-               break;
-       default:
-               goto invalid;
-       }
-
-       ip_nat_pptp_inbound = rcu_dereference(ip_nat_pptp_hook_inbound);
-       if (ip_nat_pptp_inbound)
-               return ip_nat_pptp_inbound(pskb, ct, ctinfo, ctlh, pptpReq);
-       return NF_ACCEPT;
-
-invalid:
-       DEBUGP("invalid %s: type=%d cid=%u pcid=%u "
-              "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
-              msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
-              msg, ntohs(cid), ntohs(pcid),  info->cstate, info->sstate,
-              ntohs(info->pns_call_id), ntohs(info->pac_call_id));
-       return NF_ACCEPT;
-}
-
-static inline int
-pptp_outbound_pkt(struct sk_buff **pskb,
-                 struct PptpControlHeader *ctlh,
-                 union pptp_ctrl_union *pptpReq,
-                 unsigned int reqlen,
-                 struct ip_conntrack *ct,
-                 enum ip_conntrack_info ctinfo)
-{
-       struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
-       u_int16_t msg;
-       __be16 cid = 0, pcid = 0;
-       typeof(ip_nat_pptp_hook_outbound) ip_nat_pptp_outbound;
-
-       msg = ntohs(ctlh->messageType);
-       DEBUGP("outbound control message %s\n", pptp_msg_name[msg]);
-
-       switch (msg) {
-       case PPTP_START_SESSION_REQUEST:
-               /* client requests a new control session */
-               if (info->sstate != PPTP_SESSION_NONE)
-                       goto invalid;
-               info->sstate = PPTP_SESSION_REQUESTED;
-               break;
-       case PPTP_STOP_SESSION_REQUEST:
-               /* client requests end of control session */
-               info->sstate = PPTP_SESSION_STOPREQ;
-               break;
-
-       case PPTP_OUT_CALL_REQUEST:
-               /* client initiating connection to server */
-               if (info->sstate != PPTP_SESSION_CONFIRMED)
-                       goto invalid;
-               info->cstate = PPTP_CALL_OUT_REQ;
-               /* track PNS call id */
-               cid = pptpReq->ocreq.callID;
-               DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
-               info->pns_call_id = cid;
-               break;
-       case PPTP_IN_CALL_REPLY:
-               /* client answers incoming call */
-               if (info->cstate != PPTP_CALL_IN_REQ &&
-                   info->cstate != PPTP_CALL_IN_REP)
-                       goto invalid;
-
-               cid = pptpReq->icack.callID;
-               pcid = pptpReq->icack.peersCallID;
-               if (info->pac_call_id != pcid)
-                       goto invalid;
-               DEBUGP("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
-                      ntohs(cid), ntohs(pcid));
-
-               if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {
-                       /* part two of the three-way handshake */
-                       info->cstate = PPTP_CALL_IN_REP;
-                       info->pns_call_id = cid;
-               } else
-                       info->cstate = PPTP_CALL_NONE;
-               break;
-
-       case PPTP_CALL_CLEAR_REQUEST:
-               /* client requests hangup of call */
-               if (info->sstate != PPTP_SESSION_CONFIRMED)
-                       goto invalid;
-               /* FUTURE: iterate over all calls and check if
-                * call ID is valid.  We don't do this without newnat,
-                * because we only know about last call */
-               info->cstate = PPTP_CALL_CLEAR_REQ;
-               break;
-       case PPTP_SET_LINK_INFO:
-       case PPTP_ECHO_REQUEST:
-       case PPTP_ECHO_REPLY:
-               /* I don't have to explain these ;) */
-               break;
-       default:
-               goto invalid;
-       }
-
-       ip_nat_pptp_outbound = rcu_dereference(ip_nat_pptp_hook_outbound);
-       if (ip_nat_pptp_outbound)
-               return ip_nat_pptp_outbound(pskb, ct, ctinfo, ctlh, pptpReq);
-       return NF_ACCEPT;
-
-invalid:
-       DEBUGP("invalid %s: type=%d cid=%u pcid=%u "
-              "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
-              msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
-              msg, ntohs(cid), ntohs(pcid),  info->cstate, info->sstate,
-              ntohs(info->pns_call_id), ntohs(info->pac_call_id));
-       return NF_ACCEPT;
-}
-
-static const unsigned int pptp_msg_size[] = {
-       [PPTP_START_SESSION_REQUEST]  = sizeof(struct PptpStartSessionRequest),
-       [PPTP_START_SESSION_REPLY]    = sizeof(struct PptpStartSessionReply),
-       [PPTP_STOP_SESSION_REQUEST]   = sizeof(struct PptpStopSessionRequest),
-       [PPTP_STOP_SESSION_REPLY]     = sizeof(struct PptpStopSessionReply),
-       [PPTP_OUT_CALL_REQUEST]       = sizeof(struct PptpOutCallRequest),
-       [PPTP_OUT_CALL_REPLY]         = sizeof(struct PptpOutCallReply),
-       [PPTP_IN_CALL_REQUEST]        = sizeof(struct PptpInCallRequest),
-       [PPTP_IN_CALL_REPLY]          = sizeof(struct PptpInCallReply),
-       [PPTP_IN_CALL_CONNECT]        = sizeof(struct PptpInCallConnected),
-       [PPTP_CALL_CLEAR_REQUEST]     = sizeof(struct PptpClearCallRequest),
-       [PPTP_CALL_DISCONNECT_NOTIFY] = sizeof(struct PptpCallDisconnectNotify),
-       [PPTP_WAN_ERROR_NOTIFY]       = sizeof(struct PptpWanErrorNotify),
-       [PPTP_SET_LINK_INFO]          = sizeof(struct PptpSetLinkInfo),
-};
-
-/* track caller id inside control connection, call expect_related */
-static int
-conntrack_pptp_help(struct sk_buff **pskb,
-                   struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
-
-{
-       int dir = CTINFO2DIR(ctinfo);
-       struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
-       struct tcphdr _tcph, *tcph;
-       struct pptp_pkt_hdr _pptph, *pptph;
-       struct PptpControlHeader _ctlh, *ctlh;
-       union pptp_ctrl_union _pptpReq, *pptpReq;
-       unsigned int tcplen = (*pskb)->len - (*pskb)->nh.iph->ihl * 4;
-       unsigned int datalen, reqlen, nexthdr_off;
-       int oldsstate, oldcstate;
-       int ret;
-       u_int16_t msg;
-
-       /* don't do any tracking before the TCP handshake is complete */
-       if (ctinfo != IP_CT_ESTABLISHED
-           && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
-               DEBUGP("ctinfo = %u, skipping\n", ctinfo);
-               return NF_ACCEPT;
-       }
-
-       nexthdr_off = (*pskb)->nh.iph->ihl*4;
-       tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph);
-       BUG_ON(!tcph);
-       nexthdr_off += tcph->doff * 4;
-       datalen = tcplen - tcph->doff * 4;
-
-       pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph);
-       if (!pptph) {
-               DEBUGP("no full PPTP header, can't track\n");
-               return NF_ACCEPT;
-       }
-       nexthdr_off += sizeof(_pptph);
-       datalen -= sizeof(_pptph);
-
-       /* if it's not a control message we can't do anything with it */
-       if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL ||
-           ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) {
-               DEBUGP("not a control packet\n");
-               return NF_ACCEPT;
-       }
-
-       ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh);
-       if (!ctlh)
-               return NF_ACCEPT;
-       nexthdr_off += sizeof(_ctlh);
-       datalen -= sizeof(_ctlh);
-
-       reqlen = datalen;
-       msg = ntohs(ctlh->messageType);
-       if (msg > 0 && msg <= PPTP_MSG_MAX && reqlen < pptp_msg_size[msg])
-               return NF_ACCEPT;
-       if (reqlen > sizeof(*pptpReq))
-               reqlen = sizeof(*pptpReq);
-
-       pptpReq = skb_header_pointer(*pskb, nexthdr_off, reqlen, &_pptpReq);
-       if (!pptpReq)
-               return NF_ACCEPT;
-
-       oldsstate = info->sstate;
-       oldcstate = info->cstate;
-
-       spin_lock_bh(&ip_pptp_lock);
-
-       /* FIXME: We just blindly assume that the control connection is always
-        * established from PNS->PAC.  However, the RFC makes no such guarantee */
-       if (dir == IP_CT_DIR_ORIGINAL)
-               /* client -> server (PNS -> PAC) */
-               ret = pptp_outbound_pkt(pskb, ctlh, pptpReq, reqlen, ct,
-                                       ctinfo);
-       else
-               /* server -> client (PAC -> PNS) */
-               ret = pptp_inbound_pkt(pskb, ctlh, pptpReq, reqlen, ct,
-                                      ctinfo);
-       DEBUGP("sstate: %d->%d, cstate: %d->%d\n",
-               oldsstate, info->sstate, oldcstate, info->cstate);
-       spin_unlock_bh(&ip_pptp_lock);
-
-       return ret;
-}
-
-/* control protocol helper */
-static struct ip_conntrack_helper pptp = {
-       .list = { NULL, NULL },
-       .name = "pptp",
-       .me = THIS_MODULE,
-       .max_expected = 2,
-       .timeout = 5 * 60,
-       .tuple = { .src = { .ip = 0,
-                           .u = { .tcp = { .port =
-                                   __constant_htons(PPTP_CONTROL_PORT) } }
-                         },
-                  .dst = { .ip = 0,
-                           .u = { .all = 0 },
-                           .protonum = IPPROTO_TCP
-                         }
-                },
-       .mask = { .src = { .ip = 0,
-                          .u = { .tcp = { .port = __constant_htons(0xffff) } }
-                        },
-                 .dst = { .ip = 0,
-                          .u = { .all = 0 },
-                          .protonum = 0xff
-                        }
-               },
-       .help = conntrack_pptp_help,
-       .destroy = pptp_destroy_siblings,
-};
-
-extern void ip_ct_proto_gre_fini(void);
-extern int __init ip_ct_proto_gre_init(void);
-
-/* ip_conntrack_pptp initialization */
-static int __init ip_conntrack_helper_pptp_init(void)
-{
-       int retcode;
-
-       retcode = ip_ct_proto_gre_init();
-       if (retcode < 0)
-               return retcode;
-
-       DEBUGP(" registering helper\n");
-       if ((retcode = ip_conntrack_helper_register(&pptp))) {
-               printk(KERN_ERR "Unable to register conntrack application "
-                               "helper for pptp: %d\n", retcode);
-               ip_ct_proto_gre_fini();
-               return retcode;
-       }
-
-       printk("ip_conntrack_pptp version %s loaded\n", IP_CT_PPTP_VERSION);
-       return 0;
-}
-
-static void __exit ip_conntrack_helper_pptp_fini(void)
-{
-       ip_conntrack_helper_unregister(&pptp);
-       ip_ct_proto_gre_fini();
-       printk("ip_conntrack_pptp version %s unloaded\n", IP_CT_PPTP_VERSION);
-}
-
-module_init(ip_conntrack_helper_pptp_init);
-module_exit(ip_conntrack_helper_pptp_fini);
-
-EXPORT_SYMBOL(ip_nat_pptp_hook_outbound);
-EXPORT_SYMBOL(ip_nat_pptp_hook_inbound);
-EXPORT_SYMBOL(ip_nat_pptp_hook_exp_gre);
-EXPORT_SYMBOL(ip_nat_pptp_hook_expectfn);
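
A minimal userspace sketch of the length-guard pattern used by the helper
above: look up a per-message minimum size and refuse to touch the payload
when fewer bytes are present.  This is not taken from the removed file; the
ex_* names and the placeholder body sizes are invented for the example (only
the magic-cookie value is the one defined by RFC 2637).

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define EX_MAGIC_COOKIE 0x1a2b3c4dUL    /* PPTP magic cookie, RFC 2637 */
#define EX_MSG_MAX      2

struct ex_ctl_hdr {
        uint16_t length;                /* total control-message length */
        uint16_t packet_type;           /* 1 == control message */
        uint32_t magic_cookie;
        uint16_t message_type;
        uint16_t reserved;
};

/* placeholder minimum payload sizes, indexed by message type */
static const size_t ex_msg_size[EX_MSG_MAX + 1] = {
        [1] = 148,
        [2] = 148,
};

static int ex_parse(const uint8_t *pkt, size_t len)
{
        struct ex_ctl_hdr hdr;
        uint16_t msg;

        if (len < sizeof(hdr))
                return -1;              /* truncated header */
        memcpy(&hdr, pkt, sizeof(hdr));

        if (ntohs(hdr.packet_type) != 1 ||
            ntohl(hdr.magic_cookie) != EX_MAGIC_COOKIE)
                return -1;              /* not a control message */

        msg = ntohs(hdr.message_type);
        if (msg == 0 || msg > EX_MSG_MAX)
                return -1;              /* unknown message type */
        if (len - sizeof(hdr) < ex_msg_size[msg])
                return -1;              /* payload shorter than the minimum */

        printf("control message %u, %zu payload bytes\n",
               msg, len - sizeof(hdr));
        return 0;
}

int main(void)
{
        uint8_t pkt[sizeof(struct ex_ctl_hdr) + 148] = { 0 };
        struct ex_ctl_hdr hdr = {
                .length       = htons(sizeof(pkt)),
                .packet_type  = htons(1),
                .magic_cookie = htonl(EX_MAGIC_COOKIE),
                .message_type = htons(1),
        };

        memcpy(pkt, &hdr, sizeof(hdr));
        return ex_parse(pkt, sizeof(pkt)) == 0 ? 0 : 1;
}
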
diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c
deleted file mode 100644 (file)
index 053e591..0000000
+++ /dev/null
@@ -1,314 +0,0 @@
-/* IRC extension for IP connection tracking, Version 1.21
- * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org>
- * based on RR's ip_conntrack_ftp.c
- *
- * ip_conntrack_irc.c,v 1.21 2002/02/05 14:49:26 laforge Exp
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- **
- *     Module load syntax:
- *     insmod ip_conntrack_irc.o ports=port1,port2,...port<MAX_PORTS>
- *                         max_dcc_channels=n dcc_timeout=secs
- *
- *     Please give the ports of all IRC servers you wish to connect to.
- *     If you don't specify ports, the default is port 6667.
- *     With max_dcc_channels you can define the maximum number of
- *     not-yet-answered DCC channels per IRC session (default 8).
- *     With dcc_timeout you can specify how long the system waits for
- *     an expected DCC channel (default 300 seconds).
- *
- */
-
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-#include <net/tcp.h>
-
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_irc.h>
-#include <linux/moduleparam.h>
-
-#define MAX_PORTS 8
-static unsigned short ports[MAX_PORTS];
-static int ports_c;
-static unsigned int max_dcc_channels = 8;
-static unsigned int dcc_timeout = 300;
-/* This is slow, but it's simple. --RR */
-static char *irc_buffer;
-static DEFINE_SPINLOCK(irc_buffer_lock);
-
-unsigned int (*ip_nat_irc_hook)(struct sk_buff **pskb,
-                               enum ip_conntrack_info ctinfo,
-                               unsigned int matchoff,
-                               unsigned int matchlen,
-                               struct ip_conntrack_expect *exp);
-EXPORT_SYMBOL_GPL(ip_nat_irc_hook);
-
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("IRC (DCC) connection tracking helper");
-MODULE_LICENSE("GPL");
-module_param_array(ports, ushort, &ports_c, 0400);
-MODULE_PARM_DESC(ports, "port numbers of IRC servers");
-module_param(max_dcc_channels, uint, 0400);
-MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per IRC session");
-module_param(dcc_timeout, uint, 0400);
-MODULE_PARM_DESC(dcc_timeout, "timeout (in seconds) for unestablished DCC channels");
-
-static const char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT " };
-#define MINMATCHLEN    5
-
-#if 0
-#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \
-                                      __FILE__, __FUNCTION__ , ## args)
-#else
-#define DEBUGP(format, args...)
-#endif
-
-static int parse_dcc(char *data, char *data_end, u_int32_t *ip,
-                    u_int16_t *port, char **ad_beg_p, char **ad_end_p)
-/* tries to get the ip_addr and port out of a dcc command
-   return value: -1 on failure, 0 on success
-       data            pointer to first byte of DCC command data
-       data_end        pointer to last byte of dcc command data
-       ip              returns parsed ip of dcc command
-       port            returns parsed port of dcc command
-       ad_beg_p        returns pointer to first byte of addr data
-       ad_end_p        returns pointer to last byte of addr data */
-{
-
-       /* at least 12: "AAAAAAAA P\1\n" */
-       while (*data++ != ' ')
-               if (data > data_end - 12)
-                       return -1;
-
-       *ad_beg_p = data;
-       *ip = simple_strtoul(data, &data, 10);
-
-       /* skip blanks between ip and port */
-       while (*data == ' ') {
-               if (data >= data_end)
-                       return -1;
-               data++;
-       }
-
-       *port = simple_strtoul(data, &data, 10);
-       *ad_end_p = data;
-
-       return 0;
-}
-
-static int help(struct sk_buff **pskb,
-               struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
-{
-       unsigned int dataoff;
-       struct tcphdr _tcph, *th;
-       char *data, *data_limit, *ib_ptr;
-       int dir = CTINFO2DIR(ctinfo);
-       struct ip_conntrack_expect *exp;
-       u32 seq;
-       u_int32_t dcc_ip;
-       u_int16_t dcc_port;
-       int i, ret = NF_ACCEPT;
-       char *addr_beg_p, *addr_end_p;
-       typeof(ip_nat_irc_hook) ip_nat_irc;
-
-       DEBUGP("entered\n");
-
-       /* If packet is coming from IRC server */
-       if (dir == IP_CT_DIR_REPLY)
-               return NF_ACCEPT;
-
-       /* Until there's been traffic both ways, don't look in packets. */
-       if (ctinfo != IP_CT_ESTABLISHED
-           && ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
-               DEBUGP("Conntrackinfo = %u\n", ctinfo);
-               return NF_ACCEPT;
-       }
-
-       /* Not a full tcp header? */
-       th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl*4,
-                               sizeof(_tcph), &_tcph);
-       if (th == NULL)
-               return NF_ACCEPT;
-
-       /* No data? */
-       dataoff = (*pskb)->nh.iph->ihl*4 + th->doff*4;
-       if (dataoff >= (*pskb)->len)
-               return NF_ACCEPT;
-
-       spin_lock_bh(&irc_buffer_lock);
-       ib_ptr = skb_header_pointer(*pskb, dataoff,
-                                   (*pskb)->len - dataoff, irc_buffer);
-       BUG_ON(ib_ptr == NULL);
-
-       data = ib_ptr;
-       data_limit = ib_ptr + (*pskb)->len - dataoff;
-
-       /* strlen("\1DCC SEND t AAAAAAAA P\1\n") = 24, i.e.
-        * 5 ("\1DCC ") + MINMATCHLEN + strlen("t AAAAAAAA P\1\n") (= 14) */
-       while (data < (data_limit - (19 + MINMATCHLEN))) {
-               if (memcmp(data, "\1DCC ", 5)) {
-                       data++;
-                       continue;
-               }
-
-               data += 5;
-               /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
-
-               DEBUGP("DCC found in master %u.%u.%u.%u:%u %u.%u.%u.%u:%u...\n",
-                       NIPQUAD((*pskb)->nh.iph->saddr), ntohs(th->source),
-                       NIPQUAD((*pskb)->nh.iph->daddr), ntohs(th->dest));
-
-               for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
-                       if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
-                               /* no match */
-                               continue;
-                       }
-
-                       DEBUGP("DCC %s detected\n", dccprotos[i]);
-                       data += strlen(dccprotos[i]);
-                       /* we have at least
-                        * (19+MINMATCHLEN)-5-strlen(dccprotos[i]) bytes of
-                        * valid data left (== 14/13 bytes) */
-                       if (parse_dcc((char *)data, data_limit, &dcc_ip,
-                                      &dcc_port, &addr_beg_p, &addr_end_p)) {
-                               /* unable to parse */
-                               DEBUGP("unable to parse dcc command\n");
-                               continue;
-                       }
-                       DEBUGP("DCC bound ip/port: %u.%u.%u.%u:%u\n",
-                               HIPQUAD(dcc_ip), dcc_port);
-
-                       /* dcc_ip can be the internal OR external (NAT'ed) IP
-                        * Tiago Sousa <mirage@kaotik.org> */
-                       if (ct->tuplehash[dir].tuple.src.ip != htonl(dcc_ip)
-                           && ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip != htonl(dcc_ip)) {
-                               if (net_ratelimit())
-                                       printk(KERN_WARNING
-                                               "Forged DCC command from "
-                                               "%u.%u.%u.%u: %u.%u.%u.%u:%u\n",
-                               NIPQUAD(ct->tuplehash[dir].tuple.src.ip),
-                                               HIPQUAD(dcc_ip), dcc_port);
-
-                               continue;
-                       }
-
-                       exp = ip_conntrack_expect_alloc(ct);
-                       if (exp == NULL) {
-                               ret = NF_DROP;
-                               goto out;
-                       }
-
-                       /* save position of address in dcc string,
-                        * necessary for NAT */
-                       DEBUGP("tcph->seq = %u\n", th->seq);
-                       seq = ntohl(th->seq) + (addr_beg_p - ib_ptr);
-
-                       /* We refer to the reverse direction ("!dir")
-                        * tuples here, because we're expecting
-                        * something in the other direction.
-                        * Doesn't matter unless NAT is happening.  */
-                       exp->tuple = ((struct ip_conntrack_tuple)
-                               { { 0, { 0 } },
-                                 { ct->tuplehash[!dir].tuple.dst.ip,
-                                   { .tcp = { htons(dcc_port) } },
-                                   IPPROTO_TCP }});
-                       exp->mask = ((struct ip_conntrack_tuple)
-                               { { 0, { 0 } },
-                                 { htonl(0xFFFFFFFF),
-                                       { .tcp = { htons(0xFFFF) } }, 0xFF }});
-                       exp->expectfn = NULL;
-                       exp->flags = 0;
-                       ip_nat_irc = rcu_dereference(ip_nat_irc_hook);
-                       if (ip_nat_irc)
-                               ret = ip_nat_irc(pskb, ctinfo,
-                                                addr_beg_p - ib_ptr,
-                                                addr_end_p - addr_beg_p,
-                                                exp);
-                       else if (ip_conntrack_expect_related(exp) != 0)
-                               ret = NF_DROP;
-                       ip_conntrack_expect_put(exp);
-                       goto out;
-               } /* for .. NUM_DCCPROTO */
-       } /* while data < ... */
-
- out:
-       spin_unlock_bh(&irc_buffer_lock);
-       return ret;
-}
-
-static struct ip_conntrack_helper irc_helpers[MAX_PORTS];
-static char irc_names[MAX_PORTS][sizeof("irc-65535")];
-
-static void ip_conntrack_irc_fini(void);
-
-static int __init ip_conntrack_irc_init(void)
-{
-       int i, ret;
-       struct ip_conntrack_helper *hlpr;
-       char *tmpname;
-
-       if (max_dcc_channels < 1) {
-               printk("ip_conntrack_irc: max_dcc_channels must be a positive integer\n");
-               return -EBUSY;
-       }
-
-       irc_buffer = kmalloc(65536, GFP_KERNEL);
-       if (!irc_buffer)
-               return -ENOMEM;
-
-       /* If no port given, default to standard irc port */
-       if (ports_c == 0)
-               ports[ports_c++] = IRC_PORT;
-
-       for (i = 0; i < ports_c; i++) {
-               hlpr = &irc_helpers[i];
-               hlpr->tuple.src.u.tcp.port = htons(ports[i]);
-               hlpr->tuple.dst.protonum = IPPROTO_TCP;
-               hlpr->mask.src.u.tcp.port = htons(0xFFFF);
-               hlpr->mask.dst.protonum = 0xFF;
-               hlpr->max_expected = max_dcc_channels;
-               hlpr->timeout = dcc_timeout;
-               hlpr->me = THIS_MODULE;
-               hlpr->help = help;
-
-               tmpname = &irc_names[i][0];
-               if (ports[i] == IRC_PORT)
-                       sprintf(tmpname, "irc");
-               else
-                       sprintf(tmpname, "irc-%d", i);
-               hlpr->name = tmpname;
-
-               DEBUGP("port #%d: %d\n", i, ports[i]);
-
-               ret = ip_conntrack_helper_register(hlpr);
-
-               if (ret) {
-                       printk("ip_conntrack_irc: ERROR registering port %d\n",
-                               ports[i]);
-                       ip_conntrack_irc_fini();
-                       return -EBUSY;
-               }
-       }
-       return 0;
-}
-
-/* This function is intentionally _NOT_ defined as __exit, because
- * it is needed by the init function */
-static void ip_conntrack_irc_fini(void)
-{
-       int i;
-       for (i = 0; i < ports_c; i++) {
-               DEBUGP("unregistering port %d\n",
-                      ports[i]);
-               ip_conntrack_helper_unregister(&irc_helpers[i]);
-       }
-       kfree(irc_buffer);
-}
-
-module_init(ip_conntrack_irc_init);
-module_exit(ip_conntrack_irc_fini);
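
The helper above scans the TCP payload for "\1DCC " and then hands the text
after the keyword to parse_dcc().  Below is a small standalone routine in the
same spirit, not taken from the removed file; the caller is assumed to have
already stripped the leading "DCC SEND " keyword, and the ex_* names are
invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

static int ex_parse_dcc(const char *cmd, uint32_t *ip, uint16_t *port)
{
        char *end;
        unsigned long v;

        /* skip the filename: everything up to the next space */
        cmd = strchr(cmd, ' ');
        if (cmd == NULL)
                return -1;
        cmd++;

        /* host address, sent as a plain decimal number */
        v = strtoul(cmd, &end, 10);
        if (end == cmd || *end != ' ')
                return -1;
        *ip = (uint32_t)v;

        /* port number */
        v = strtoul(end + 1, &end, 10);
        if (v == 0 || v > 65535)
                return -1;
        *port = (uint16_t)v;
        return 0;
}

int main(void)
{
        uint32_t ip;
        uint16_t port;
        struct in_addr a;

        if (ex_parse_dcc("file.txt 3232235521 4000", &ip, &port))
                return 1;

        a.s_addr = htonl(ip);
        printf("DCC target %s:%u\n", inet_ntoa(a), port);
        return 0;
}
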
diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
deleted file mode 100644 (file)
index cc6dd49..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- *      NetBIOS name service broadcast connection tracking helper
- *
- *      (c) 2005 Patrick McHardy <kaber@trash.net>
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-/*
- *      This helper tracks locally originating NetBIOS name service
- *      requests by issuing permanent expectations (valid until
- *      timing out) matching all reply connections from the
- *      destination network. The only NetBIOS specific thing is
- *      actually the port number.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/if_addr.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/route.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-
-#define NMBD_PORT      137
-
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_DESCRIPTION("NetBIOS name service broadcast connection tracking helper");
-MODULE_LICENSE("GPL");
-
-static unsigned int timeout = 3;
-module_param(timeout, uint, 0400);
-MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
-
-static int help(struct sk_buff **pskb,
-               struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
-{
-       struct ip_conntrack_expect *exp;
-       struct iphdr *iph = (*pskb)->nh.iph;
-       struct rtable *rt = (struct rtable *)(*pskb)->dst;
-       struct in_device *in_dev;
-       __be32 mask = 0;
-
-       /* we're only interested in locally generated packets */
-       if ((*pskb)->sk == NULL)
-               goto out;
-       if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
-               goto out;
-       if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
-               goto out;
-
-       rcu_read_lock();
-       in_dev = __in_dev_get_rcu(rt->u.dst.dev);
-       if (in_dev != NULL) {
-               for_primary_ifa(in_dev) {
-                       if (ifa->ifa_broadcast == iph->daddr) {
-                               mask = ifa->ifa_mask;
-                               break;
-                       }
-               } endfor_ifa(in_dev);
-       }
-       rcu_read_unlock();
-
-       if (mask == 0)
-               goto out;
-
-       exp = ip_conntrack_expect_alloc(ct);
-       if (exp == NULL)
-               goto out;
-
-       exp->tuple                = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-       exp->tuple.src.u.udp.port = htons(NMBD_PORT);
-
-       exp->mask.src.ip          = mask;
-       exp->mask.src.u.udp.port  = htons(0xFFFF);
-       exp->mask.dst.ip          = htonl(0xFFFFFFFF);
-       exp->mask.dst.u.udp.port  = htons(0xFFFF);
-       exp->mask.dst.protonum    = 0xFF;
-
-       exp->expectfn             = NULL;
-       exp->flags                = IP_CT_EXPECT_PERMANENT;
-
-       ip_conntrack_expect_related(exp);
-       ip_conntrack_expect_put(exp);
-
-       ip_ct_refresh(ct, *pskb, timeout * HZ);
-out:
-       return NF_ACCEPT;
-}
-
-static struct ip_conntrack_helper helper = {
-       .name                   = "netbios-ns",
-       .tuple = {
-               .src = {
-                       .u = {
-                               .udp = {
-                                       .port   = __constant_htons(NMBD_PORT),
-                               }
-                       }
-               },
-               .dst = {
-                       .protonum       = IPPROTO_UDP,
-               },
-       },
-       .mask = {
-               .src = {
-                       .u = {
-                               .udp = {
-                                       .port   = __constant_htons(0xFFFF),
-                               }
-                       }
-               },
-               .dst = {
-                       .protonum       = 0xFF,
-               },
-       },
-       .max_expected           = 1,
-       .me                     = THIS_MODULE,
-       .help                   = help,
-};
-
-static int __init ip_conntrack_netbios_ns_init(void)
-{
-       helper.timeout = timeout;
-       return ip_conntrack_helper_register(&helper);
-}
-
-static void __exit ip_conntrack_netbios_ns_fini(void)
-{
-       ip_conntrack_helper_unregister(&helper);
-}
-
-module_init(ip_conntrack_netbios_ns_init);
-module_exit(ip_conntrack_netbios_ns_fini);
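
The helper above matches the packet's destination against an interface's
broadcast address and then reuses that interface's netmask when expecting the
replies.  A standalone sketch of the address arithmetic involved, not taken
from the removed file (the sample addresses are arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* directed broadcast of a network: host bits all set */
static uint32_t ex_broadcast(uint32_t addr, uint32_t mask)
{
        return (addr & mask) | ~mask;
}

int main(void)
{
        uint32_t ifaddr = ntohl(inet_addr("192.168.1.10"));
        uint32_t mask   = ntohl(inet_addr("255.255.255.0"));
        uint32_t daddr  = ntohl(inet_addr("192.168.1.255"));
        struct in_addr m = { .s_addr = htonl(mask) };

        if (ex_broadcast(ifaddr, mask) == daddr)
                printf("destination is the local broadcast; "
                       "expect replies from any host under mask %s\n",
                       inet_ntoa(m));
        else
                printf("not our broadcast address\n");
        return 0;
}
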
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
deleted file mode 100644 (file)
index 9228b76..0000000
+++ /dev/null
@@ -1,1577 +0,0 @@
-/* Connection tracking via netlink socket. Allows for user space
- * protocol helpers and general trouble making from userspace.
- *
- * (C) 2001 by Jay Schulist <jschlst@samba.org>
- * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
- * (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2006 by Pablo Neira Ayuso <pablo@eurodev.net>
- *
- * I've reworked this stuff to use attributes instead of conntrack
- * structures. 5.44 am. I need more tea. --pablo 05/07/11.
- *
- * Initial connection tracking via netlink development funded and
- * generally made possible by Network Robots, Inc. (www.networkrobots.com)
- *
- * Further development of this code funded by Astaro AG (http://www.astaro.com)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/skbuff.h>
-#include <linux/errno.h>
-#include <linux/netlink.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-MODULE_LICENSE("GPL");
-
-static char __initdata version[] = "0.90";
-
-static inline int
-ctnetlink_dump_tuples_proto(struct sk_buff *skb,
-                           const struct ip_conntrack_tuple *tuple,
-                           struct ip_conntrack_protocol *proto)
-{
-       int ret = 0;
-       struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_PROTO);
-
-       NFA_PUT(skb, CTA_PROTO_NUM, sizeof(u_int8_t), &tuple->dst.protonum);
-
-       if (likely(proto->tuple_to_nfattr))
-               ret = proto->tuple_to_nfattr(skb, tuple);
-
-       NFA_NEST_END(skb, nest_parms);
-
-       return ret;
-
-nfattr_failure:
-       return -1;
-}
-
-static inline int
-ctnetlink_dump_tuples_ip(struct sk_buff *skb,
-                        const struct ip_conntrack_tuple *tuple)
-{
-       struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_IP);
-
-       NFA_PUT(skb, CTA_IP_V4_SRC, sizeof(__be32), &tuple->src.ip);
-       NFA_PUT(skb, CTA_IP_V4_DST, sizeof(__be32), &tuple->dst.ip);
-
-       NFA_NEST_END(skb, nest_parms);
-
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-static inline int
-ctnetlink_dump_tuples(struct sk_buff *skb,
-                     const struct ip_conntrack_tuple *tuple)
-{
-       int ret;
-       struct ip_conntrack_protocol *proto;
-
-       ret = ctnetlink_dump_tuples_ip(skb, tuple);
-       if (unlikely(ret < 0))
-               return ret;
-
-       proto = ip_conntrack_proto_find_get(tuple->dst.protonum);
-       ret = ctnetlink_dump_tuples_proto(skb, tuple, proto);
-       ip_conntrack_proto_put(proto);
-
-       return ret;
-}
-
-static inline int
-ctnetlink_dump_status(struct sk_buff *skb, const struct ip_conntrack *ct)
-{
-       __be32 status = htonl((u_int32_t) ct->status);
-       NFA_PUT(skb, CTA_STATUS, sizeof(status), &status);
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-static inline int
-ctnetlink_dump_timeout(struct sk_buff *skb, const struct ip_conntrack *ct)
-{
-       long timeout_l = ct->timeout.expires - jiffies;
-       __be32 timeout;
-
-       if (timeout_l < 0)
-               timeout = 0;
-       else
-               timeout = htonl(timeout_l / HZ);
-
-       NFA_PUT(skb, CTA_TIMEOUT, sizeof(timeout), &timeout);
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-static inline int
-ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct ip_conntrack *ct)
-{
-       struct ip_conntrack_protocol *proto = ip_conntrack_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
-
-       struct nfattr *nest_proto;
-       int ret;
-
-       if (!proto->to_nfattr) {
-               ip_conntrack_proto_put(proto);
-               return 0;
-       }
-
-       nest_proto = NFA_NEST(skb, CTA_PROTOINFO);
-
-       ret = proto->to_nfattr(skb, nest_proto, ct);
-
-       ip_conntrack_proto_put(proto);
-
-       NFA_NEST_END(skb, nest_proto);
-
-       return ret;
-
-nfattr_failure:
-       ip_conntrack_proto_put(proto);
-       return -1;
-}
-
-static inline int
-ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct ip_conntrack *ct)
-{
-       struct nfattr *nest_helper;
-
-       if (!ct->helper)
-               return 0;
-
-       nest_helper = NFA_NEST(skb, CTA_HELP);
-       NFA_PUT(skb, CTA_HELP_NAME, strlen(ct->helper->name), ct->helper->name);
-
-       if (ct->helper->to_nfattr)
-               ct->helper->to_nfattr(skb, ct);
-
-       NFA_NEST_END(skb, nest_helper);
-
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-#ifdef CONFIG_IP_NF_CT_ACCT
-static inline int
-ctnetlink_dump_counters(struct sk_buff *skb, const struct ip_conntrack *ct,
-                       enum ip_conntrack_dir dir)
-{
-       enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
-       struct nfattr *nest_count = NFA_NEST(skb, type);
-       __be32 tmp;
-
-       tmp = htonl(ct->counters[dir].packets);
-       NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(__be32), &tmp);
-
-       tmp = htonl(ct->counters[dir].bytes);
-       NFA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(__be32), &tmp);
-
-       NFA_NEST_END(skb, nest_count);
-
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-#else
-#define ctnetlink_dump_counters(a, b, c) (0)
-#endif
-
-#ifdef CONFIG_IP_NF_CONNTRACK_MARK
-static inline int
-ctnetlink_dump_mark(struct sk_buff *skb, const struct ip_conntrack *ct)
-{
-       __be32 mark = htonl(ct->mark);
-
-       NFA_PUT(skb, CTA_MARK, sizeof(__be32), &mark);
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-#else
-#define ctnetlink_dump_mark(a, b) (0)
-#endif
-
-static inline int
-ctnetlink_dump_id(struct sk_buff *skb, const struct ip_conntrack *ct)
-{
-       __be32 id = htonl(ct->id);
-       NFA_PUT(skb, CTA_ID, sizeof(__be32), &id);
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-static inline int
-ctnetlink_dump_use(struct sk_buff *skb, const struct ip_conntrack *ct)
-{
-       __be32 use = htonl(atomic_read(&ct->ct_general.use));
-
-       NFA_PUT(skb, CTA_USE, sizeof(__be32), &use);
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-#define tuple(ct, dir) (&(ct)->tuplehash[dir].tuple)
-
-static int
-ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
-                   int event, int nowait,
-                   const struct ip_conntrack *ct)
-{
-       struct nlmsghdr *nlh;
-       struct nfgenmsg *nfmsg;
-       struct nfattr *nest_parms;
-       unsigned char *b;
-
-       b = skb->tail;
-
-       event |= NFNL_SUBSYS_CTNETLINK << 8;
-       nlh    = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg));
-       nfmsg  = NLMSG_DATA(nlh);
-
-       nlh->nlmsg_flags    = (nowait && pid) ? NLM_F_MULTI : 0;
-       nfmsg->nfgen_family = AF_INET;
-       nfmsg->version      = NFNETLINK_V0;
-       nfmsg->res_id       = 0;
-
-       nest_parms = NFA_NEST(skb, CTA_TUPLE_ORIG);
-       if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
-               goto nfattr_failure;
-       NFA_NEST_END(skb, nest_parms);
-
-       nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY);
-       if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0)
-               goto nfattr_failure;
-       NFA_NEST_END(skb, nest_parms);
-
-       if (ctnetlink_dump_status(skb, ct) < 0 ||
-           ctnetlink_dump_timeout(skb, ct) < 0 ||
-           ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-           ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
-           ctnetlink_dump_protoinfo(skb, ct) < 0 ||
-           ctnetlink_dump_helpinfo(skb, ct) < 0 ||
-           ctnetlink_dump_mark(skb, ct) < 0 ||
-           ctnetlink_dump_id(skb, ct) < 0 ||
-           ctnetlink_dump_use(skb, ct) < 0)
-               goto nfattr_failure;
-
-       nlh->nlmsg_len = skb->tail - b;
-       return skb->len;
-
-nlmsg_failure:
-nfattr_failure:
-       skb_trim(skb, b - skb->data);
-       return -1;
-}
-
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-static int ctnetlink_conntrack_event(struct notifier_block *this,
-                                    unsigned long events, void *ptr)
-{
-       struct nlmsghdr *nlh;
-       struct nfgenmsg *nfmsg;
-       struct nfattr *nest_parms;
-       struct ip_conntrack *ct = (struct ip_conntrack *)ptr;
-       struct sk_buff *skb;
-       unsigned int type;
-       unsigned char *b;
-       unsigned int flags = 0, group;
-
-       /* ignore our fake conntrack entry */
-       if (ct == &ip_conntrack_untracked)
-               return NOTIFY_DONE;
-
-       if (events & IPCT_DESTROY) {
-               type = IPCTNL_MSG_CT_DELETE;
-               group = NFNLGRP_CONNTRACK_DESTROY;
-       } else if (events & (IPCT_NEW | IPCT_RELATED)) {
-               type = IPCTNL_MSG_CT_NEW;
-               flags = NLM_F_CREATE|NLM_F_EXCL;
-               group = NFNLGRP_CONNTRACK_NEW;
-       } else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) {
-               type = IPCTNL_MSG_CT_NEW;
-               group = NFNLGRP_CONNTRACK_UPDATE;
-       } else
-               return NOTIFY_DONE;
-
-       if (!nfnetlink_has_listeners(group))
-               return NOTIFY_DONE;
-
-       skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
-       if (!skb)
-               return NOTIFY_DONE;
-
-       b = skb->tail;
-
-       type |= NFNL_SUBSYS_CTNETLINK << 8;
-       nlh   = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
-       nfmsg = NLMSG_DATA(nlh);
-
-       nlh->nlmsg_flags    = flags;
-       nfmsg->nfgen_family = AF_INET;
-       nfmsg->version  = NFNETLINK_V0;
-       nfmsg->res_id   = 0;
-
-       nest_parms = NFA_NEST(skb, CTA_TUPLE_ORIG);
-       if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
-               goto nfattr_failure;
-       NFA_NEST_END(skb, nest_parms);
-
-       nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY);
-       if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0)
-               goto nfattr_failure;
-       NFA_NEST_END(skb, nest_parms);
-
-       if (events & IPCT_DESTROY) {
-               if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-                   ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
-                       goto nfattr_failure;
-       } else {
-               if (ctnetlink_dump_status(skb, ct) < 0)
-                       goto nfattr_failure;
-
-               if (ctnetlink_dump_timeout(skb, ct) < 0)
-                       goto nfattr_failure;
-
-               if (events & IPCT_PROTOINFO
-                   && ctnetlink_dump_protoinfo(skb, ct) < 0)
-                       goto nfattr_failure;
-
-               if ((events & IPCT_HELPER || ct->helper)
-                   && ctnetlink_dump_helpinfo(skb, ct) < 0)
-                       goto nfattr_failure;
-
-#ifdef CONFIG_IP_NF_CONNTRACK_MARK
-               if ((events & IPCT_MARK || ct->mark)
-                   && ctnetlink_dump_mark(skb, ct) < 0)
-                       goto nfattr_failure;
-#endif
-
-               if (events & IPCT_COUNTER_FILLING &&
-                   (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-                    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0))
-                       goto nfattr_failure;
-       }
-
-       nlh->nlmsg_len = skb->tail - b;
-       nfnetlink_send(skb, 0, group, 0);
-       return NOTIFY_DONE;
-
-nlmsg_failure:
-nfattr_failure:
-       kfree_skb(skb);
-       return NOTIFY_DONE;
-}
-#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
-
-static int ctnetlink_done(struct netlink_callback *cb)
-{
-       if (cb->args[1])
-               ip_conntrack_put((struct ip_conntrack *)cb->args[1]);
-       return 0;
-}
-
-static int
-ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
-{
-       struct ip_conntrack *ct, *last;
-       struct ip_conntrack_tuple_hash *h;
-       struct list_head *i;
-
-       read_lock_bh(&ip_conntrack_lock);
-       last = (struct ip_conntrack *)cb->args[1];
-       for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) {
-restart:
-               list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) {
-                       h = (struct ip_conntrack_tuple_hash *) i;
-                       if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
-                               continue;
-                       ct = tuplehash_to_ctrack(h);
-                       if (cb->args[1]) {
-                               if (ct != last)
-                                       continue;
-                               cb->args[1] = 0;
-                       }
-                       if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
-                                               cb->nlh->nlmsg_seq,
-                                               IPCTNL_MSG_CT_NEW,
-                                               1, ct) < 0) {
-                               nf_conntrack_get(&ct->ct_general);
-                               cb->args[1] = (unsigned long)ct;
-                               goto out;
-                       }
-#ifdef CONFIG_NF_CT_ACCT
-                       if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) ==
-                                               IPCTNL_MSG_CT_GET_CTRZERO)
-                               memset(&ct->counters, 0, sizeof(ct->counters));
-#endif
-               }
-               if (cb->args[1]) {
-                       cb->args[1] = 0;
-                       goto restart;
-               }
-       }
-out:
-       read_unlock_bh(&ip_conntrack_lock);
-       if (last)
-               ip_conntrack_put(last);
-
-       return skb->len;
-}
-
-static const size_t cta_min_ip[CTA_IP_MAX] = {
-       [CTA_IP_V4_SRC-1]       = sizeof(__be32),
-       [CTA_IP_V4_DST-1]       = sizeof(__be32),
-};
-
-static inline int
-ctnetlink_parse_tuple_ip(struct nfattr *attr, struct ip_conntrack_tuple *tuple)
-{
-       struct nfattr *tb[CTA_IP_MAX];
-
-       nfattr_parse_nested(tb, CTA_IP_MAX, attr);
-
-       if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip))
-               return -EINVAL;
-
-       if (!tb[CTA_IP_V4_SRC-1])
-               return -EINVAL;
-       tuple->src.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_SRC-1]);
-
-       if (!tb[CTA_IP_V4_DST-1])
-               return -EINVAL;
-       tuple->dst.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_DST-1]);
-
-       return 0;
-}
-
-static const size_t cta_min_proto[CTA_PROTO_MAX] = {
-       [CTA_PROTO_NUM-1]       = sizeof(u_int8_t),
-       [CTA_PROTO_SRC_PORT-1]  = sizeof(u_int16_t),
-       [CTA_PROTO_DST_PORT-1]  = sizeof(u_int16_t),
-       [CTA_PROTO_ICMP_TYPE-1] = sizeof(u_int8_t),
-       [CTA_PROTO_ICMP_CODE-1] = sizeof(u_int8_t),
-       [CTA_PROTO_ICMP_ID-1]   = sizeof(u_int16_t),
-};
-
-static inline int
-ctnetlink_parse_tuple_proto(struct nfattr *attr,
-                           struct ip_conntrack_tuple *tuple)
-{
-       struct nfattr *tb[CTA_PROTO_MAX];
-       struct ip_conntrack_protocol *proto;
-       int ret = 0;
-
-       nfattr_parse_nested(tb, CTA_PROTO_MAX, attr);
-
-       if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
-               return -EINVAL;
-
-       if (!tb[CTA_PROTO_NUM-1])
-               return -EINVAL;
-       tuple->dst.protonum = *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_NUM-1]);
-
-       proto = ip_conntrack_proto_find_get(tuple->dst.protonum);
-
-       if (likely(proto->nfattr_to_tuple))
-               ret = proto->nfattr_to_tuple(tb, tuple);
-
-       ip_conntrack_proto_put(proto);
-
-       return ret;
-}
-
-static inline int
-ctnetlink_parse_tuple(struct nfattr *cda[], struct ip_conntrack_tuple *tuple,
-                     enum ctattr_tuple type)
-{
-       struct nfattr *tb[CTA_TUPLE_MAX];
-       int err;
-
-       memset(tuple, 0, sizeof(*tuple));
-
-       nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]);
-
-       if (!tb[CTA_TUPLE_IP-1])
-               return -EINVAL;
-
-       err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP-1], tuple);
-       if (err < 0)
-               return err;
-
-       if (!tb[CTA_TUPLE_PROTO-1])
-               return -EINVAL;
-
-       err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO-1], tuple);
-       if (err < 0)
-               return err;
-
-       /* orig and expect tuples get DIR_ORIGINAL */
-       if (type == CTA_TUPLE_REPLY)
-               tuple->dst.dir = IP_CT_DIR_REPLY;
-       else
-               tuple->dst.dir = IP_CT_DIR_ORIGINAL;
-
-       return 0;
-}
-
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-static const size_t cta_min_protonat[CTA_PROTONAT_MAX] = {
-       [CTA_PROTONAT_PORT_MIN-1]       = sizeof(u_int16_t),
-       [CTA_PROTONAT_PORT_MAX-1]       = sizeof(u_int16_t),
-};
-
-static int ctnetlink_parse_nat_proto(struct nfattr *attr,
-                                    const struct ip_conntrack *ct,
-                                    struct ip_nat_range *range)
-{
-       struct nfattr *tb[CTA_PROTONAT_MAX];
-       struct ip_nat_protocol *npt;
-
-       nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr);
-
-       if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat))
-               return -EINVAL;
-
-       npt = ip_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
-
-       if (!npt->nfattr_to_range) {
-               ip_nat_proto_put(npt);
-               return 0;
-       }
-
-       /* nfattr_to_range returns 1 if it parsed, 0 if not, neg. on error */
-       if (npt->nfattr_to_range(tb, range) > 0)
-               range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
-
-       ip_nat_proto_put(npt);
-
-       return 0;
-}
-
-static const size_t cta_min_nat[CTA_NAT_MAX] = {
-       [CTA_NAT_MINIP-1]       = sizeof(__be32),
-       [CTA_NAT_MAXIP-1]       = sizeof(__be32),
-};
-
-static inline int
-ctnetlink_parse_nat(struct nfattr *nat,
-                   const struct ip_conntrack *ct, struct ip_nat_range *range)
-{
-       struct nfattr *tb[CTA_NAT_MAX];
-       int err;
-
-       memset(range, 0, sizeof(*range));
-
-       nfattr_parse_nested(tb, CTA_NAT_MAX, nat);
-
-       if (nfattr_bad_size(tb, CTA_NAT_MAX, cta_min_nat))
-               return -EINVAL;
-
-       if (tb[CTA_NAT_MINIP-1])
-               range->min_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MINIP-1]);
-
-       if (!tb[CTA_NAT_MAXIP-1])
-               range->max_ip = range->min_ip;
-       else
-               range->max_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MAXIP-1]);
-
-       if (range->min_ip)
-               range->flags |= IP_NAT_RANGE_MAP_IPS;
-
-       if (!tb[CTA_NAT_PROTO-1])
-               return 0;
-
-       err = ctnetlink_parse_nat_proto(tb[CTA_NAT_PROTO-1], ct, range);
-       if (err < 0)
-               return err;
-
-       return 0;
-}
-#endif
-
-static inline int
-ctnetlink_parse_help(struct nfattr *attr, char **helper_name)
-{
-       struct nfattr *tb[CTA_HELP_MAX];
-
-       nfattr_parse_nested(tb, CTA_HELP_MAX, attr);
-
-       if (!tb[CTA_HELP_NAME-1])
-               return -EINVAL;
-
-       *helper_name = NFA_DATA(tb[CTA_HELP_NAME-1]);
-
-       return 0;
-}
-
-static const size_t cta_min[CTA_MAX] = {
-       [CTA_STATUS-1]          = sizeof(__be32),
-       [CTA_TIMEOUT-1]         = sizeof(__be32),
-       [CTA_MARK-1]            = sizeof(__be32),
-       [CTA_USE-1]             = sizeof(__be32),
-       [CTA_ID-1]              = sizeof(__be32)
-};
-
-static int
-ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
-                       struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
-{
-       struct ip_conntrack_tuple_hash *h;
-       struct ip_conntrack_tuple tuple;
-       struct ip_conntrack *ct;
-       int err = 0;
-
-       if (nfattr_bad_size(cda, CTA_MAX, cta_min))
-               return -EINVAL;
-
-       if (cda[CTA_TUPLE_ORIG-1])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG);
-       else if (cda[CTA_TUPLE_REPLY-1])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY);
-       else {
-               /* Flush the whole table */
-               ip_conntrack_flush();
-               return 0;
-       }
-
-       if (err < 0)
-               return err;
-
-       h = ip_conntrack_find_get(&tuple, NULL);
-       if (!h)
-               return -ENOENT;
-
-       ct = tuplehash_to_ctrack(h);
-
-       if (cda[CTA_ID-1]) {
-               u_int32_t id = ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1]));
-               if (ct->id != id) {
-                       ip_conntrack_put(ct);
-                       return -ENOENT;
-               }
-       }
-       if (del_timer(&ct->timeout))
-               ct->timeout.function((unsigned long)ct);
-
-       ip_conntrack_put(ct);
-
-       return 0;
-}
-
-static int
-ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
-                       struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
-{
-       struct ip_conntrack_tuple_hash *h;
-       struct ip_conntrack_tuple tuple;
-       struct ip_conntrack *ct;
-       struct sk_buff *skb2 = NULL;
-       int err = 0;
-
-       if (nlh->nlmsg_flags & NLM_F_DUMP) {
-               struct nfgenmsg *msg = NLMSG_DATA(nlh);
-               u32 rlen;
-
-               if (msg->nfgen_family != AF_INET)
-                       return -EAFNOSUPPORT;
-
-#ifndef CONFIG_IP_NF_CT_ACCT
-               if (NFNL_MSG_TYPE(nlh->nlmsg_type) == IPCTNL_MSG_CT_GET_CTRZERO)
-                       return -ENOTSUPP;
-#endif
-               if ((*errp = netlink_dump_start(ctnl, skb, nlh,
-                                               ctnetlink_dump_table,
-                                               ctnetlink_done)) != 0)
-                       return -EINVAL;
-
-               rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-               if (rlen > skb->len)
-                       rlen = skb->len;
-               skb_pull(skb, rlen);
-               return 0;
-       }
-
-       if (nfattr_bad_size(cda, CTA_MAX, cta_min))
-               return -EINVAL;
-
-       if (cda[CTA_TUPLE_ORIG-1])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG);
-       else if (cda[CTA_TUPLE_REPLY-1])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY);
-       else
-               return -EINVAL;
-
-       if (err < 0)
-               return err;
-
-       h = ip_conntrack_find_get(&tuple, NULL);
-       if (!h)
-               return -ENOENT;
-
-       ct = tuplehash_to_ctrack(h);
-
-       err = -ENOMEM;
-       skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
-       if (!skb2) {
-               ip_conntrack_put(ct);
-               return -ENOMEM;
-       }
-
-       err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
-                                 IPCTNL_MSG_CT_NEW, 1, ct);
-       ip_conntrack_put(ct);
-       if (err <= 0)
-               goto free;
-
-       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
-       if (err < 0)
-               goto out;
-
-       return 0;
-
-free:
-       kfree_skb(skb2);
-out:
-       return err;
-}
-
-static inline int
-ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[])
-{
-       unsigned long d;
-       unsigned status = ntohl(*(__be32 *)NFA_DATA(cda[CTA_STATUS-1]));
-       d = ct->status ^ status;
-
-       if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
-               /* unchangeable */
-               return -EINVAL;
-
-       if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
-               /* SEEN_REPLY bit can only be set */
-               return -EINVAL;
-
-
-       if (d & IPS_ASSURED && !(status & IPS_ASSURED))
-               /* ASSURED bit can only be set */
-               return -EINVAL;
-
-       if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) {
-#ifndef CONFIG_IP_NF_NAT_NEEDED
-               return -EINVAL;
-#else
-               struct ip_nat_range range;
-
-               if (cda[CTA_NAT_DST-1]) {
-                       if (ctnetlink_parse_nat(cda[CTA_NAT_DST-1], ct,
-                                               &range) < 0)
-                               return -EINVAL;
-                       if (ip_nat_initialized(ct,
-                                              HOOK2MANIP(NF_IP_PRE_ROUTING)))
-                               return -EEXIST;
-                       ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
-               }
-               if (cda[CTA_NAT_SRC-1]) {
-                       if (ctnetlink_parse_nat(cda[CTA_NAT_SRC-1], ct,
-                                               &range) < 0)
-                               return -EINVAL;
-                       if (ip_nat_initialized(ct,
-                                              HOOK2MANIP(NF_IP_POST_ROUTING)))
-                               return -EEXIST;
-                       ip_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
-               }
-#endif
-       }
-
-       /* Be careful here, modifying NAT bits can screw up things,
-        * so don't let users modify them directly if they don't pass
-        * ip_nat_range. */
-       ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
-       return 0;
-}
-
-
-static inline int
-ctnetlink_change_helper(struct ip_conntrack *ct, struct nfattr *cda[])
-{
-       struct ip_conntrack_helper *helper;
-       char *helpname;
-       int err;
-
-       /* don't change helper of sibling connections */
-       if (ct->master)
-               return -EINVAL;
-
-       err = ctnetlink_parse_help(cda[CTA_HELP-1], &helpname);
-       if (err < 0)
-               return err;
-
-       helper = __ip_conntrack_helper_find_byname(helpname);
-       if (!helper) {
-               if (!strcmp(helpname, ""))
-                       helper = NULL;
-               else
-                       return -EINVAL;
-       }
-
-       if (ct->helper) {
-               if (!helper) {
-                       /* we had a helper before ... */
-                       ip_ct_remove_expectations(ct);
-                       ct->helper = NULL;
-               } else {
-                       /* need to zero data of old helper */
-                       memset(&ct->help, 0, sizeof(ct->help));
-               }
-       }
-
-       ct->helper = helper;
-
-       return 0;
-}
-
-static inline int
-ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[])
-{
-       u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1]));
-
-       if (!del_timer(&ct->timeout))
-               return -ETIME;
-
-       ct->timeout.expires = jiffies + timeout * HZ;
-       add_timer(&ct->timeout);
-
-       return 0;
-}
-
-static inline int
-ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[])
-{
-       struct nfattr *tb[CTA_PROTOINFO_MAX], *attr = cda[CTA_PROTOINFO-1];
-       struct ip_conntrack_protocol *proto;
-       u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
-       int err = 0;
-
-       nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr);
-
-       proto = ip_conntrack_proto_find_get(npt);
-
-       if (proto->from_nfattr)
-               err = proto->from_nfattr(tb, ct);
-       ip_conntrack_proto_put(proto);
-
-       return err;
-}
-
-static int
-ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
-{
-       int err;
-
-       if (cda[CTA_HELP-1]) {
-               err = ctnetlink_change_helper(ct, cda);
-               if (err < 0)
-                       return err;
-       }
-
-       if (cda[CTA_TIMEOUT-1]) {
-               err = ctnetlink_change_timeout(ct, cda);
-               if (err < 0)
-                       return err;
-       }
-
-       if (cda[CTA_STATUS-1]) {
-               err = ctnetlink_change_status(ct, cda);
-               if (err < 0)
-                       return err;
-       }
-
-       if (cda[CTA_PROTOINFO-1]) {
-               err = ctnetlink_change_protoinfo(ct, cda);
-               if (err < 0)
-                       return err;
-       }
-
-#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
-       if (cda[CTA_MARK-1])
-               ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1]));
-#endif
-
-       return 0;
-}
-
-static int
-ctnetlink_create_conntrack(struct nfattr *cda[],
-                          struct ip_conntrack_tuple *otuple,
-                          struct ip_conntrack_tuple *rtuple)
-{
-       struct ip_conntrack *ct;
-       int err = -EINVAL;
-
-       ct = ip_conntrack_alloc(otuple, rtuple);
-       if (ct == NULL || IS_ERR(ct))
-               return -ENOMEM;
-
-       if (!cda[CTA_TIMEOUT-1])
-               goto err;
-       ct->timeout.expires = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1]));
-
-       ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
-       ct->status |= IPS_CONFIRMED;
-
-       if (cda[CTA_STATUS-1]) {
-               err = ctnetlink_change_status(ct, cda);
-               if (err < 0)
-                       goto err;
-       }
-
-       if (cda[CTA_PROTOINFO-1]) {
-               err = ctnetlink_change_protoinfo(ct, cda);
-               if (err < 0)
-                       goto err;
-       }
-
-#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
-       if (cda[CTA_MARK-1])
-               ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1]));
-#endif
-
-       ct->helper = ip_conntrack_helper_find_get(rtuple);
-
-       add_timer(&ct->timeout);
-       ip_conntrack_hash_insert(ct);
-
-       if (ct->helper)
-               ip_conntrack_helper_put(ct->helper);
-
-       return 0;
-
-err:
-       ip_conntrack_free(ct);
-       return err;
-}
-
-static int
-ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
-                       struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
-{
-       struct ip_conntrack_tuple otuple, rtuple;
-       struct ip_conntrack_tuple_hash *h = NULL;
-       int err = 0;
-
-       if (nfattr_bad_size(cda, CTA_MAX, cta_min))
-               return -EINVAL;
-
-       if (cda[CTA_TUPLE_ORIG-1]) {
-               err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG);
-               if (err < 0)
-                       return err;
-       }
-
-       if (cda[CTA_TUPLE_REPLY-1]) {
-               err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY);
-               if (err < 0)
-                       return err;
-       }
-
-       write_lock_bh(&ip_conntrack_lock);
-       if (cda[CTA_TUPLE_ORIG-1])
-               h = __ip_conntrack_find(&otuple, NULL);
-       else if (cda[CTA_TUPLE_REPLY-1])
-               h = __ip_conntrack_find(&rtuple, NULL);
-
-       if (h == NULL) {
-               write_unlock_bh(&ip_conntrack_lock);
-               err = -ENOENT;
-               if (nlh->nlmsg_flags & NLM_F_CREATE)
-                       err = ctnetlink_create_conntrack(cda, &otuple, &rtuple);
-               return err;
-       }
-       /* implicit 'else' */
-
-       /* we only allow nat config for new conntracks */
-       if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) {
-               err = -EINVAL;
-               goto out_unlock;
-       }
-
-       /* We manipulate the conntrack inside the global conntrack table lock,
-        * so there's no need to increase the refcount */
-       err = -EEXIST;
-       if (!(nlh->nlmsg_flags & NLM_F_EXCL))
-               err = ctnetlink_change_conntrack(tuplehash_to_ctrack(h), cda);
-
-out_unlock:
-       write_unlock_bh(&ip_conntrack_lock);
-       return err;
-}
-
-/***********************************************************************
- * EXPECT
- ***********************************************************************/
-
-static inline int
-ctnetlink_exp_dump_tuple(struct sk_buff *skb,
-                        const struct ip_conntrack_tuple *tuple,
-                        enum ctattr_expect type)
-{
-       struct nfattr *nest_parms = NFA_NEST(skb, type);
-
-       if (ctnetlink_dump_tuples(skb, tuple) < 0)
-               goto nfattr_failure;
-
-       NFA_NEST_END(skb, nest_parms);
-
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-static inline int
-ctnetlink_exp_dump_mask(struct sk_buff *skb,
-                       const struct ip_conntrack_tuple *tuple,
-                       const struct ip_conntrack_tuple *mask)
-{
-       int ret;
-       struct ip_conntrack_protocol *proto;
-       struct nfattr *nest_parms = NFA_NEST(skb, CTA_EXPECT_MASK);
-
-       ret = ctnetlink_dump_tuples_ip(skb, mask);
-       if (unlikely(ret < 0))
-               goto nfattr_failure;
-
-       proto = ip_conntrack_proto_find_get(tuple->dst.protonum);
-       ret = ctnetlink_dump_tuples_proto(skb, mask, proto);
-       ip_conntrack_proto_put(proto);
-       if (unlikely(ret < 0))
-               goto nfattr_failure;
-
-       NFA_NEST_END(skb, nest_parms);
-
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-static inline int
-ctnetlink_exp_dump_expect(struct sk_buff *skb,
-                         const struct ip_conntrack_expect *exp)
-{
-       struct ip_conntrack *master = exp->master;
-       __be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ);
-       __be32 id = htonl(exp->id);
-
-       if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
-               goto nfattr_failure;
-       if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
-               goto nfattr_failure;
-       if (ctnetlink_exp_dump_tuple(skb,
-                                &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-                                CTA_EXPECT_MASTER) < 0)
-               goto nfattr_failure;
-
-       NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(__be32), &timeout);
-       NFA_PUT(skb, CTA_EXPECT_ID, sizeof(__be32), &id);
-
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-static int
-ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
-                   int event,
-                   int nowait,
-                   const struct ip_conntrack_expect *exp)
-{
-       struct nlmsghdr *nlh;
-       struct nfgenmsg *nfmsg;
-       unsigned char *b;
-
-       b = skb->tail;
-
-       event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
-       nlh    = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg));
-       nfmsg  = NLMSG_DATA(nlh);
-
-       nlh->nlmsg_flags    = (nowait && pid) ? NLM_F_MULTI : 0;
-       nfmsg->nfgen_family = AF_INET;
-       nfmsg->version      = NFNETLINK_V0;
-       nfmsg->res_id       = 0;
-
-       if (ctnetlink_exp_dump_expect(skb, exp) < 0)
-               goto nfattr_failure;
-
-       nlh->nlmsg_len = skb->tail - b;
-       return skb->len;
-
-nlmsg_failure:
-nfattr_failure:
-       skb_trim(skb, b - skb->data);
-       return -1;
-}
-
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-static int ctnetlink_expect_event(struct notifier_block *this,
-                                 unsigned long events, void *ptr)
-{
-       struct nlmsghdr *nlh;
-       struct nfgenmsg *nfmsg;
-       struct ip_conntrack_expect *exp = (struct ip_conntrack_expect *)ptr;
-       struct sk_buff *skb;
-       unsigned int type;
-       unsigned char *b;
-       int flags = 0;
-
-       if (events & IPEXP_NEW) {
-               type = IPCTNL_MSG_EXP_NEW;
-               flags = NLM_F_CREATE|NLM_F_EXCL;
-       } else
-               return NOTIFY_DONE;
-
-       if (!nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
-               return NOTIFY_DONE;
-
-       skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
-       if (!skb)
-               return NOTIFY_DONE;
-
-       b = skb->tail;
-
-       type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
-       nlh   = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
-       nfmsg = NLMSG_DATA(nlh);
-
-       nlh->nlmsg_flags    = flags;
-       nfmsg->nfgen_family = AF_INET;
-       nfmsg->version      = NFNETLINK_V0;
-       nfmsg->res_id       = 0;
-
-       if (ctnetlink_exp_dump_expect(skb, exp) < 0)
-               goto nfattr_failure;
-
-       nlh->nlmsg_len = skb->tail - b;
-       nfnetlink_send(skb, 0, NFNLGRP_CONNTRACK_EXP_NEW, 0);
-       return NOTIFY_DONE;
-
-nlmsg_failure:
-nfattr_failure:
-       kfree_skb(skb);
-       return NOTIFY_DONE;
-}
-#endif
-
-static int
-ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
-{
-       struct ip_conntrack_expect *exp = NULL;
-       struct list_head *i;
-       u_int32_t *id = (u_int32_t *) &cb->args[0];
-
-       read_lock_bh(&ip_conntrack_lock);
-       list_for_each_prev(i, &ip_conntrack_expect_list) {
-               exp = (struct ip_conntrack_expect *) i;
-               if (exp->id <= *id)
-                       continue;
-               if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).pid,
-                                           cb->nlh->nlmsg_seq,
-                                           IPCTNL_MSG_EXP_NEW,
-                                           1, exp) < 0)
-                       goto out;
-               *id = exp->id;
-       }
-out:
-       read_unlock_bh(&ip_conntrack_lock);
-
-       return skb->len;
-}
-
-static const size_t cta_min_exp[CTA_EXPECT_MAX] = {
-       [CTA_EXPECT_TIMEOUT-1]          = sizeof(__be32),
-       [CTA_EXPECT_ID-1]               = sizeof(__be32)
-};
-
-static int
-ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
-                    struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
-{
-       struct ip_conntrack_tuple tuple;
-       struct ip_conntrack_expect *exp;
-       struct sk_buff *skb2;
-       int err = 0;
-
-       if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
-               return -EINVAL;
-
-       if (nlh->nlmsg_flags & NLM_F_DUMP) {
-               struct nfgenmsg *msg = NLMSG_DATA(nlh);
-               u32 rlen;
-
-               if (msg->nfgen_family != AF_INET)
-                       return -EAFNOSUPPORT;
-
-               if ((*errp = netlink_dump_start(ctnl, skb, nlh,
-                                               ctnetlink_exp_dump_table,
-                                               ctnetlink_done)) != 0)
-                       return -EINVAL;
-               rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-               if (rlen > skb->len)
-                       rlen = skb->len;
-               skb_pull(skb, rlen);
-               return 0;
-       }
-
-       if (cda[CTA_EXPECT_MASTER-1])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER);
-       else
-               return -EINVAL;
-
-       if (err < 0)
-               return err;
-
-       exp = ip_conntrack_expect_find_get(&tuple);
-       if (!exp)
-               return -ENOENT;
-
-       if (cda[CTA_EXPECT_ID-1]) {
-               __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
-               if (exp->id != ntohl(id)) {
-                       ip_conntrack_expect_put(exp);
-                       return -ENOENT;
-               }
-       }
-
-       err = -ENOMEM;
-       skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
-       if (!skb2)
-               goto out;
-
-       err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
-                                     nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW,
-                                     1, exp);
-       if (err <= 0)
-               goto free;
-
-       ip_conntrack_expect_put(exp);
-
-       return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
-
-free:
-       kfree_skb(skb2);
-out:
-       ip_conntrack_expect_put(exp);
-       return err;
-}
-
-static int
-ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
-                    struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
-{
-       struct ip_conntrack_expect *exp, *tmp;
-       struct ip_conntrack_tuple tuple;
-       struct ip_conntrack_helper *h;
-       int err;
-
-       if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
-               return -EINVAL;
-
-       if (cda[CTA_EXPECT_TUPLE-1]) {
-               /* delete a single expect by tuple */
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE);
-               if (err < 0)
-                       return err;
-
-               /* bump usage count to 2 */
-               exp = ip_conntrack_expect_find_get(&tuple);
-               if (!exp)
-                       return -ENOENT;
-
-               if (cda[CTA_EXPECT_ID-1]) {
-                       __be32 id =
-                               *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
-                       if (exp->id != ntohl(id)) {
-                               ip_conntrack_expect_put(exp);
-                               return -ENOENT;
-                       }
-               }
-
-               /* after list removal, usage count == 1 */
-               ip_conntrack_unexpect_related(exp);
-               /* have to put what we 'get' above.
-                * after this line usage count == 0 */
-               ip_conntrack_expect_put(exp);
-       } else if (cda[CTA_EXPECT_HELP_NAME-1]) {
-               char *name = NFA_DATA(cda[CTA_EXPECT_HELP_NAME-1]);
-
-               /* delete all expectations for this helper */
-               write_lock_bh(&ip_conntrack_lock);
-               h = __ip_conntrack_helper_find_byname(name);
-               if (!h) {
-                       write_unlock_bh(&ip_conntrack_lock);
-                       return -EINVAL;
-               }
-               list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list,
-                                        list) {
-                       if (exp->master->helper == h
-                           && del_timer(&exp->timeout)) {
-                               ip_ct_unlink_expect(exp);
-                               ip_conntrack_expect_put(exp);
-                       }
-               }
-               write_unlock_bh(&ip_conntrack_lock);
-       } else {
-               /* This basically means we have to flush everything */
-               write_lock_bh(&ip_conntrack_lock);
-               list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list,
-                                        list) {
-                       if (del_timer(&exp->timeout)) {
-                               ip_ct_unlink_expect(exp);
-                               ip_conntrack_expect_put(exp);
-                       }
-               }
-               write_unlock_bh(&ip_conntrack_lock);
-       }
-
-       return 0;
-}
-
-static int
-ctnetlink_change_expect(struct ip_conntrack_expect *x, struct nfattr *cda[])
-{
-       return -EOPNOTSUPP;
-}
-
-static int
-ctnetlink_create_expect(struct nfattr *cda[])
-{
-       struct ip_conntrack_tuple tuple, mask, master_tuple;
-       struct ip_conntrack_tuple_hash *h = NULL;
-       struct ip_conntrack_expect *exp;
-       struct ip_conntrack *ct;
-       int err = 0;
-
-       /* caller guarantees that those three CTA_EXPECT_* exist */
-       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE);
-       if (err < 0)
-               return err;
-       err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK);
-       if (err < 0)
-               return err;
-       err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER);
-       if (err < 0)
-               return err;
-
-       /* Look for master conntrack of this expectation */
-       h = ip_conntrack_find_get(&master_tuple, NULL);
-       if (!h)
-               return -ENOENT;
-       ct = tuplehash_to_ctrack(h);
-
-       if (!ct->helper) {
-               /* this conntrack has no helper, abort */
-               err = -EINVAL;
-               goto out;
-       }
-
-       exp = ip_conntrack_expect_alloc(ct);
-       if (!exp) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       exp->expectfn = NULL;
-       exp->flags = 0;
-       exp->master = ct;
-       memcpy(&exp->tuple, &tuple, sizeof(struct ip_conntrack_tuple));
-       memcpy(&exp->mask, &mask, sizeof(struct ip_conntrack_tuple));
-
-       err = ip_conntrack_expect_related(exp);
-       ip_conntrack_expect_put(exp);
-
-out:
-       ip_conntrack_put(tuplehash_to_ctrack(h));
-       return err;
-}
-
-static int
-ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
-                    struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
-{
-       struct ip_conntrack_tuple tuple;
-       struct ip_conntrack_expect *exp;
-       int err = 0;
-
-       if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
-               return -EINVAL;
-
-       if (!cda[CTA_EXPECT_TUPLE-1]
-           || !cda[CTA_EXPECT_MASK-1]
-           || !cda[CTA_EXPECT_MASTER-1])
-               return -EINVAL;
-
-       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE);
-       if (err < 0)
-               return err;
-
-       write_lock_bh(&ip_conntrack_lock);
-       exp = __ip_conntrack_expect_find(&tuple);
-
-       if (!exp) {
-               write_unlock_bh(&ip_conntrack_lock);
-               err = -ENOENT;
-               if (nlh->nlmsg_flags & NLM_F_CREATE)
-                       err = ctnetlink_create_expect(cda);
-               return err;
-       }
-
-       err = -EEXIST;
-       if (!(nlh->nlmsg_flags & NLM_F_EXCL))
-               err = ctnetlink_change_expect(exp, cda);
-       write_unlock_bh(&ip_conntrack_lock);
-
-       return err;
-}
-
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-static struct notifier_block ctnl_notifier = {
-       .notifier_call  = ctnetlink_conntrack_event,
-};
-
-static struct notifier_block ctnl_notifier_exp = {
-       .notifier_call  = ctnetlink_expect_event,
-};
-#endif
-
-static struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
-       [IPCTNL_MSG_CT_NEW]             = { .call = ctnetlink_new_conntrack,
-                                           .attr_count = CTA_MAX, },
-       [IPCTNL_MSG_CT_GET]             = { .call = ctnetlink_get_conntrack,
-                                           .attr_count = CTA_MAX, },
-       [IPCTNL_MSG_CT_DELETE]          = { .call = ctnetlink_del_conntrack,
-                                           .attr_count = CTA_MAX, },
-       [IPCTNL_MSG_CT_GET_CTRZERO]     = { .call = ctnetlink_get_conntrack,
-                                           .attr_count = CTA_MAX, },
-};
-
-static struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
-       [IPCTNL_MSG_EXP_GET]            = { .call = ctnetlink_get_expect,
-                                           .attr_count = CTA_EXPECT_MAX, },
-       [IPCTNL_MSG_EXP_NEW]            = { .call = ctnetlink_new_expect,
-                                           .attr_count = CTA_EXPECT_MAX, },
-       [IPCTNL_MSG_EXP_DELETE]         = { .call = ctnetlink_del_expect,
-                                           .attr_count = CTA_EXPECT_MAX, },
-};
-
-static struct nfnetlink_subsystem ctnl_subsys = {
-       .name                           = "conntrack",
-       .subsys_id                      = NFNL_SUBSYS_CTNETLINK,
-       .cb_count                       = IPCTNL_MSG_MAX,
-       .cb                             = ctnl_cb,
-};
-
-static struct nfnetlink_subsystem ctnl_exp_subsys = {
-       .name                           = "conntrack_expect",
-       .subsys_id                      = NFNL_SUBSYS_CTNETLINK_EXP,
-       .cb_count                       = IPCTNL_MSG_EXP_MAX,
-       .cb                             = ctnl_exp_cb,
-};
-
-MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
-MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
-
-static int __init ctnetlink_init(void)
-{
-       int ret;
-
-       printk("ctnetlink v%s: registering with nfnetlink.\n", version);
-       ret = nfnetlink_subsys_register(&ctnl_subsys);
-       if (ret < 0) {
-               printk("ctnetlink_init: cannot register with nfnetlink.\n");
-               goto err_out;
-       }
-
-       ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
-       if (ret < 0) {
-               printk("ctnetlink_init: cannot register exp with nfnetlink.\n");
-               goto err_unreg_subsys;
-       }
-
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-       ret = ip_conntrack_register_notifier(&ctnl_notifier);
-       if (ret < 0) {
-               printk("ctnetlink_init: cannot register notifier.\n");
-               goto err_unreg_exp_subsys;
-       }
-
-       ret = ip_conntrack_expect_register_notifier(&ctnl_notifier_exp);
-       if (ret < 0) {
-               printk("ctnetlink_init: cannot register expect notifier.\n");
-               goto err_unreg_notifier;
-       }
-#endif
-
-       return 0;
-
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-err_unreg_notifier:
-       ip_conntrack_unregister_notifier(&ctnl_notifier);
-err_unreg_exp_subsys:
-       nfnetlink_subsys_unregister(&ctnl_exp_subsys);
-#endif
-err_unreg_subsys:
-       nfnetlink_subsys_unregister(&ctnl_subsys);
-err_out:
-       return ret;
-}
-
-static void __exit ctnetlink_exit(void)
-{
-       printk("ctnetlink: unregistering from nfnetlink.\n");
-
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-       ip_conntrack_expect_unregister_notifier(&ctnl_notifier_exp);
-       ip_conntrack_unregister_notifier(&ctnl_notifier);
-#endif
-
-       nfnetlink_subsys_unregister(&ctnl_exp_subsys);
-       nfnetlink_subsys_unregister(&ctnl_subsys);
-       return;
-}
-
-module_init(ctnetlink_init);
-module_exit(ctnetlink_exit);
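[Editor's aside, not part of the removed file: the ctnetlink code above dispatches incoming nfnetlink requests through callback tables (ctnl_cb, ctnl_exp_cb) indexed by the message subtype. The following is a minimal, self-contained userspace sketch of that dispatch-by-type pattern; all names here (demo_*, msg_handler) are hypothetical and nothing below is kernel or libnfnetlink API.]

/* Sketch: a table of handlers indexed by message subtype, mirroring the
 * ctnl_cb/ctnl_exp_cb style above. Hypothetical names throughout. */
#include <stdio.h>

enum demo_msg_type { DEMO_MSG_NEW, DEMO_MSG_GET, DEMO_MSG_DELETE, DEMO_MSG_MAX };

struct msg_handler {
        int (*call)(const char *payload);   /* handler for one message subtype */
};

static int demo_new(const char *p)    { printf("new: %s\n", p);    return 0; }
static int demo_get(const char *p)    { printf("get: %s\n", p);    return 0; }
static int demo_delete(const char *p) { printf("delete: %s\n", p); return 0; }

static const struct msg_handler handlers[DEMO_MSG_MAX] = {
        [DEMO_MSG_NEW]    = { .call = demo_new },
        [DEMO_MSG_GET]    = { .call = demo_get },
        [DEMO_MSG_DELETE] = { .call = demo_delete },
};

/* Dispatch one message by its subtype, much as the nfnetlink core selects a
 * callback from the registered subsystem's table. */
static int dispatch(unsigned int type, const char *payload)
{
        if (type >= DEMO_MSG_MAX || !handlers[type].call)
                return -1;
        return handlers[type].call(payload);
}

int main(void)
{
        dispatch(DEMO_MSG_GET, "expectation lookup");
        return 0;
}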
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_generic.c b/net/ipv4/netfilter/ip_conntrack_proto_generic.c
deleted file mode 100644 (file)
index 88af82e..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-
-unsigned int ip_ct_generic_timeout __read_mostly = 600*HZ;
-
-static int generic_pkt_to_tuple(const struct sk_buff *skb,
-                               unsigned int dataoff,
-                               struct ip_conntrack_tuple *tuple)
-{
-       tuple->src.u.all = 0;
-       tuple->dst.u.all = 0;
-
-       return 1;
-}
-
-static int generic_invert_tuple(struct ip_conntrack_tuple *tuple,
-                               const struct ip_conntrack_tuple *orig)
-{
-       tuple->src.u.all = 0;
-       tuple->dst.u.all = 0;
-
-       return 1;
-}
-
-/* Print out the per-protocol part of the tuple. */
-static int generic_print_tuple(struct seq_file *s,
-                              const struct ip_conntrack_tuple *tuple)
-{
-       return 0;
-}
-
-/* Print out the private part of the conntrack. */
-static int generic_print_conntrack(struct seq_file *s,
-                                  const struct ip_conntrack *state)
-{
-       return 0;
-}
-
-/* Returns verdict for packet, or -1 for invalid. */
-static int packet(struct ip_conntrack *conntrack,
-                 const struct sk_buff *skb,
-                 enum ip_conntrack_info ctinfo)
-{
-       ip_ct_refresh_acct(conntrack, ctinfo, skb, ip_ct_generic_timeout);
-       return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol found. */
-static int new(struct ip_conntrack *conntrack, const struct sk_buff *skb)
-{
-       return 1;
-}
-
-struct ip_conntrack_protocol ip_conntrack_generic_protocol =
-{
-       .proto                  = 0,
-       .name                   = "unknown",
-       .pkt_to_tuple           = generic_pkt_to_tuple,
-       .invert_tuple           = generic_invert_tuple,
-       .print_tuple            = generic_print_tuple,
-       .print_conntrack        = generic_print_conntrack,
-       .packet                 = packet,
-       .new                    = new,
-};
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
deleted file mode 100644 (file)
index ac1c49e..0000000
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * ip_conntrack_proto_gre.c - Version 3.0
- *
- * Connection tracking protocol helper module for GRE.
- *
- * GRE is a generic encapsulation protocol, which is generally not very
- * well suited for NAT, as it has no protocol-specific fields such as
- * port numbers.
- *
- * It has an optional key field, which may help us distinguish two
- * connections between the same two hosts.
- *
- * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
- *
- * PPTP is built on top of a modified version of GRE, and has a mandatory
- * field called "CallID", which serves the same purpose for us as the key
- * field in plain GRE.
- *
- * Documentation about PPTP can be found in RFC 2637
- *
- * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/list.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-
-static DEFINE_RWLOCK(ip_ct_gre_lock);
-
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("netfilter connection tracking protocol helper for GRE");
-
-/* shamelessly stolen from ip_conntrack_proto_udp.c */
-#define GRE_TIMEOUT            (30*HZ)
-#define GRE_STREAM_TIMEOUT     (180*HZ)
-
-#if 0
-#define DEBUGP(format, args...)        printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
-#define DUMP_TUPLE_GRE(x) printk("%u.%u.%u.%u:0x%x -> %u.%u.%u.%u:0x%x\n", \
-                       NIPQUAD((x)->src.ip), ntohs((x)->src.u.gre.key), \
-                       NIPQUAD((x)->dst.ip), ntohs((x)->dst.u.gre.key))
-#else
-#define DEBUGP(x, args...)
-#define DUMP_TUPLE_GRE(x)
-#endif
-
-/* GRE KEYMAP HANDLING FUNCTIONS */
-static LIST_HEAD(gre_keymap_list);
-
-static inline int gre_key_cmpfn(const struct ip_ct_gre_keymap *km,
-                               const struct ip_conntrack_tuple *t)
-{
-       return ((km->tuple.src.ip == t->src.ip) &&
-               (km->tuple.dst.ip == t->dst.ip) &&
-               (km->tuple.dst.protonum == t->dst.protonum) &&
-               (km->tuple.dst.u.all == t->dst.u.all));
-}
-
-/* look up the source key for a given tuple */
-static __be16 gre_keymap_lookup(struct ip_conntrack_tuple *t)
-{
-       struct ip_ct_gre_keymap *km;
-       __be16 key = 0;
-
-       read_lock_bh(&ip_ct_gre_lock);
-       list_for_each_entry(km, &gre_keymap_list, list) {
-               if (gre_key_cmpfn(km, t)) {
-                       key = km->tuple.src.u.gre.key;
-                       break;
-               }
-       }
-       read_unlock_bh(&ip_ct_gre_lock);
-
-       DEBUGP("lookup src key 0x%x for ", key);
-       DUMP_TUPLE_GRE(t);
-
-       return key;
-}
-
-/* add a single keymap entry, associate with specified master ct */
-int
-ip_ct_gre_keymap_add(struct ip_conntrack *ct,
-                    struct ip_conntrack_tuple *t, int reply)
-{
-       struct ip_ct_gre_keymap **exist_km, *km;
-
-       if (!ct->helper || strcmp(ct->helper->name, "pptp")) {
-               DEBUGP("refusing to add GRE keymap to non-pptp session\n");
-               return -1;
-       }
-
-       if (!reply)
-               exist_km = &ct->help.ct_pptp_info.keymap_orig;
-       else
-               exist_km = &ct->help.ct_pptp_info.keymap_reply;
-
-       if (*exist_km) {
-               /* check whether it's a retransmission */
-               list_for_each_entry(km, &gre_keymap_list, list) {
-                       if (gre_key_cmpfn(km, t) && km == *exist_km)
-                               return 0;
-               }
-               DEBUGP("trying to override keymap_%s for ct %p\n",
-                       reply? "reply":"orig", ct);
-               return -EEXIST;
-       }
-
-       km = kmalloc(sizeof(*km), GFP_ATOMIC);
-       if (!km)
-               return -ENOMEM;
-
-       memcpy(&km->tuple, t, sizeof(*t));
-       *exist_km = km;
-
-       DEBUGP("adding new entry %p: ", km);
-       DUMP_TUPLE_GRE(&km->tuple);
-
-       write_lock_bh(&ip_ct_gre_lock);
-       list_add_tail(&km->list, &gre_keymap_list);
-       write_unlock_bh(&ip_ct_gre_lock);
-
-       return 0;
-}
-
-/* destroy the keymap entries associated with specified master ct */
-void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct)
-{
-       DEBUGP("entering for ct %p\n", ct);
-
-       if (!ct->helper || strcmp(ct->helper->name, "pptp")) {
-               DEBUGP("refusing to destroy GRE keymap of non-pptp session\n");
-               return;
-       }
-
-       write_lock_bh(&ip_ct_gre_lock);
-       if (ct->help.ct_pptp_info.keymap_orig) {
-               DEBUGP("removing %p from list\n",
-                       ct->help.ct_pptp_info.keymap_orig);
-               list_del(&ct->help.ct_pptp_info.keymap_orig->list);
-               kfree(ct->help.ct_pptp_info.keymap_orig);
-               ct->help.ct_pptp_info.keymap_orig = NULL;
-       }
-       if (ct->help.ct_pptp_info.keymap_reply) {
-               DEBUGP("removing %p from list\n",
-                       ct->help.ct_pptp_info.keymap_reply);
-               list_del(&ct->help.ct_pptp_info.keymap_reply->list);
-               kfree(ct->help.ct_pptp_info.keymap_reply);
-               ct->help.ct_pptp_info.keymap_reply = NULL;
-       }
-       write_unlock_bh(&ip_ct_gre_lock);
-}
-
-
-/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
-
-/* invert gre part of tuple */
-static int gre_invert_tuple(struct ip_conntrack_tuple *tuple,
-                           const struct ip_conntrack_tuple *orig)
-{
-       tuple->dst.u.gre.key = orig->src.u.gre.key;
-       tuple->src.u.gre.key = orig->dst.u.gre.key;
-
-       return 1;
-}
-
-/* gre hdr info to tuple */
-static int gre_pkt_to_tuple(const struct sk_buff *skb,
-                          unsigned int dataoff,
-                          struct ip_conntrack_tuple *tuple)
-{
-       struct gre_hdr_pptp _pgrehdr, *pgrehdr;
-       __be16 srckey;
-       struct gre_hdr _grehdr, *grehdr;
-
-       /* first only delinearize old RFC1701 GRE header */
-       grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
-       if (!grehdr || grehdr->version != GRE_VERSION_PPTP) {
-               /* try to behave like "ip_conntrack_proto_generic" */
-               tuple->src.u.all = 0;
-               tuple->dst.u.all = 0;
-               return 1;
-       }
-
-       /* PPTP header is variable length, only need up to the call_id field */
-       pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
-       if (!pgrehdr)
-               return 1;
-
-       if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
-               DEBUGP("GRE_VERSION_PPTP but unknown proto\n");
-               return 0;
-       }
-
-       tuple->dst.u.gre.key = pgrehdr->call_id;
-       srckey = gre_keymap_lookup(tuple);
-       tuple->src.u.gre.key = srckey;
-
-       return 1;
-}
-
-/* print gre part of tuple */
-static int gre_print_tuple(struct seq_file *s,
-                          const struct ip_conntrack_tuple *tuple)
-{
-       return seq_printf(s, "srckey=0x%x dstkey=0x%x ",
-                         ntohs(tuple->src.u.gre.key),
-                         ntohs(tuple->dst.u.gre.key));
-}
-
-/* print private data for conntrack */
-static int gre_print_conntrack(struct seq_file *s,
-                              const struct ip_conntrack *ct)
-{
-       return seq_printf(s, "timeout=%u, stream_timeout=%u ",
-                         (ct->proto.gre.timeout / HZ),
-                         (ct->proto.gre.stream_timeout / HZ));
-}
-
-/* Returns verdict for packet, and may modify conntrack */
-static int gre_packet(struct ip_conntrack *ct,
-                     const struct sk_buff *skb,
-                     enum ip_conntrack_info conntrackinfo)
-{
-       /* If we've seen traffic both ways, this is a GRE connection.
-        * Extend timeout. */
-       if (ct->status & IPS_SEEN_REPLY) {
-               ip_ct_refresh_acct(ct, conntrackinfo, skb,
-                                  ct->proto.gre.stream_timeout);
-               /* Also, more likely to be important, and not a probe. */
-               set_bit(IPS_ASSURED_BIT, &ct->status);
-               ip_conntrack_event_cache(IPCT_STATUS, skb);
-       } else
-               ip_ct_refresh_acct(ct, conntrackinfo, skb,
-                                  ct->proto.gre.timeout);
-
-       return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol found. */
-static int gre_new(struct ip_conntrack *ct,
-                  const struct sk_buff *skb)
-{
-       DEBUGP(": ");
-       DUMP_TUPLE_GRE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-
-       /* initialize to sane values.  Ideally a conntrack helper
-        * (e.g. in the case of pptp) will increase them */
-       ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
-       ct->proto.gre.timeout = GRE_TIMEOUT;
-
-       return 1;
-}
-
-/* Called when a conntrack entry has already been removed from the hashes
- * and is about to be deleted from memory */
-static void gre_destroy(struct ip_conntrack *ct)
-{
-       struct ip_conntrack *master = ct->master;
-       DEBUGP(" entering\n");
-
-       if (!master)
-               DEBUGP("no master !?!\n");
-       else
-               ip_ct_gre_keymap_destroy(master);
-}
-
-/* protocol helper struct */
-static struct ip_conntrack_protocol gre = {
-       .proto           = IPPROTO_GRE,
-       .name            = "gre",
-       .pkt_to_tuple    = gre_pkt_to_tuple,
-       .invert_tuple    = gre_invert_tuple,
-       .print_tuple     = gre_print_tuple,
-       .print_conntrack = gre_print_conntrack,
-       .packet          = gre_packet,
-       .new             = gre_new,
-       .destroy         = gre_destroy,
-       .me              = THIS_MODULE,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-       .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr,
-       .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple,
-#endif
-};
-
-/* ip_conntrack_proto_gre initialization */
-int __init ip_ct_proto_gre_init(void)
-{
-       return ip_conntrack_protocol_register(&gre);
-}
-
-/* This cannot be __exit, as it is invoked from ip_conntrack_helper_pptp.c's
- * init() code on errors.
- */
-void ip_ct_proto_gre_fini(void)
-{
-       struct list_head *pos, *n;
-
-       /* delete all keymap entries */
-       write_lock_bh(&ip_ct_gre_lock);
-       list_for_each_safe(pos, n, &gre_keymap_list) {
-               DEBUGP("deleting keymap %p at module unload time\n", pos);
-               list_del(pos);
-               kfree(pos);
-       }
-       write_unlock_bh(&ip_ct_gre_lock);
-
-       ip_conntrack_protocol_unregister(&gre);
-}
-
-EXPORT_SYMBOL(ip_ct_gre_keymap_add);
-EXPORT_SYMBOL(ip_ct_gre_keymap_destroy);
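[Editor's aside, not part of the removed file: the keymap code above remembers which PPTP call ID (used as the GRE "key") belongs to which pair of endpoints, so the reply direction can be matched later. Below is a standalone sketch of that lookup idea over a plain singly-linked list; the structure and function names are hypothetical and this is not the kernel's list API.]

/* Sketch of the GRE keymap lookup idea: map a (src, dst) address pair to a
 * stored source key, as gre_keymap_lookup() does above. Hypothetical names. */
#include <stdint.h>
#include <stdio.h>

struct demo_keymap {
        uint32_t src_ip, dst_ip;    /* session endpoints */
        uint16_t src_key;           /* PPTP call ID used as GRE key */
        struct demo_keymap *next;
};

/* Return the stored source key for a (src, dst) pair, or 0 if unknown,
 * mirroring the "key stays 0 on no match" behaviour of the deleted code. */
static uint16_t demo_keymap_lookup(const struct demo_keymap *head,
                                   uint32_t src_ip, uint32_t dst_ip)
{
        for (; head; head = head->next)
                if (head->src_ip == src_ip && head->dst_ip == dst_ip)
                        return head->src_key;
        return 0;
}

int main(void)
{
        struct demo_keymap km = { 0x0a000001, 0x0a000002, 42, NULL };
        printf("key=%u\n", demo_keymap_lookup(&km, 0x0a000001, 0x0a000002));
        return 0;
}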
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
deleted file mode 100644 (file)
index ad70c81..0000000
+++ /dev/null
@@ -1,315 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/netfilter.h>
-#include <linux/in.h>
-#include <linux/icmp.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <net/ip.h>
-#include <net/checksum.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-
-unsigned int ip_ct_icmp_timeout __read_mostly = 30*HZ;
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-static int icmp_pkt_to_tuple(const struct sk_buff *skb,
-                            unsigned int dataoff,
-                            struct ip_conntrack_tuple *tuple)
-{
-       struct icmphdr _hdr, *hp;
-
-       hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
-       if (hp == NULL)
-               return 0;
-
-       tuple->dst.u.icmp.type = hp->type;
-       tuple->src.u.icmp.id = hp->un.echo.id;
-       tuple->dst.u.icmp.code = hp->code;
-
-       return 1;
-}
-
-/* Inverse ICMP type + 1; entries left at 0 have no inverse. */
-static const u_int8_t invmap[] = {
-       [ICMP_ECHO] = ICMP_ECHOREPLY + 1,
-       [ICMP_ECHOREPLY] = ICMP_ECHO + 1,
-       [ICMP_TIMESTAMP] = ICMP_TIMESTAMPREPLY + 1,
-       [ICMP_TIMESTAMPREPLY] = ICMP_TIMESTAMP + 1,
-       [ICMP_INFO_REQUEST] = ICMP_INFO_REPLY + 1,
-       [ICMP_INFO_REPLY] = ICMP_INFO_REQUEST + 1,
-       [ICMP_ADDRESS] = ICMP_ADDRESSREPLY + 1,
-       [ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1
-};
-
-static int icmp_invert_tuple(struct ip_conntrack_tuple *tuple,
-                            const struct ip_conntrack_tuple *orig)
-{
-       if (orig->dst.u.icmp.type >= sizeof(invmap)
-           || !invmap[orig->dst.u.icmp.type])
-               return 0;
-
-       tuple->src.u.icmp.id = orig->src.u.icmp.id;
-       tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1;
-       tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
-       return 1;
-}
-
-/* Print out the per-protocol part of the tuple. */
-static int icmp_print_tuple(struct seq_file *s,
-                           const struct ip_conntrack_tuple *tuple)
-{
-       return seq_printf(s, "type=%u code=%u id=%u ",
-                         tuple->dst.u.icmp.type,
-                         tuple->dst.u.icmp.code,
-                         ntohs(tuple->src.u.icmp.id));
-}
-
-/* Print out the private part of the conntrack. */
-static int icmp_print_conntrack(struct seq_file *s,
-                               const struct ip_conntrack *conntrack)
-{
-       return 0;
-}
-
-/* Returns verdict for packet, or -1 for invalid. */
-static int icmp_packet(struct ip_conntrack *ct,
-                      const struct sk_buff *skb,
-                      enum ip_conntrack_info ctinfo)
-{
-       /* Try to delete connection immediately after all replies:
-          won't actually vanish as we still have skb, and del_timer
-          means this will only run once even if count hits zero twice
-          (theoretically possible with SMP) */
-       if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
-               if (atomic_dec_and_test(&ct->proto.icmp.count)
-                   && del_timer(&ct->timeout))
-                       ct->timeout.function((unsigned long)ct);
-       } else {
-               atomic_inc(&ct->proto.icmp.count);
-               ip_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
-               ip_ct_refresh_acct(ct, ctinfo, skb, ip_ct_icmp_timeout);
-       }
-
-       return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol found. */
-static int icmp_new(struct ip_conntrack *conntrack,
-                   const struct sk_buff *skb)
-{
-       static const u_int8_t valid_new[] = {
-               [ICMP_ECHO] = 1,
-               [ICMP_TIMESTAMP] = 1,
-               [ICMP_INFO_REQUEST] = 1,
-               [ICMP_ADDRESS] = 1
-       };
-
-       if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new)
-           || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) {
-               /* Can't create a new ICMP `conn' with this. */
-               DEBUGP("icmp: can't create new conn with type %u\n",
-                      conntrack->tuplehash[0].tuple.dst.u.icmp.type);
-               DUMP_TUPLE(&conntrack->tuplehash[0].tuple);
-               return 0;
-       }
-       atomic_set(&conntrack->proto.icmp.count, 0);
-       return 1;
-}
-
-static int
-icmp_error_message(struct sk_buff *skb,
-                  enum ip_conntrack_info *ctinfo,
-                  unsigned int hooknum)
-{
-       struct ip_conntrack_tuple innertuple, origtuple;
-       struct {
-               struct icmphdr icmp;
-               struct iphdr ip;
-       } _in, *inside;
-       struct ip_conntrack_protocol *innerproto;
-       struct ip_conntrack_tuple_hash *h;
-       int dataoff;
-
-       IP_NF_ASSERT(skb->nfct == NULL);
-
-       /* Not enough header? */
-       inside = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_in), &_in);
-       if (inside == NULL)
-               return -NF_ACCEPT;
-
-       /* Ignore ICMPs containing fragments (shouldn't happen) */
-       if (inside->ip.frag_off & htons(IP_OFFSET)) {
-               DEBUGP("icmp_error_track: fragment of proto %u\n",
-                      inside->ip.protocol);
-               return -NF_ACCEPT;
-       }
-
-       innerproto = ip_conntrack_proto_find_get(inside->ip.protocol);
-       dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp) + inside->ip.ihl*4;
-       /* Are they talking about one of our connections? */
-       if (!ip_ct_get_tuple(&inside->ip, skb, dataoff, &origtuple, innerproto)) {
-               DEBUGP("icmp_error: ! get_tuple p=%u", inside->ip.protocol);
-               ip_conntrack_proto_put(innerproto);
-               return -NF_ACCEPT;
-       }
-
-       /* Ordinarily, we'd expect the inverted tupleproto, but it's
-          been preserved inside the ICMP. */
-       if (!ip_ct_invert_tuple(&innertuple, &origtuple, innerproto)) {
-               DEBUGP("icmp_error_track: Can't invert tuple\n");
-               ip_conntrack_proto_put(innerproto);
-               return -NF_ACCEPT;
-       }
-       ip_conntrack_proto_put(innerproto);
-
-       *ctinfo = IP_CT_RELATED;
-
-       h = ip_conntrack_find_get(&innertuple, NULL);
-       if (!h) {
-               /* Locally generated ICMPs will match inverted if they
-                  haven't been SNAT'ed yet */
-               /* FIXME: NAT code has to handle half-done double NAT --RR */
-               if (hooknum == NF_IP_LOCAL_OUT)
-                       h = ip_conntrack_find_get(&origtuple, NULL);
-
-               if (!h) {
-                       DEBUGP("icmp_error_track: no match\n");
-                       return -NF_ACCEPT;
-               }
-               /* Reverse direction from that found */
-               if (DIRECTION(h) != IP_CT_DIR_REPLY)
-                       *ctinfo += IP_CT_IS_REPLY;
-       } else {
-               if (DIRECTION(h) == IP_CT_DIR_REPLY)
-                       *ctinfo += IP_CT_IS_REPLY;
-       }
-
-       /* Update skb to refer to this connection */
-       skb->nfct = &tuplehash_to_ctrack(h)->ct_general;
-       skb->nfctinfo = *ctinfo;
-       return -NF_ACCEPT;
-}
-
-/* Small and modified version of icmp_rcv */
-static int
-icmp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
-          unsigned int hooknum)
-{
-       struct icmphdr _ih, *icmph;
-
-       /* Not enough header? */
-       icmph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_ih), &_ih);
-       if (icmph == NULL) {
-               if (LOG_INVALID(IPPROTO_ICMP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                                     "ip_ct_icmp: short packet ");
-               return -NF_ACCEPT;
-       }
-
-       /* See ip_conntrack_proto_tcp.c */
-       if (ip_conntrack_checksum && hooknum == NF_IP_PRE_ROUTING &&
-           nf_ip_checksum(skb, hooknum, skb->nh.iph->ihl * 4, 0)) {
-               if (LOG_INVALID(IPPROTO_ICMP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                                     "ip_ct_icmp: bad ICMP checksum ");
-               return -NF_ACCEPT;
-       }
-
-       /*
-        *      18 is the highest 'known' ICMP type. Anything else is a mystery
-        *
-        *      RFC 1122: 3.2.2  Unknown ICMP message types MUST be silently
-        *                discarded.
-        */
-       if (icmph->type > NR_ICMP_TYPES) {
-               if (LOG_INVALID(IPPROTO_ICMP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                                     "ip_ct_icmp: invalid ICMP type ");
-               return -NF_ACCEPT;
-       }
-
-       /* Need to track icmp error message? */
-       if (icmph->type != ICMP_DEST_UNREACH
-           && icmph->type != ICMP_SOURCE_QUENCH
-           && icmph->type != ICMP_TIME_EXCEEDED
-           && icmph->type != ICMP_PARAMETERPROB
-           && icmph->type != ICMP_REDIRECT)
-               return NF_ACCEPT;
-
-       return icmp_error_message(skb, ctinfo, hooknum);
-}
-
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-static int icmp_tuple_to_nfattr(struct sk_buff *skb,
-                               const struct ip_conntrack_tuple *t)
-{
-       NFA_PUT(skb, CTA_PROTO_ICMP_ID, sizeof(__be16),
-               &t->src.u.icmp.id);
-       NFA_PUT(skb, CTA_PROTO_ICMP_TYPE, sizeof(u_int8_t),
-               &t->dst.u.icmp.type);
-       NFA_PUT(skb, CTA_PROTO_ICMP_CODE, sizeof(u_int8_t),
-               &t->dst.u.icmp.code);
-
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-static int icmp_nfattr_to_tuple(struct nfattr *tb[],
-                               struct ip_conntrack_tuple *tuple)
-{
-       if (!tb[CTA_PROTO_ICMP_TYPE-1]
-           || !tb[CTA_PROTO_ICMP_CODE-1]
-           || !tb[CTA_PROTO_ICMP_ID-1])
-               return -EINVAL;
-
-       tuple->dst.u.icmp.type =
-                       *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]);
-       tuple->dst.u.icmp.code =
-                       *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]);
-       tuple->src.u.icmp.id =
-                       *(__be16 *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]);
-
-       if (tuple->dst.u.icmp.type >= sizeof(invmap)
-           || !invmap[tuple->dst.u.icmp.type])
-               return -EINVAL;
-
-       return 0;
-}
-#endif
-
-struct ip_conntrack_protocol ip_conntrack_protocol_icmp =
-{
-       .proto                  = IPPROTO_ICMP,
-       .name                   = "icmp",
-       .pkt_to_tuple           = icmp_pkt_to_tuple,
-       .invert_tuple           = icmp_invert_tuple,
-       .print_tuple            = icmp_print_tuple,
-       .print_conntrack        = icmp_print_conntrack,
-       .packet                 = icmp_packet,
-       .new                    = icmp_new,
-       .error                  = icmp_error,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-       .tuple_to_nfattr        = icmp_tuple_to_nfattr,
-       .nfattr_to_tuple        = icmp_nfattr_to_tuple,
-#endif
-};
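[Editor's aside, not part of the removed file: the invmap table above stores each inverse ICMP type plus one, so a zero entry unambiguously means "no inverse defined". A tiny self-contained sketch of that offset-by-one lookup follows; the constants and names are defined locally for illustration only.]

/* Sketch of the invmap "+1" trick used above: entries hold the inverse ICMP
 * type plus one, so a zero entry means "not invertible". */
#include <stdio.h>

#define DEMO_ICMP_ECHOREPLY 0
#define DEMO_ICMP_ECHO      8

static const unsigned char demo_invmap[] = {
        [DEMO_ICMP_ECHO]      = DEMO_ICMP_ECHOREPLY + 1,
        [DEMO_ICMP_ECHOREPLY] = DEMO_ICMP_ECHO + 1,
};

/* Returns the inverse type, or -1 when the table entry is 0 (no inverse). */
static int demo_invert_type(unsigned int type)
{
        if (type >= sizeof(demo_invmap) || !demo_invmap[type])
                return -1;
        return demo_invmap[type] - 1;
}

int main(void)
{
        printf("inverse of ECHO(8) = %d\n", demo_invert_type(DEMO_ICMP_ECHO));
        printf("inverse of type 3  = %d\n", demo_invert_type(3));
        return 0;
}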
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
deleted file mode 100644 (file)
index e694299..0000000
+++ /dev/null
@@ -1,659 +0,0 @@
-/*
- * Connection tracking protocol helper module for SCTP.
- *
- * SCTP is defined in RFC 2960. References to various sections in this code
- * are to this RFC.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * Added support for proc manipulation of timeouts.
- */
-
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/netfilter.h>
-#include <linux/module.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/sctp.h>
-#include <linux/string.h>
-#include <linux/seq_file.h>
-
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-
-#if 0
-#define DEBUGP(format, ...) printk(format, ## __VA_ARGS__)
-#else
-#define DEBUGP(format, args...)
-#endif
-
-/* Protects conntrack->proto.sctp */
-static DEFINE_RWLOCK(sctp_lock);
-
-/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
-   closely.  They're more complex. --RR
-
-   And so for me for SCTP :D -Kiran */
-
-static const char *sctp_conntrack_names[] = {
-       "NONE",
-       "CLOSED",
-       "COOKIE_WAIT",
-       "COOKIE_ECHOED",
-       "ESTABLISHED",
-       "SHUTDOWN_SENT",
-       "SHUTDOWN_RECD",
-       "SHUTDOWN_ACK_SENT",
-};
-
-#define SECS  * HZ
-#define MINS  * 60 SECS
-#define HOURS * 60 MINS
-#define DAYS  * 24 HOURS
-
-static unsigned int ip_ct_sctp_timeout_closed __read_mostly           = 10 SECS;
-static unsigned int ip_ct_sctp_timeout_cookie_wait __read_mostly      =  3 SECS;
-static unsigned int ip_ct_sctp_timeout_cookie_echoed __read_mostly    =  3 SECS;
-static unsigned int ip_ct_sctp_timeout_established __read_mostly      =  5 DAYS;
-static unsigned int ip_ct_sctp_timeout_shutdown_sent __read_mostly    = 300 SECS / 1000;
-static unsigned int ip_ct_sctp_timeout_shutdown_recd __read_mostly    = 300 SECS / 1000;
-static unsigned int ip_ct_sctp_timeout_shutdown_ack_sent __read_mostly = 3 SECS;
-
-static const unsigned int * sctp_timeouts[]
-= { NULL,                                  /* SCTP_CONNTRACK_NONE  */
-    &ip_ct_sctp_timeout_closed,                   /* SCTP_CONNTRACK_CLOSED */
-    &ip_ct_sctp_timeout_cookie_wait,       /* SCTP_CONNTRACK_COOKIE_WAIT */
-    &ip_ct_sctp_timeout_cookie_echoed,     /* SCTP_CONNTRACK_COOKIE_ECHOED */
-    &ip_ct_sctp_timeout_established,       /* SCTP_CONNTRACK_ESTABLISHED */
-    &ip_ct_sctp_timeout_shutdown_sent,     /* SCTP_CONNTRACK_SHUTDOWN_SENT */
-    &ip_ct_sctp_timeout_shutdown_recd,     /* SCTP_CONNTRACK_SHUTDOWN_RECD */
-    &ip_ct_sctp_timeout_shutdown_ack_sent  /* SCTP_CONNTRACK_SHUTDOWN_ACK_SENT */
- };
-
-#define sNO SCTP_CONNTRACK_NONE
-#define        sCL SCTP_CONNTRACK_CLOSED
-#define        sCW SCTP_CONNTRACK_COOKIE_WAIT
-#define        sCE SCTP_CONNTRACK_COOKIE_ECHOED
-#define        sES SCTP_CONNTRACK_ESTABLISHED
-#define        sSS SCTP_CONNTRACK_SHUTDOWN_SENT
-#define        sSR SCTP_CONNTRACK_SHUTDOWN_RECD
-#define        sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
-#define        sIV SCTP_CONNTRACK_MAX
-
-/*
-       These are the descriptions of the states:
-
-NOTE: These state names are tantalizingly similar to the states of an
-SCTP endpoint. But the interpretation of the states is a little different,
-considering that these are the states of the connection and not of an end
-point. Please note the subtleties. -Kiran
-
-NONE              - Nothing so far.
-COOKIE WAIT       - We have seen an INIT chunk in the original direction, or
-                   an INIT_ACK chunk in the reply direction.
-COOKIE ECHOED     - We have seen a COOKIE_ECHO chunk in the original direction.
-ESTABLISHED       - We have seen a COOKIE_ACK in the reply direction.
-SHUTDOWN_SENT     - We have seen a SHUTDOWN chunk in the original direction.
-SHUTDOWN_RECD     - We have seen a SHUTDOWN chunk in the reply direction.
-SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
-                   to that of the SHUTDOWN chunk.
-CLOSED            - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
-                   the SHUTDOWN chunk. Connection is closed.
-*/
-
-/* TODO
- - I have assumed that the first INIT is in the original direction.
- This messes things up when an INIT comes in the reply direction in CLOSED
- state.
- - Check the error type in the reply dir before transitioning from
-cookie echoed to closed.
- - Sec 5.2.4 of RFC 2960
- - Multi-homing support.
-*/
-
-/* SCTP conntrack state transitions */
-static const enum sctp_conntrack sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
-       {
-/*     ORIGINAL        */
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA},
-/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have stale cookie */
-/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */
-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}
-       },
-       {
-/*     REPLY   */
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */
-/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA},
-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */
-/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}
-       }
-};
-
-static int sctp_pkt_to_tuple(const struct sk_buff *skb,
-                            unsigned int dataoff,
-                            struct ip_conntrack_tuple *tuple)
-{
-       sctp_sctphdr_t _hdr, *hp;
-
-       DEBUGP(__FUNCTION__);
-       DEBUGP("\n");
-
-       /* Actually only need first 8 bytes. */
-       hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
-       if (hp == NULL)
-               return 0;
-
-       tuple->src.u.sctp.port = hp->source;
-       tuple->dst.u.sctp.port = hp->dest;
-       return 1;
-}
-
-static int sctp_invert_tuple(struct ip_conntrack_tuple *tuple,
-                            const struct ip_conntrack_tuple *orig)
-{
-       DEBUGP(__FUNCTION__);
-       DEBUGP("\n");
-
-       tuple->src.u.sctp.port = orig->dst.u.sctp.port;
-       tuple->dst.u.sctp.port = orig->src.u.sctp.port;
-       return 1;
-}
-
-/* Print out the per-protocol part of the tuple. */
-static int sctp_print_tuple(struct seq_file *s,
-                           const struct ip_conntrack_tuple *tuple)
-{
-       DEBUGP(__FUNCTION__);
-       DEBUGP("\n");
-
-       return seq_printf(s, "sport=%hu dport=%hu ",
-                         ntohs(tuple->src.u.sctp.port),
-                         ntohs(tuple->dst.u.sctp.port));
-}
-
-/* Print out the private part of the conntrack. */
-static int sctp_print_conntrack(struct seq_file *s,
-                               const struct ip_conntrack *conntrack)
-{
-       enum sctp_conntrack state;
-
-       DEBUGP(__FUNCTION__);
-       DEBUGP("\n");
-
-       read_lock_bh(&sctp_lock);
-       state = conntrack->proto.sctp.state;
-       read_unlock_bh(&sctp_lock);
-
-       return seq_printf(s, "%s ", sctp_conntrack_names[state]);
-}
-
-#define for_each_sctp_chunk(skb, sch, _sch, offset, count)             \
-for (offset = skb->nh.iph->ihl * 4 + sizeof(sctp_sctphdr_t), count = 0;        \
-       offset < skb->len &&                                            \
-       (sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch));   \
-       offset += (ntohs(sch->length) + 3) & ~3, count++)
-
-/* Some validity checks to make sure the chunks are fine */
-static int do_basic_checks(struct ip_conntrack *conntrack,
-                          const struct sk_buff *skb,
-                          char *map)
-{
-       u_int32_t offset, count;
-       sctp_chunkhdr_t _sch, *sch;
-       int flag;
-
-       DEBUGP(__FUNCTION__);
-       DEBUGP("\n");
-
-       flag = 0;
-
-       for_each_sctp_chunk (skb, sch, _sch, offset, count) {
-               DEBUGP("Chunk Num: %d  Type: %d\n", count, sch->type);
-
-               if (sch->type == SCTP_CID_INIT
-                       || sch->type == SCTP_CID_INIT_ACK
-                       || sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
-                       flag = 1;
-               }
-
-               /*
-                * Cookie Ack/Echo chunks not the first OR
-                * Init / Init Ack / Shutdown compl chunks not the only chunks
-                * OR zero-length.
-                */
-               if (((sch->type == SCTP_CID_COOKIE_ACK
-                       || sch->type == SCTP_CID_COOKIE_ECHO
-                       || flag)
-                     && count !=0) || !sch->length) {
-                       DEBUGP("Basic checks failed\n");
-                       return 1;
-               }
-
-               if (map) {
-                       set_bit(sch->type, (void *)map);
-               }
-       }
-
-       DEBUGP("Basic checks passed\n");
-       return count == 0;
-}
-
-static int new_state(enum ip_conntrack_dir dir,
-                    enum sctp_conntrack cur_state,
-                    int chunk_type)
-{
-       int i;
-
-       DEBUGP(__FUNCTION__);
-       DEBUGP("\n");
-
-       DEBUGP("Chunk type: %d\n", chunk_type);
-
-       switch (chunk_type) {
-               case SCTP_CID_INIT:
-                       DEBUGP("SCTP_CID_INIT\n");
-                       i = 0; break;
-               case SCTP_CID_INIT_ACK:
-                       DEBUGP("SCTP_CID_INIT_ACK\n");
-                       i = 1; break;
-               case SCTP_CID_ABORT:
-                       DEBUGP("SCTP_CID_ABORT\n");
-                       i = 2; break;
-               case SCTP_CID_SHUTDOWN:
-                       DEBUGP("SCTP_CID_SHUTDOWN\n");
-                       i = 3; break;
-               case SCTP_CID_SHUTDOWN_ACK:
-                       DEBUGP("SCTP_CID_SHUTDOWN_ACK\n");
-                       i = 4; break;
-               case SCTP_CID_ERROR:
-                       DEBUGP("SCTP_CID_ERROR\n");
-                       i = 5; break;
-               case SCTP_CID_COOKIE_ECHO:
-                       DEBUGP("SCTP_CID_COOKIE_ECHO\n");
-                       i = 6; break;
-               case SCTP_CID_COOKIE_ACK:
-                       DEBUGP("SCTP_CID_COOKIE_ACK\n");
-                       i = 7; break;
-               case SCTP_CID_SHUTDOWN_COMPLETE:
-                       DEBUGP("SCTP_CID_SHUTDOWN_COMPLETE\n");
-                       i = 8; break;
-               default:
-                       /* Other chunks like DATA, SACK, HEARTBEAT and
-                       HEARTBEAT_ACK do not cause a change in state */
-                       DEBUGP("Unknown chunk type, Will stay in %s\n",
-                                               sctp_conntrack_names[cur_state]);
-                       return cur_state;
-       }
-
-       DEBUGP("dir: %d   cur_state: %s  chunk_type: %d  new_state: %s\n",
-                       dir, sctp_conntrack_names[cur_state], chunk_type,
-                       sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]);
-
-       return sctp_conntracks[dir][i][cur_state];
-}
-
-/* Returns verdict for packet, or -1 for invalid. */
-static int sctp_packet(struct ip_conntrack *conntrack,
-                      const struct sk_buff *skb,
-                      enum ip_conntrack_info ctinfo)
-{
-       enum sctp_conntrack newconntrack, oldsctpstate;
-       struct iphdr *iph = skb->nh.iph;
-       sctp_sctphdr_t _sctph, *sh;
-       sctp_chunkhdr_t _sch, *sch;
-       u_int32_t offset, count;
-       char map[256 / sizeof (char)] = {0};
-
-       DEBUGP(__FUNCTION__);
-       DEBUGP("\n");
-
-       sh = skb_header_pointer(skb, iph->ihl * 4, sizeof(_sctph), &_sctph);
-       if (sh == NULL)
-               return -1;
-
-       if (do_basic_checks(conntrack, skb, map) != 0)
-               return -1;
-
-       /* Check the verification tag (Sec 8.5) */
-       if (!test_bit(SCTP_CID_INIT, (void *)map)
-               && !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, (void *)map)
-               && !test_bit(SCTP_CID_COOKIE_ECHO, (void *)map)
-               && !test_bit(SCTP_CID_ABORT, (void *)map)
-               && !test_bit(SCTP_CID_SHUTDOWN_ACK, (void *)map)
-               && (sh->vtag != conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) {
-               DEBUGP("Verification tag check failed\n");
-               return -1;
-       }
-
-       oldsctpstate = newconntrack = SCTP_CONNTRACK_MAX;
-       for_each_sctp_chunk (skb, sch, _sch, offset, count) {
-               write_lock_bh(&sctp_lock);
-
-               /* Special cases of Verification tag check (Sec 8.5.1) */
-               if (sch->type == SCTP_CID_INIT) {
-                       /* Sec 8.5.1 (A) */
-                       if (sh->vtag != 0) {
-                               write_unlock_bh(&sctp_lock);
-                               return -1;
-                       }
-               } else if (sch->type == SCTP_CID_ABORT) {
-                       /* Sec 8.5.1 (B) */
-                       if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])
-                               && !(sh->vtag == conntrack->proto.sctp.vtag
-                                                       [1 - CTINFO2DIR(ctinfo)])) {
-                               write_unlock_bh(&sctp_lock);
-                               return -1;
-                       }
-               } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
-                       /* Sec 8.5.1 (C) */
-                       if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])
-                               && !(sh->vtag == conntrack->proto.sctp.vtag
-                                                       [1 - CTINFO2DIR(ctinfo)]
-                                       && (sch->flags & 1))) {
-                               write_unlock_bh(&sctp_lock);
-                               return -1;
-                       }
-               } else if (sch->type == SCTP_CID_COOKIE_ECHO) {
-                       /* Sec 8.5.1 (D) */
-                       if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) {
-                               write_unlock_bh(&sctp_lock);
-                               return -1;
-                       }
-               }
-
-               oldsctpstate = conntrack->proto.sctp.state;
-               newconntrack = new_state(CTINFO2DIR(ctinfo), oldsctpstate, sch->type);
-
-               /* Invalid */
-               if (newconntrack == SCTP_CONNTRACK_MAX) {
-                       DEBUGP("ip_conntrack_sctp: Invalid dir=%i ctype=%u conntrack=%u\n",
-                              CTINFO2DIR(ctinfo), sch->type, oldsctpstate);
-                       write_unlock_bh(&sctp_lock);
-                       return -1;
-               }
-
-               /* If it is an INIT or an INIT ACK note down the vtag */
-               if (sch->type == SCTP_CID_INIT
-                       || sch->type == SCTP_CID_INIT_ACK) {
-                       sctp_inithdr_t _inithdr, *ih;
-
-                       ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
-                                               sizeof(_inithdr), &_inithdr);
-                       if (ih == NULL) {
-                                       write_unlock_bh(&sctp_lock);
-                                       return -1;
-                       }
-                       DEBUGP("Setting vtag %x for dir %d\n",
-                                       ih->init_tag, !CTINFO2DIR(ctinfo));
-                       conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag;
-               }
-
-               conntrack->proto.sctp.state = newconntrack;
-               if (oldsctpstate != newconntrack)
-                       ip_conntrack_event_cache(IPCT_PROTOINFO, skb);
-               write_unlock_bh(&sctp_lock);
-       }
-
-       ip_ct_refresh_acct(conntrack, ctinfo, skb, *sctp_timeouts[newconntrack]);
-
-       if (oldsctpstate == SCTP_CONNTRACK_COOKIE_ECHOED
-               && CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY
-               && newconntrack == SCTP_CONNTRACK_ESTABLISHED) {
-               DEBUGP("Setting assured bit\n");
-               set_bit(IPS_ASSURED_BIT, &conntrack->status);
-               ip_conntrack_event_cache(IPCT_STATUS, skb);
-       }
-
-       return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol is found. */
-static int sctp_new(struct ip_conntrack *conntrack,
-                   const struct sk_buff *skb)
-{
-       enum sctp_conntrack newconntrack;
-       struct iphdr *iph = skb->nh.iph;
-       sctp_sctphdr_t _sctph, *sh;
-       sctp_chunkhdr_t _sch, *sch;
-       u_int32_t offset, count;
-       char map[256 / sizeof (char)] = {0};
-
-       DEBUGP(__FUNCTION__);
-       DEBUGP("\n");
-
-       sh = skb_header_pointer(skb, iph->ihl * 4, sizeof(_sctph), &_sctph);
-       if (sh == NULL)
-               return 0;
-
-       if (do_basic_checks(conntrack, skb, map) != 0)
-               return 0;
-
-       /* If an OOTB packet has any of these chunks, discard it (Sec 8.4) */
-       if ((test_bit (SCTP_CID_ABORT, (void *)map))
-               || (test_bit (SCTP_CID_SHUTDOWN_COMPLETE, (void *)map))
-               || (test_bit (SCTP_CID_COOKIE_ACK, (void *)map))) {
-               return 0;
-       }
-
-       newconntrack = SCTP_CONNTRACK_MAX;
-       for_each_sctp_chunk (skb, sch, _sch, offset, count) {
-               /* Don't need lock here: this conntrack not in circulation yet */
-               newconntrack = new_state (IP_CT_DIR_ORIGINAL,
-                                               SCTP_CONNTRACK_NONE, sch->type);
-
-               /* Invalid: delete conntrack */
-               if (newconntrack == SCTP_CONNTRACK_MAX) {
-                       DEBUGP("ip_conntrack_sctp: invalid new deleting.\n");
-                       return 0;
-               }
-
-               /* Copy the vtag into the state info */
-               if (sch->type == SCTP_CID_INIT) {
-                       if (sh->vtag == 0) {
-                               sctp_inithdr_t _inithdr, *ih;
-
-                               ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
-                                                       sizeof(_inithdr), &_inithdr);
-                               if (ih == NULL)
-                                       return 0;
-
-                               DEBUGP("Setting vtag %x for new conn\n",
-                                       ih->init_tag);
-
-                               conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] =
-                                                               ih->init_tag;
-                       } else {
-                               /* Sec 8.5.1 (A) */
-                               return 0;
-                       }
-               }
-               /* If it is an OOTB SHUTDOWN ACK packet, we expect a SHUTDOWN
-                  COMPLETE in return; otherwise an ABORT, Sec 8.4 (5) and (8) */
-               else {
-                       DEBUGP("Setting vtag %x for new conn OOTB\n",
-                               sh->vtag);
-                       conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
-               }
-
-               conntrack->proto.sctp.state = newconntrack;
-       }
-
-       return 1;
-}
-
-static struct ip_conntrack_protocol ip_conntrack_protocol_sctp = {
-       .proto           = IPPROTO_SCTP,
-       .name            = "sctp",
-       .pkt_to_tuple    = sctp_pkt_to_tuple,
-       .invert_tuple    = sctp_invert_tuple,
-       .print_tuple     = sctp_print_tuple,
-       .print_conntrack = sctp_print_conntrack,
-       .packet          = sctp_packet,
-       .new             = sctp_new,
-       .destroy         = NULL,
-       .me              = THIS_MODULE,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-       .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr,
-       .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple,
-#endif
-};
-
-#ifdef CONFIG_SYSCTL
-static ctl_table ip_ct_sysctl_table[] = {
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED,
-               .procname       = "ip_conntrack_sctp_timeout_closed",
-               .data           = &ip_ct_sctp_timeout_closed,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT,
-               .procname       = "ip_conntrack_sctp_timeout_cookie_wait",
-               .data           = &ip_ct_sctp_timeout_cookie_wait,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED,
-               .procname       = "ip_conntrack_sctp_timeout_cookie_echoed",
-               .data           = &ip_ct_sctp_timeout_cookie_echoed,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED,
-               .procname       = "ip_conntrack_sctp_timeout_established",
-               .data           = &ip_ct_sctp_timeout_established,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT,
-               .procname       = "ip_conntrack_sctp_timeout_shutdown_sent",
-               .data           = &ip_ct_sctp_timeout_shutdown_sent,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD,
-               .procname       = "ip_conntrack_sctp_timeout_shutdown_recd",
-               .data           = &ip_ct_sctp_timeout_shutdown_recd,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT,
-               .procname       = "ip_conntrack_sctp_timeout_shutdown_ack_sent",
-               .data           = &ip_ct_sctp_timeout_shutdown_ack_sent,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       { .ctl_name = 0 }
-};
-
-static ctl_table ip_ct_netfilter_table[] = {
-       {
-               .ctl_name       = NET_IPV4_NETFILTER,
-               .procname       = "netfilter",
-               .mode           = 0555,
-               .child          = ip_ct_sysctl_table,
-       },
-       { .ctl_name = 0 }
-};
-
-static ctl_table ip_ct_ipv4_table[] = {
-       {
-               .ctl_name       = NET_IPV4,
-               .procname       = "ipv4",
-               .mode           = 0555,
-               .child          = ip_ct_netfilter_table,
-       },
-       { .ctl_name = 0 }
-};
-
-static ctl_table ip_ct_net_table[] = {
-       {
-               .ctl_name       = CTL_NET,
-               .procname       = "net",
-               .mode           = 0555,
-               .child          = ip_ct_ipv4_table,
-       },
-       { .ctl_name = 0 }
-};
-
-static struct ctl_table_header *ip_ct_sysctl_header;
-#endif
-
-static int __init ip_conntrack_proto_sctp_init(void)
-{
-       int ret;
-
-       ret = ip_conntrack_protocol_register(&ip_conntrack_protocol_sctp);
-       if (ret) {
-               printk("ip_conntrack_proto_sctp: protocol register failed\n");
-               goto out;
-       }
-
-#ifdef CONFIG_SYSCTL
-       ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table);
-       if (ip_ct_sysctl_header == NULL) {
-               ret = -ENOMEM;
-               printk("ip_conntrack_proto_sctp: can't register to sysctl.\n");
-               goto cleanup;
-       }
-#endif
-
-       return ret;
-
-#ifdef CONFIG_SYSCTL
- cleanup:
-       ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp);
-#endif
- out:
-       DEBUGP("SCTP conntrack module loading %s\n",
-                                       ret ? "failed": "succeeded");
-       return ret;
-}
-
-static void __exit ip_conntrack_proto_sctp_fini(void)
-{
-       ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp);
-#ifdef CONFIG_SYSCTL
-       unregister_sysctl_table(ip_ct_sysctl_header);
-#endif
-       DEBUGP("SCTP conntrack module unloaded\n");
-}
-
-module_init(ip_conntrack_proto_sctp_init);
-module_exit(ip_conntrack_proto_sctp_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kiran Kumar Immidi");
-MODULE_DESCRIPTION("Netfilter connection tracking protocol helper for SCTP");
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
deleted file mode 100644 (file)
index 0a72eab..0000000
+++ /dev/null
@@ -1,1164 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>:
- *     - Real stateful connection tracking
- *     - Modified state transitions table
- *     - Window scaling support added
- *     - SACK support added
- *
- * Willy Tarreau:
- *     - State table bugfixes
- *     - More robust state changes
- *     - Tuning timer parameters
- *
- * version 2.2
- */
-
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/netfilter.h>
-#include <linux/module.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/spinlock.h>
-
-#include <net/tcp.h>
-
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-
-#if 0
-#define DEBUGP printk
-#define DEBUGP_VARS
-#else
-#define DEBUGP(format, args...)
-#endif
-
-/* Protects conntrack->proto.tcp */
-static DEFINE_RWLOCK(tcp_lock);
-
-/* "Be conservative in what you do,
-    be liberal in what you accept from others."
-    If it's non-zero, we mark only out of window RST segments as INVALID. */
-int ip_ct_tcp_be_liberal __read_mostly = 0;
-
-/* If it is set to zero, we disable picking up already established
-   connections. */
-int ip_ct_tcp_loose __read_mostly = 1;
-
-/* Max number of the retransmitted packets without receiving an (acceptable)
-   ACK from the destination. If this number is reached, a shorter timer
-   will be started. */
-int ip_ct_tcp_max_retrans __read_mostly = 3;
-
-  /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
-     closely.  They're more complex. --RR */
-
-static const char *tcp_conntrack_names[] = {
-       "NONE",
-       "SYN_SENT",
-       "SYN_RECV",
-       "ESTABLISHED",
-       "FIN_WAIT",
-       "CLOSE_WAIT",
-       "LAST_ACK",
-       "TIME_WAIT",
-       "CLOSE",
-       "LISTEN"
-};
-
-#define SECS * HZ
-#define MINS * 60 SECS
-#define HOURS * 60 MINS
-#define DAYS * 24 HOURS
-
-unsigned int ip_ct_tcp_timeout_syn_sent __read_mostly =      2 MINS;
-unsigned int ip_ct_tcp_timeout_syn_recv __read_mostly =     60 SECS;
-unsigned int ip_ct_tcp_timeout_established __read_mostly =   5 DAYS;
-unsigned int ip_ct_tcp_timeout_fin_wait __read_mostly =      2 MINS;
-unsigned int ip_ct_tcp_timeout_close_wait __read_mostly =   60 SECS;
-unsigned int ip_ct_tcp_timeout_last_ack __read_mostly =     30 SECS;
-unsigned int ip_ct_tcp_timeout_time_wait __read_mostly =     2 MINS;
-unsigned int ip_ct_tcp_timeout_close __read_mostly =        10 SECS;
-
-/* RFC1122 says the R2 limit should be at least 100 seconds.
-   Linux uses 15 packets as limit, which corresponds
-   to ~13-30min depending on RTO. */
-unsigned int ip_ct_tcp_timeout_max_retrans __read_mostly =   5 MINS;
-
-static const unsigned int * tcp_timeouts[]
-= { NULL,                              /*      TCP_CONNTRACK_NONE */
-    &ip_ct_tcp_timeout_syn_sent,       /*      TCP_CONNTRACK_SYN_SENT, */
-    &ip_ct_tcp_timeout_syn_recv,       /*      TCP_CONNTRACK_SYN_RECV, */
-    &ip_ct_tcp_timeout_established,    /*      TCP_CONNTRACK_ESTABLISHED,      */
-    &ip_ct_tcp_timeout_fin_wait,       /*      TCP_CONNTRACK_FIN_WAIT, */
-    &ip_ct_tcp_timeout_close_wait,     /*      TCP_CONNTRACK_CLOSE_WAIT,       */
-    &ip_ct_tcp_timeout_last_ack,       /*      TCP_CONNTRACK_LAST_ACK, */
-    &ip_ct_tcp_timeout_time_wait,      /*      TCP_CONNTRACK_TIME_WAIT,        */
-    &ip_ct_tcp_timeout_close,          /*      TCP_CONNTRACK_CLOSE,    */
-    NULL,                              /*      TCP_CONNTRACK_LISTEN */
- };
-
-#define sNO TCP_CONNTRACK_NONE
-#define sSS TCP_CONNTRACK_SYN_SENT
-#define sSR TCP_CONNTRACK_SYN_RECV
-#define sES TCP_CONNTRACK_ESTABLISHED
-#define sFW TCP_CONNTRACK_FIN_WAIT
-#define sCW TCP_CONNTRACK_CLOSE_WAIT
-#define sLA TCP_CONNTRACK_LAST_ACK
-#define sTW TCP_CONNTRACK_TIME_WAIT
-#define sCL TCP_CONNTRACK_CLOSE
-#define sLI TCP_CONNTRACK_LISTEN
-#define sIV TCP_CONNTRACK_MAX
-#define sIG TCP_CONNTRACK_IGNORE
-
-/* What TCP flags are set from RST/SYN/FIN/ACK. */
-enum tcp_bit_set {
-       TCP_SYN_SET,
-       TCP_SYNACK_SET,
-       TCP_FIN_SET,
-       TCP_ACK_SET,
-       TCP_RST_SET,
-       TCP_NONE_SET,
-};
-
-/*
- * The TCP state transition table needs a few words...
- *
- * We are the man in the middle. All the packets go through us
- * but might get lost in transit to the destination.
- * It is assumed that the destinations can't receive segments
- * we haven't seen.
- *
- * The checked segment is in window, but our windows are *not*
- * equivalent with the ones of the sender/receiver. We always
- * try to guess the state of the current sender.
- *
- * The meaning of the states are:
- *
- * NONE:       initial state
- * SYN_SENT:   SYN-only packet seen
- * SYN_RECV:   SYN-ACK packet seen
- * ESTABLISHED:        ACK packet seen
- * FIN_WAIT:   FIN packet seen
- * CLOSE_WAIT: ACK seen (after FIN)
- * LAST_ACK:   FIN seen (after FIN)
- * TIME_WAIT:  last ACK seen
- * CLOSE:      closed connection
- *
- * LISTEN state is not used.
- *
- * Packets are marked as IGNORED (sIG)
- *     if they may be either invalid or valid
- *     and the receiver may send back a connection
- *     closing RST or a SYN/ACK.
- *
- * Packets are marked as INVALID (sIV)
- *     if they are invalid
- *     or if we do not support the request (simultaneous open).
- */
-static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
-       {
-/* ORIGINAL */
-/*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI   */
-/*syn*/           { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sIV },
-/*
- *     sNO -> sSS      Initialize a new connection
- *     sSS -> sSS      Retransmitted SYN
- *     sSR -> sIG      Late retransmitted SYN?
- *     sES -> sIG      Error: SYNs in window outside the SYN_SENT state
- *                     are errors. Receiver will reply with RST
- *                     and close the connection.
- *                     Or we are not in sync and hold a dead connection.
- *     sFW -> sIG
- *     sCW -> sIG
- *     sLA -> sIG
- *     sTW -> sSS      Reopened connection (RFC 1122).
- *     sCL -> sSS
- */
-/*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI   */
-/*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV },
-/*
- * A SYN/ACK from the client is always invalid:
- *     - either it tries to set up a simultaneous open, which is
- *       not supported;
- *     - or the firewall has just been inserted between the two hosts
- *       during the session set-up. The SYN will be retransmitted
- *       by the true client (or it'll time out).
- */
-/*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI   */
-/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
-/*
- *     sNO -> sIV      Too late and no reason to do anything...
- *     sSS -> sIV      Client might not send FIN in this state:
- *                     we enforce waiting for a SYN/ACK reply first.
- *     sSR -> sFW      Close started.
- *     sES -> sFW
- *     sFW -> sLA      FIN seen in both directions, waiting for
- *                     the last ACK.
- *                     Might be a retransmitted FIN as well...
- *     sCW -> sLA
- *     sLA -> sLA      Retransmitted FIN. Remain in the same state.
- *     sTW -> sTW
- *     sCL -> sCL
- */
-/*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI   */
-/*ack*/           { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
-/*
- *     sNO -> sES      Assumed.
- *     sSS -> sIV      ACK is invalid: we haven't seen a SYN/ACK yet.
- *     sSR -> sES      Established state is reached.
- *     sES -> sES      :-)
- *     sFW -> sCW      Normal close request answered by ACK.
- *     sCW -> sCW
- *     sLA -> sTW      Last ACK detected.
- *     sTW -> sTW      Retransmitted last ACK. Remain in the same state.
- *     sCL -> sCL
- */
-/*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI   */
-/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV },
-/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
-       },
-       {
-/* REPLY */
-/*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI   */
-/*syn*/           { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV },
-/*
- *     sNO -> sIV      Never reached.
- *     sSS -> sIV      Simultaneous open, not supported
- *     sSR -> sIV      Simultaneous open, not supported.
- *     sES -> sIV      Server may not initiate a connection.
- *     sFW -> sIV
- *     sCW -> sIV
- *     sLA -> sIV
- *     sTW -> sIV      Reopened connection, but server may not do it.
- *     sCL -> sIV
- */
-/*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI   */
-/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIV },
-/*
- *     sSS -> sSR      Standard open.
- *     sSR -> sSR      Retransmitted SYN/ACK.
- *     sES -> sIG      Late retransmitted SYN/ACK?
- *     sFW -> sIG      Might be SYN/ACK answering ignored SYN
- *     sCW -> sIG
- *     sLA -> sIG
- *     sTW -> sIG
- *     sCL -> sIG
- */
-/*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI   */
-/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
-/*
- *     sSS -> sIV      Server might not send FIN in this state.
- *     sSR -> sFW      Close started.
- *     sES -> sFW
- *     sFW -> sLA      FIN seen in both directions.
- *     sCW -> sLA
- *     sLA -> sLA      Retransmitted FIN.
- *     sTW -> sTW
- *     sCL -> sCL
- */
-/*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI   */
-/*ack*/           { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIV },
-/*
- *     sSS -> sIG      Might be a half-open connection.
- *     sSR -> sSR      Might answer late resent SYN.
- *     sES -> sES      :-)
- *     sFW -> sCW      Normal close request answered by ACK.
- *     sCW -> sCW
- *     sLA -> sTW      Last ACK detected.
- *     sTW -> sTW      Retransmitted last ACK.
- *     sCL -> sCL
- */
-/*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI   */
-/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV },
-/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
-       }
-};
-
-static int tcp_pkt_to_tuple(const struct sk_buff *skb,
-                           unsigned int dataoff,
-                           struct ip_conntrack_tuple *tuple)
-{
-       struct tcphdr _hdr, *hp;
-
-       /* Actually only need first 8 bytes. */
-       hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
-       if (hp == NULL)
-               return 0;
-
-       tuple->src.u.tcp.port = hp->source;
-       tuple->dst.u.tcp.port = hp->dest;
-
-       return 1;
-}
-
-static int tcp_invert_tuple(struct ip_conntrack_tuple *tuple,
-                           const struct ip_conntrack_tuple *orig)
-{
-       tuple->src.u.tcp.port = orig->dst.u.tcp.port;
-       tuple->dst.u.tcp.port = orig->src.u.tcp.port;
-       return 1;
-}
-
-/* Print out the per-protocol part of the tuple. */
-static int tcp_print_tuple(struct seq_file *s,
-                          const struct ip_conntrack_tuple *tuple)
-{
-       return seq_printf(s, "sport=%hu dport=%hu ",
-                         ntohs(tuple->src.u.tcp.port),
-                         ntohs(tuple->dst.u.tcp.port));
-}
-
-/* Print out the private part of the conntrack. */
-static int tcp_print_conntrack(struct seq_file *s,
-                              const struct ip_conntrack *conntrack)
-{
-       enum tcp_conntrack state;
-
-       read_lock_bh(&tcp_lock);
-       state = conntrack->proto.tcp.state;
-       read_unlock_bh(&tcp_lock);
-
-       return seq_printf(s, "%s ", tcp_conntrack_names[state]);
-}
-
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa,
-                        const struct ip_conntrack *ct)
-{
-       struct nfattr *nest_parms;
-
-       read_lock_bh(&tcp_lock);
-       nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP);
-       NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t),
-               &ct->proto.tcp.state);
-       read_unlock_bh(&tcp_lock);
-
-       NFA_NEST_END(skb, nest_parms);
-
-       return 0;
-
-nfattr_failure:
-       read_unlock_bh(&tcp_lock);
-       return -1;
-}
-
-static const size_t cta_min_tcp[CTA_PROTOINFO_TCP_MAX] = {
-       [CTA_PROTOINFO_TCP_STATE-1]     = sizeof(u_int8_t),
-};
-
-static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct)
-{
-       struct nfattr *attr = cda[CTA_PROTOINFO_TCP-1];
-       struct nfattr *tb[CTA_PROTOINFO_TCP_MAX];
-
-       /* updates might not contain anything about the private
-        * protocol info; in that case skip the parsing */
-       if (!attr)
-               return 0;
-
-       nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr);
-
-       if (nfattr_bad_size(tb, CTA_PROTOINFO_TCP_MAX, cta_min_tcp))
-               return -EINVAL;
-
-       if (!tb[CTA_PROTOINFO_TCP_STATE-1])
-               return -EINVAL;
-
-       write_lock_bh(&tcp_lock);
-       ct->proto.tcp.state =
-               *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]);
-       write_unlock_bh(&tcp_lock);
-
-       return 0;
-}
-#endif
-
-static unsigned int get_conntrack_index(const struct tcphdr *tcph)
-{
-       if (tcph->rst) return TCP_RST_SET;
-       else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
-       else if (tcph->fin) return TCP_FIN_SET;
-       else if (tcph->ack) return TCP_ACK_SET;
-       else return TCP_NONE_SET;
-}
-
-/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
-   in IP Filter' by Guido van Rooij.
-
-   http://www.nluug.nl/events/sane2000/papers.html
-   http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz
-
-   The boundaries and the conditions are changed according to RFC793:
-   the packet must intersect the window (i.e. segments may be
-   after the right or before the left edge) and thus receivers may ACK
-   segments after the right edge of the window.
-
-       td_maxend = max(sack + max(win,1)) seen in reply packets
-       td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
-       td_maxwin += seq + len - sender.td_maxend
-                       if seq + len > sender.td_maxend
-       td_end    = max(seq + len) seen in sent packets
-
-   I.   Upper bound for valid data:    seq <= sender.td_maxend
-   II.  Lower bound for valid data:    seq + len >= sender.td_end - receiver.td_maxwin
-   III.        Upper bound for valid ack:      sack <= receiver.td_end
-   IV. Lower bound for valid ack:      ack >= receiver.td_end - MAXACKWINDOW
-
-   where sack is the highest right edge of sack block found in the packet.
-
-   The upper bound limit for a valid ack is not ignored -
-   we don't have to deal with fragments.
-*/
-
-static inline __u32 segment_seq_plus_len(__u32 seq,
-                                        size_t len,
-                                        struct iphdr *iph,
-                                        struct tcphdr *tcph)
-{
-       return (seq + len - (iph->ihl + tcph->doff)*4
-               + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
-}
-
-/* Fixme: what about big packets? */
-#define MAXACKWINCONST                 66000
-#define MAXACKWINDOW(sender)                                           \
-       ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin     \
-                                             : MAXACKWINCONST)
-
-/*
- * Simplified tcp_parse_options routine from tcp_input.c
- */
-static void tcp_options(const struct sk_buff *skb,
-                       struct iphdr *iph,
-                       struct tcphdr *tcph,
-                       struct ip_ct_tcp_state *state)
-{
-       unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
-       unsigned char *ptr;
-       int length = (tcph->doff*4) - sizeof(struct tcphdr);
-
-       if (!length)
-               return;
-
-       ptr = skb_header_pointer(skb,
-                                (iph->ihl * 4) + sizeof(struct tcphdr),
-                                length, buff);
-       BUG_ON(ptr == NULL);
-
-       state->td_scale =
-       state->flags = 0;
-
-       while (length > 0) {
-               int opcode=*ptr++;
-               int opsize;
-
-               switch (opcode) {
-               case TCPOPT_EOL:
-                       return;
-               case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
-                       length--;
-                       continue;
-               default:
-                       opsize=*ptr++;
-                       if (opsize < 2) /* "silly options" */
-                               return;
-                       if (opsize > length)
-                               break;  /* don't parse partial options */
-
-                       if (opcode == TCPOPT_SACK_PERM
-                           && opsize == TCPOLEN_SACK_PERM)
-                               state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
-                       else if (opcode == TCPOPT_WINDOW
-                                && opsize == TCPOLEN_WINDOW) {
-                               state->td_scale = *(u_int8_t *)ptr;
-
-                               if (state->td_scale > 14) {
-                                       /* See RFC1323 */
-                                       state->td_scale = 14;
-                               }
-                               state->flags |=
-                                       IP_CT_TCP_FLAG_WINDOW_SCALE;
-                       }
-                       ptr += opsize - 2;
-                       length -= opsize;
-               }
-       }
-}
-
-static void tcp_sack(const struct sk_buff *skb,
-                    struct iphdr *iph,
-                    struct tcphdr *tcph,
-                    __u32 *sack)
-{
-       unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
-       unsigned char *ptr;
-       int length = (tcph->doff*4) - sizeof(struct tcphdr);
-       __u32 tmp;
-
-       if (!length)
-               return;
-
-       ptr = skb_header_pointer(skb,
-                                (iph->ihl * 4) + sizeof(struct tcphdr),
-                                length, buff);
-       BUG_ON(ptr == NULL);
-
-       /* Fast path for timestamp-only option */
-       if (length == TCPOLEN_TSTAMP_ALIGNED*4
-           && *(__be32 *)ptr ==
-               __constant_htonl((TCPOPT_NOP << 24)
-                                | (TCPOPT_NOP << 16)
-                                | (TCPOPT_TIMESTAMP << 8)
-                                | TCPOLEN_TIMESTAMP))
-               return;
-
-       while (length > 0) {
-               int opcode=*ptr++;
-               int opsize, i;
-
-               switch (opcode) {
-               case TCPOPT_EOL:
-                       return;
-               case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
-                       length--;
-                       continue;
-               default:
-                       opsize=*ptr++;
-                       if (opsize < 2) /* "silly options" */
-                               return;
-                       if (opsize > length)
-                               break;  /* don't parse partial options */
-
-                       if (opcode == TCPOPT_SACK
-                           && opsize >= (TCPOLEN_SACK_BASE
-                                         + TCPOLEN_SACK_PERBLOCK)
-                           && !((opsize - TCPOLEN_SACK_BASE)
-                                % TCPOLEN_SACK_PERBLOCK)) {
-                               for (i = 0;
-                                    i < (opsize - TCPOLEN_SACK_BASE);
-                                    i += TCPOLEN_SACK_PERBLOCK) {
-                                       tmp = ntohl(*((__be32 *)(ptr+i)+1));
-
-                                       if (after(tmp, *sack))
-                                               *sack = tmp;
-                               }
-                               return;
-                       }
-                       ptr += opsize - 2;
-                       length -= opsize;
-               }
-       }
-}
-
-static int tcp_in_window(struct ip_ct_tcp *state,
-                        enum ip_conntrack_dir dir,
-                        unsigned int index,
-                        const struct sk_buff *skb,
-                        struct iphdr *iph,
-                        struct tcphdr *tcph)
-{
-       struct ip_ct_tcp_state *sender = &state->seen[dir];
-       struct ip_ct_tcp_state *receiver = &state->seen[!dir];
-       __u32 seq, ack, sack, end, win, swin;
-       int res;
-
-       /*
-        * Get the required data from the packet.
-        */
-       seq = ntohl(tcph->seq);
-       ack = sack = ntohl(tcph->ack_seq);
-       win = ntohs(tcph->window);
-       end = segment_seq_plus_len(seq, skb->len, iph, tcph);
-
-       if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
-               tcp_sack(skb, iph, tcph, &sack);
-
-       DEBUGP("tcp_in_window: START\n");
-       DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
-              "seq=%u ack=%u sack=%u win=%u end=%u\n",
-               NIPQUAD(iph->saddr), ntohs(tcph->source),
-               NIPQUAD(iph->daddr), ntohs(tcph->dest),
-               seq, ack, sack, win, end);
-       DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
-              "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
-               sender->td_end, sender->td_maxend, sender->td_maxwin,
-               sender->td_scale,
-               receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
-               receiver->td_scale);
-
-       if (sender->td_end == 0) {
-               /*
-                * Initialize sender data.
-                */
-               if (tcph->syn && tcph->ack) {
-                       /*
-                        * Outgoing SYN-ACK in reply to a SYN.
-                        */
-                       sender->td_end =
-                       sender->td_maxend = end;
-                       sender->td_maxwin = (win == 0 ? 1 : win);
-
-                       tcp_options(skb, iph, tcph, sender);
-                       /*
-                        * RFC 1323:
-                        * Both sides must send the Window Scale option
-                        * to enable window scaling in either direction.
-                        */
-                       if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
-                             && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
-                               sender->td_scale =
-                               receiver->td_scale = 0;
-               } else {
-                       /*
-                        * We are in the middle of a connection,
-                        * its history is lost for us.
-                        * Let's try to use the data from the packet.
-                        */
-                       sender->td_end = end;
-                       sender->td_maxwin = (win == 0 ? 1 : win);
-                       sender->td_maxend = end + sender->td_maxwin;
-               }
-       } else if (((state->state == TCP_CONNTRACK_SYN_SENT
-                    && dir == IP_CT_DIR_ORIGINAL)
-                   || (state->state == TCP_CONNTRACK_SYN_RECV
-                       && dir == IP_CT_DIR_REPLY))
-                   && after(end, sender->td_end)) {
-               /*
-                * RFC 793: "if a TCP is reinitialized ... then it need
-                * not wait at all; it must only be sure to use sequence
-                * numbers larger than those recently used."
-                */
-               sender->td_end =
-               sender->td_maxend = end;
-               sender->td_maxwin = (win == 0 ? 1 : win);
-
-               tcp_options(skb, iph, tcph, sender);
-       }
-
-       if (!(tcph->ack)) {
-               /*
-                * If there is no ACK, just pretend it was set and OK.
-                */
-               ack = sack = receiver->td_end;
-       } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
-                   (TCP_FLAG_ACK|TCP_FLAG_RST))
-                  && (ack == 0)) {
-               /*
- * Broken TCP stacks that set the ACK flag in RST packets
- * but carry a zero ack value.
-                */
-               ack = sack = receiver->td_end;
-       }
-
-       if (seq == end
-           && (!tcph->rst
-               || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
-               /*
-                * Packet contains no data: we assume it is valid
-                * and check the ack value only.
-                * However, RST segments are always validated by their
-                * SEQ number, except when seq == 0 (reset sent answering
-                * a SYN).
-                */
-               seq = end = sender->td_end;
-
-       DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
-              "seq=%u ack=%u sack =%u win=%u end=%u\n",
-               NIPQUAD(iph->saddr), ntohs(tcph->source),
-               NIPQUAD(iph->daddr), ntohs(tcph->dest),
-               seq, ack, sack, win, end);
-       DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
-              "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
-               sender->td_end, sender->td_maxend, sender->td_maxwin,
-               sender->td_scale,
-               receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
-               receiver->td_scale);
-
-       DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
-               before(seq, sender->td_maxend + 1),
-               after(end, sender->td_end - receiver->td_maxwin - 1),
-               before(sack, receiver->td_end + 1),
-               after(ack, receiver->td_end - MAXACKWINDOW(sender)));
-
-       if (before(seq, sender->td_maxend + 1) &&
-           after(end, sender->td_end - receiver->td_maxwin - 1) &&
-           before(sack, receiver->td_end + 1) &&
-           after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
-               /*
-                * Take into account window scaling (RFC 1323).
-                */
-               if (!tcph->syn)
-                       win <<= sender->td_scale;
-
-               /*
-                * Update sender data.
-                */
-               swin = win + (sack - ack);
-               if (sender->td_maxwin < swin)
-                       sender->td_maxwin = swin;
-               if (after(end, sender->td_end))
-                       sender->td_end = end;
-               /*
-                * Update receiver data.
-                */
-               if (after(end, sender->td_maxend))
-                       receiver->td_maxwin += end - sender->td_maxend;
-               if (after(sack + win, receiver->td_maxend - 1)) {
-                       receiver->td_maxend = sack + win;
-                       if (win == 0)
-                               receiver->td_maxend++;
-               }
-
-               /*
-                * Check retransmissions.
-                */
-               if (index == TCP_ACK_SET) {
-                       if (state->last_dir == dir
-                           && state->last_seq == seq
-                           && state->last_ack == ack
-                           && state->last_end == end
-                           && state->last_win == win)
-                               state->retrans++;
-                       else {
-                               state->last_dir = dir;
-                               state->last_seq = seq;
-                               state->last_ack = ack;
-                               state->last_end = end;
-                               state->last_win = win;
-                               state->retrans = 0;
-                       }
-               }
-               res = 1;
-       } else {
-               res = 0;
-               if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
-                   ip_ct_tcp_be_liberal)
-                       res = 1;
-               if (!res && LOG_INVALID(IPPROTO_TCP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                       "ip_ct_tcp: %s ",
-                       before(seq, sender->td_maxend + 1) ?
-                       after(end, sender->td_end - receiver->td_maxwin - 1) ?
-                       before(sack, receiver->td_end + 1) ?
-                       after(ack, receiver->td_end - MAXACKWINDOW(sender)) ? "BUG"
-                       : "ACK is under the lower bound (possible overly delayed ACK)"
-                       : "ACK is over the upper bound (ACKed data not seen yet)"
-                       : "SEQ is under the lower bound (already ACKed data retransmitted)"
-                       : "SEQ is over the upper bound (over the window of the receiver)");
-       }
-
-       DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
-              "receiver end=%u maxend=%u maxwin=%u\n",
-               res, sender->td_end, sender->td_maxend, sender->td_maxwin,
-               receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
-
-       return res;
-}
-
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-/* Update sender->td_end after NAT successfully mangled the packet */
-void ip_conntrack_tcp_update(struct sk_buff *skb,
-                            struct ip_conntrack *conntrack,
-                            enum ip_conntrack_dir dir)
-{
-       struct iphdr *iph = skb->nh.iph;
-       struct tcphdr *tcph = (void *)skb->nh.iph + skb->nh.iph->ihl*4;
-       __u32 end;
-#ifdef DEBUGP_VARS
-       struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[dir];
-       struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[!dir];
-#endif
-
-       end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, iph, tcph);
-
-       write_lock_bh(&tcp_lock);
-       /*
-        * We have to worry for the ack in the reply packet only...
-        */
-       if (after(end, conntrack->proto.tcp.seen[dir].td_end))
-               conntrack->proto.tcp.seen[dir].td_end = end;
-       conntrack->proto.tcp.last_end = end;
-       write_unlock_bh(&tcp_lock);
-       DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
-              "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
-               sender->td_end, sender->td_maxend, sender->td_maxwin,
-               sender->td_scale,
-               receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
-               receiver->td_scale);
-}
-
-#endif
-
-#define        TH_FIN  0x01
-#define        TH_SYN  0x02
-#define        TH_RST  0x04
-#define        TH_PUSH 0x08
-#define        TH_ACK  0x10
-#define        TH_URG  0x20
-#define        TH_ECE  0x40
-#define        TH_CWR  0x80
-
-/* table of valid flag combinations - ECE and CWR are always valid */
-static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] =
-{
-       [TH_SYN]                        = 1,
-       [TH_SYN|TH_PUSH]                = 1,
-       [TH_SYN|TH_URG]                 = 1,
-       [TH_SYN|TH_PUSH|TH_URG]         = 1,
-       [TH_SYN|TH_ACK]                 = 1,
-       [TH_SYN|TH_ACK|TH_PUSH]         = 1,
-       [TH_RST]                        = 1,
-       [TH_RST|TH_ACK]                 = 1,
-       [TH_RST|TH_ACK|TH_PUSH]         = 1,
-       [TH_FIN|TH_ACK]                 = 1,
-       [TH_ACK]                        = 1,
-       [TH_ACK|TH_PUSH]                = 1,
-       [TH_ACK|TH_URG]                 = 1,
-       [TH_ACK|TH_URG|TH_PUSH]         = 1,
-       [TH_FIN|TH_ACK|TH_PUSH]         = 1,
-       [TH_FIN|TH_ACK|TH_URG]          = 1,
-       [TH_FIN|TH_ACK|TH_URG|TH_PUSH]  = 1,
-};
-
-/* Protect conntrack against broken packets. Code taken from ipt_unclean.c.  */
-static int tcp_error(struct sk_buff *skb,
-                    enum ip_conntrack_info *ctinfo,
-                    unsigned int hooknum)
-{
-       struct iphdr *iph = skb->nh.iph;
-       struct tcphdr _tcph, *th;
-       unsigned int tcplen = skb->len - iph->ihl * 4;
-       u_int8_t tcpflags;
-
-       /* Smaller than the minimal TCP header? */
-       th = skb_header_pointer(skb, iph->ihl * 4,
-                               sizeof(_tcph), &_tcph);
-       if (th == NULL) {
-               if (LOG_INVALID(IPPROTO_TCP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                               "ip_ct_tcp: short packet ");
-               return -NF_ACCEPT;
-       }
-
-       /* Not whole TCP header or malformed packet */
-       if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
-               if (LOG_INVALID(IPPROTO_TCP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                               "ip_ct_tcp: truncated/malformed packet ");
-               return -NF_ACCEPT;
-       }
-
-       /* Checksum invalid? Ignore.
-        * We skip checking packets on the outgoing path
-        * because the checksum is assumed to be correct there.
-        */
-       /* FIXME: Source route IP option packets --RR */
-       if (ip_conntrack_checksum && hooknum == NF_IP_PRE_ROUTING &&
-           nf_ip_checksum(skb, hooknum, iph->ihl * 4, IPPROTO_TCP)) {
-               if (LOG_INVALID(IPPROTO_TCP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                                 "ip_ct_tcp: bad TCP checksum ");
-               return -NF_ACCEPT;
-       }
-
-       /* Check TCP flags. */
-       tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR));
-       if (!tcp_valid_flags[tcpflags]) {
-               if (LOG_INVALID(IPPROTO_TCP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                                 "ip_ct_tcp: invalid TCP flag combination ");
-               return -NF_ACCEPT;
-       }
-
-       return NF_ACCEPT;
-}
-
-/* Returns verdict for packet, or -1 for invalid. */
-static int tcp_packet(struct ip_conntrack *conntrack,
-                     const struct sk_buff *skb,
-                     enum ip_conntrack_info ctinfo)
-{
-       enum tcp_conntrack new_state, old_state;
-       enum ip_conntrack_dir dir;
-       struct iphdr *iph = skb->nh.iph;
-       struct tcphdr *th, _tcph;
-       unsigned long timeout;
-       unsigned int index;
-
-       th = skb_header_pointer(skb, iph->ihl * 4,
-                               sizeof(_tcph), &_tcph);
-       BUG_ON(th == NULL);
-
-       write_lock_bh(&tcp_lock);
-       old_state = conntrack->proto.tcp.state;
-       dir = CTINFO2DIR(ctinfo);
-       index = get_conntrack_index(th);
-       new_state = tcp_conntracks[dir][index][old_state];
-
-       switch (new_state) {
-       case TCP_CONNTRACK_IGNORE:
-               /* Ignored packets:
-                *
-                * a) SYN in ORIGINAL
-                * b) SYN/ACK in REPLY
-                * c) ACK in reply direction after initial SYN in original.
-                */
-               if (index == TCP_SYNACK_SET
-                   && conntrack->proto.tcp.last_index == TCP_SYN_SET
-                   && conntrack->proto.tcp.last_dir != dir
-                   && ntohl(th->ack_seq) ==
-                            conntrack->proto.tcp.last_end) {
-                       /* This SYN/ACK acknowledges a SYN that we earlier
-                        * ignored as invalid. This means that the client and
-                        * the server are both in sync, while the firewall is
-                        * not. We kill this session and block the SYN/ACK so
-                        * that the client cannot but retransmit its SYN and
-                        * thus initiate a clean new session.
-                        */
-                       write_unlock_bh(&tcp_lock);
-                       if (LOG_INVALID(IPPROTO_TCP))
-                               nf_log_packet(PF_INET, 0, skb, NULL, NULL,
-                                             NULL, "ip_ct_tcp: "
-                                             "killing out of sync session ");
-                       if (del_timer(&conntrack->timeout))
-                               conntrack->timeout.function((unsigned long)
-                                                           conntrack);
-                       return -NF_DROP;
-               }
-               conntrack->proto.tcp.last_index = index;
-               conntrack->proto.tcp.last_dir = dir;
-               conntrack->proto.tcp.last_seq = ntohl(th->seq);
-               conntrack->proto.tcp.last_end =
-                   segment_seq_plus_len(ntohl(th->seq), skb->len, iph, th);
-
-               write_unlock_bh(&tcp_lock);
-               if (LOG_INVALID(IPPROTO_TCP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                                 "ip_ct_tcp: invalid packet ignored ");
-               return NF_ACCEPT;
-       case TCP_CONNTRACK_MAX:
-               /* Invalid packet */
-               DEBUGP("ip_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
-                      dir, get_conntrack_index(th),
-                      old_state);
-               write_unlock_bh(&tcp_lock);
-               if (LOG_INVALID(IPPROTO_TCP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                                 "ip_ct_tcp: invalid state ");
-               return -NF_ACCEPT;
-       case TCP_CONNTRACK_SYN_SENT:
-               if (old_state < TCP_CONNTRACK_TIME_WAIT)
-                       break;
-               if ((conntrack->proto.tcp.seen[dir].flags &
-                        IP_CT_TCP_FLAG_CLOSE_INIT)
-                   || after(ntohl(th->seq),
-                            conntrack->proto.tcp.seen[dir].td_end)) {
-                       /* Attempt to reopen a closed connection.
-                       * Delete this connection and look up again. */
-                       write_unlock_bh(&tcp_lock);
-                       if (del_timer(&conntrack->timeout))
-                               conntrack->timeout.function((unsigned long)
-                                                           conntrack);
-                       return -NF_REPEAT;
-               } else {
-                       write_unlock_bh(&tcp_lock);
-                       if (LOG_INVALID(IPPROTO_TCP))
-                               nf_log_packet(PF_INET, 0, skb, NULL, NULL,
-                                             NULL, "ip_ct_tcp: invalid SYN");
-                       return -NF_ACCEPT;
-               }
-       case TCP_CONNTRACK_CLOSE:
-               if (index == TCP_RST_SET
-                   && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)
-                        && conntrack->proto.tcp.last_index == TCP_SYN_SET)
-                       || (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
-                           && conntrack->proto.tcp.last_index == TCP_ACK_SET))
-                   && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) {
-                       /* RST sent to invalid SYN or ACK we had let through
-                        * at a) and c) above:
-                        *
-                        * a) SYN was in window then
-                        * c) we hold a half-open connection.
-                        *
-                        * Delete our connection entry.
-                        * We skip window checking, because packet might ACK
-                        * segments we ignored. */
-                       goto in_window;
-               }
-               /* Just fall through */
-       default:
-               /* Keep compilers happy. */
-               break;
-       }
-
-       if (!tcp_in_window(&conntrack->proto.tcp, dir, index,
-                          skb, iph, th)) {
-               write_unlock_bh(&tcp_lock);
-               return -NF_ACCEPT;
-       }
-    in_window:
-       /* From now on we have got in-window packets */
-       conntrack->proto.tcp.last_index = index;
-
-       DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
-              "syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
-               NIPQUAD(iph->saddr), ntohs(th->source),
-               NIPQUAD(iph->daddr), ntohs(th->dest),
-               (th->syn ? 1 : 0), (th->ack ? 1 : 0),
-               (th->fin ? 1 : 0), (th->rst ? 1 : 0),
-               old_state, new_state);
-
-       conntrack->proto.tcp.state = new_state;
-       if (old_state != new_state
-           && (new_state == TCP_CONNTRACK_FIN_WAIT
-               || new_state == TCP_CONNTRACK_CLOSE))
-               conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
-       timeout = conntrack->proto.tcp.retrans >= ip_ct_tcp_max_retrans
-                 && *tcp_timeouts[new_state] > ip_ct_tcp_timeout_max_retrans
-                 ? ip_ct_tcp_timeout_max_retrans : *tcp_timeouts[new_state];
-       write_unlock_bh(&tcp_lock);
-
-       ip_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
-       if (new_state != old_state)
-               ip_conntrack_event_cache(IPCT_PROTOINFO, skb);
-
-       if (!test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
-               /* If the only reply is a RST, we can consider ourselves not to
-                  have an established connection: this is a fairly common
-                  problem case, so we can delete the conntrack
-                  immediately.  --RR */
-               if (th->rst) {
-                       if (del_timer(&conntrack->timeout))
-                               conntrack->timeout.function((unsigned long)
-                                                           conntrack);
-                       return NF_ACCEPT;
-               }
-       } else if (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
-                  && (old_state == TCP_CONNTRACK_SYN_RECV
-                      || old_state == TCP_CONNTRACK_ESTABLISHED)
-                  && new_state == TCP_CONNTRACK_ESTABLISHED) {
-               /* Set ASSURED if we see a valid ack in ESTABLISHED
-                  after SYN_RECV or a valid answer for a picked up
-                  connection. */
-               set_bit(IPS_ASSURED_BIT, &conntrack->status);
-               ip_conntrack_event_cache(IPCT_STATUS, skb);
-       }
-       ip_ct_refresh_acct(conntrack, ctinfo, skb, timeout);
-
-       return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol is found. */
-static int tcp_new(struct ip_conntrack *conntrack,
-                  const struct sk_buff *skb)
-{
-       enum tcp_conntrack new_state;
-       struct iphdr *iph = skb->nh.iph;
-       struct tcphdr *th, _tcph;
-#ifdef DEBUGP_VARS
-       struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[0];
-       struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[1];
-#endif
-
-       th = skb_header_pointer(skb, iph->ihl * 4,
-                               sizeof(_tcph), &_tcph);
-       BUG_ON(th == NULL);
-
-       /* Don't need lock here: this conntrack not in circulation yet */
-       new_state
-               = tcp_conntracks[0][get_conntrack_index(th)]
-               [TCP_CONNTRACK_NONE];
-
-       /* Invalid: delete conntrack */
-       if (new_state >= TCP_CONNTRACK_MAX) {
-               DEBUGP("ip_ct_tcp: invalid new deleting.\n");
-               return 0;
-       }
-
-       if (new_state == TCP_CONNTRACK_SYN_SENT) {
-               /* SYN packet */
-               conntrack->proto.tcp.seen[0].td_end =
-                       segment_seq_plus_len(ntohl(th->seq), skb->len,
-                                            iph, th);
-               conntrack->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
-               if (conntrack->proto.tcp.seen[0].td_maxwin == 0)
-                       conntrack->proto.tcp.seen[0].td_maxwin = 1;
-               conntrack->proto.tcp.seen[0].td_maxend =
-                       conntrack->proto.tcp.seen[0].td_end;
-
-               tcp_options(skb, iph, th, &conntrack->proto.tcp.seen[0]);
-               conntrack->proto.tcp.seen[1].flags = 0;
-       } else if (ip_ct_tcp_loose == 0) {
-               /* Don't try to pick up connections. */
-               return 0;
-       } else {
-               /*
-                * We are in the middle of a connection,
-                * its history is lost for us.
-                * Let's try to use the data from the packet.
-                */
-               conntrack->proto.tcp.seen[0].td_end =
-                       segment_seq_plus_len(ntohl(th->seq), skb->len,
-                                            iph, th);
-               conntrack->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
-               if (conntrack->proto.tcp.seen[0].td_maxwin == 0)
-                       conntrack->proto.tcp.seen[0].td_maxwin = 1;
-               conntrack->proto.tcp.seen[0].td_maxend =
-                       conntrack->proto.tcp.seen[0].td_end +
-                       conntrack->proto.tcp.seen[0].td_maxwin;
-               conntrack->proto.tcp.seen[0].td_scale = 0;
-
-               /* We assume SACK and liberal window checking to handle
-                * window scaling */
-               conntrack->proto.tcp.seen[0].flags =
-               conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
-                                                    IP_CT_TCP_FLAG_BE_LIBERAL;
-       }
-
-       conntrack->proto.tcp.seen[1].td_end = 0;
-       conntrack->proto.tcp.seen[1].td_maxend = 0;
-       conntrack->proto.tcp.seen[1].td_maxwin = 1;
-       conntrack->proto.tcp.seen[1].td_scale = 0;
-
-       /* tcp_packet will set them */
-       conntrack->proto.tcp.state = TCP_CONNTRACK_NONE;
-       conntrack->proto.tcp.last_index = TCP_NONE_SET;
-
-       DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
-              "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
-               sender->td_end, sender->td_maxend, sender->td_maxwin,
-               sender->td_scale,
-               receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
-               receiver->td_scale);
-       return 1;
-}
-
-struct ip_conntrack_protocol ip_conntrack_protocol_tcp =
-{
-       .proto                  = IPPROTO_TCP,
-       .name                   = "tcp",
-       .pkt_to_tuple           = tcp_pkt_to_tuple,
-       .invert_tuple           = tcp_invert_tuple,
-       .print_tuple            = tcp_print_tuple,
-       .print_conntrack        = tcp_print_conntrack,
-       .packet                 = tcp_packet,
-       .new                    = tcp_new,
-       .error                  = tcp_error,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-       .to_nfattr              = tcp_to_nfattr,
-       .from_nfattr            = nfattr_to_tcp,
-       .tuple_to_nfattr        = ip_ct_port_tuple_to_nfattr,
-       .nfattr_to_tuple        = ip_ct_port_nfattr_to_tuple,
-#endif
-};
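
The window-tracking comment above (bounds I-IV) is the heart of tcp_in_window(). Below is a small userspace sketch of just those four wraparound-safe comparisons with simplified bookkeeping; peer_state, seq_before/seq_after and segment_in_window are hypothetical names for illustration, not kernel interfaces.

/* Userspace sketch (hypothetical names, not kernel code) of the four
 * in-window checks I-IV described above tcp_in_window(). */
#include <stdint.h>
#include <stdio.h>

struct peer_state {
        uint32_t td_end;        /* max(seq + len) seen from this peer */
        uint32_t td_maxend;     /* max(sack + max(win, 1)) seen in packets from the other peer */
        uint32_t td_maxwin;     /* largest window offered so far */
};

/* Wraparound-safe sequence-number comparisons (same idea as before()/after()). */
static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static int seq_after(uint32_t a, uint32_t b)  { return (int32_t)(b - a) < 0; }

#define MAXACKWINCONST  66000u
#define MAXACKWINDOW(s) \
        ((s)->td_maxwin > MAXACKWINCONST ? (s)->td_maxwin : MAXACKWINCONST)

/* Returns 1 when a segment [seq, end) carrying ack/sack satisfies I-IV. */
static int segment_in_window(const struct peer_state *sender,
                             const struct peer_state *receiver,
                             uint32_t seq, uint32_t end,
                             uint32_t ack, uint32_t sack)
{
        return seq_before(seq, sender->td_maxend + 1) &&                    /* I   */
               seq_after(end, sender->td_end - receiver->td_maxwin - 1) &&  /* II  */
               seq_before(sack, receiver->td_end + 1) &&                    /* III */
               seq_after(ack, receiver->td_end - MAXACKWINDOW(sender));     /* IV  */
}

int main(void)
{
        struct peer_state snd = { .td_end = 2000, .td_maxend = 3000, .td_maxwin = 1000 };
        struct peer_state rcv = { .td_end = 5000, .td_maxend = 6000, .td_maxwin = 1000 };

        /* 100 bytes inside the announced window, ACKing data already seen: prints 1 */
        printf("%d\n", segment_in_window(&snd, &rcv, 2100, 2200, 4900, 4900));
        /* Same segment, but ACKing data the receiver never sent (fails III): prints 0 */
        printf("%d\n", segment_in_window(&snd, &rcv, 2100, 2200, 9000, 9000));
        return 0;
}
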
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_udp.c b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
deleted file mode 100644 (file)
index 14c30c6..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/netfilter.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/seq_file.h>
-#include <net/checksum.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-
-unsigned int ip_ct_udp_timeout __read_mostly = 30*HZ;
-unsigned int ip_ct_udp_timeout_stream __read_mostly = 180*HZ;
-
-static int udp_pkt_to_tuple(const struct sk_buff *skb,
-                            unsigned int dataoff,
-                            struct ip_conntrack_tuple *tuple)
-{
-       struct udphdr _hdr, *hp;
-
-       /* Actually only need first 8 bytes. */
-       hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
-       if (hp == NULL)
-               return 0;
-
-       tuple->src.u.udp.port = hp->source;
-       tuple->dst.u.udp.port = hp->dest;
-
-       return 1;
-}
-
-static int udp_invert_tuple(struct ip_conntrack_tuple *tuple,
-                           const struct ip_conntrack_tuple *orig)
-{
-       tuple->src.u.udp.port = orig->dst.u.udp.port;
-       tuple->dst.u.udp.port = orig->src.u.udp.port;
-       return 1;
-}
-
-/* Print out the per-protocol part of the tuple. */
-static int udp_print_tuple(struct seq_file *s,
-                          const struct ip_conntrack_tuple *tuple)
-{
-       return seq_printf(s, "sport=%hu dport=%hu ",
-                         ntohs(tuple->src.u.udp.port),
-                         ntohs(tuple->dst.u.udp.port));
-}
-
-/* Print out the private part of the conntrack. */
-static int udp_print_conntrack(struct seq_file *s,
-                              const struct ip_conntrack *conntrack)
-{
-       return 0;
-}
-
-/* Returns verdict for packet, and may modify conntrack type */
-static int udp_packet(struct ip_conntrack *conntrack,
-                     const struct sk_buff *skb,
-                     enum ip_conntrack_info ctinfo)
-{
-       /* If we've seen traffic both ways, this is some kind of UDP
-          stream.  Extend timeout. */
-       if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
-               ip_ct_refresh_acct(conntrack, ctinfo, skb,
-                                  ip_ct_udp_timeout_stream);
-               /* Also, more likely to be important, and not a probe */
-               if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status))
-                       ip_conntrack_event_cache(IPCT_STATUS, skb);
-       } else
-               ip_ct_refresh_acct(conntrack, ctinfo, skb, ip_ct_udp_timeout);
-
-       return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol is found. */
-static int udp_new(struct ip_conntrack *conntrack, const struct sk_buff *skb)
-{
-       return 1;
-}
-
-static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
-                    unsigned int hooknum)
-{
-       struct iphdr *iph = skb->nh.iph;
-       unsigned int udplen = skb->len - iph->ihl * 4;
-       struct udphdr _hdr, *hdr;
-
-       /* Header is too small? */
-       hdr = skb_header_pointer(skb, iph->ihl*4, sizeof(_hdr), &_hdr);
-       if (hdr == NULL) {
-               if (LOG_INVALID(IPPROTO_UDP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                                 "ip_ct_udp: short packet ");
-               return -NF_ACCEPT;
-       }
-
-       /* Truncated/malformed packets */
-       if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
-               if (LOG_INVALID(IPPROTO_UDP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                                 "ip_ct_udp: truncated/malformed packet ");
-               return -NF_ACCEPT;
-       }
-
-       /* Packet with no checksum */
-       if (!hdr->check)
-               return NF_ACCEPT;
-
-       /* Checksum invalid? Ignore.
-        * We skip checking packets on the outgoing path
-        * because the checksum is assumed to be correct.
-        * FIXME: Source route IP option packets --RR */
-       if (ip_conntrack_checksum && hooknum == NF_IP_PRE_ROUTING &&
-           nf_ip_checksum(skb, hooknum, iph->ihl * 4, IPPROTO_UDP)) {
-               if (LOG_INVALID(IPPROTO_UDP))
-                       nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
-                                 "ip_ct_udp: bad UDP checksum ");
-               return -NF_ACCEPT;
-       }
-
-       return NF_ACCEPT;
-}
-
-struct ip_conntrack_protocol ip_conntrack_protocol_udp =
-{
-       .proto                  = IPPROTO_UDP,
-       .name                   = "udp",
-       .pkt_to_tuple           = udp_pkt_to_tuple,
-       .invert_tuple           = udp_invert_tuple,
-       .print_tuple            = udp_print_tuple,
-       .print_conntrack        = udp_print_conntrack,
-       .packet                 = udp_packet,
-       .new                    = udp_new,
-       .error                  = udp_error,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-       .tuple_to_nfattr        = ip_ct_port_tuple_to_nfattr,
-       .nfattr_to_tuple        = ip_ct_port_nfattr_to_tuple,
-#endif
-};
diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c
deleted file mode 100644 (file)
index c59a962..0000000
+++ /dev/null
@@ -1,520 +0,0 @@
-/* SIP extension for IP connection tracking.
- *
- * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
- * based on RR's ip_conntrack_ftp.c and other modules.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/skbuff.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_sip.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
-MODULE_DESCRIPTION("SIP connection tracking helper");
-
-#define MAX_PORTS      8
-static unsigned short ports[MAX_PORTS];
-static int ports_c;
-module_param_array(ports, ushort, &ports_c, 0400);
-MODULE_PARM_DESC(ports, "port numbers of sip servers");
-
-static unsigned int sip_timeout = SIP_TIMEOUT;
-module_param(sip_timeout, uint, 0600);
-MODULE_PARM_DESC(sip_timeout, "timeout for the master SIP session");
-
-unsigned int (*ip_nat_sip_hook)(struct sk_buff **pskb,
-                               enum ip_conntrack_info ctinfo,
-                               struct ip_conntrack *ct,
-                               const char **dptr);
-EXPORT_SYMBOL_GPL(ip_nat_sip_hook);
-
-unsigned int (*ip_nat_sdp_hook)(struct sk_buff **pskb,
-                               enum ip_conntrack_info ctinfo,
-                               struct ip_conntrack_expect *exp,
-                               const char *dptr);
-EXPORT_SYMBOL_GPL(ip_nat_sdp_hook);
-
-static int digits_len(const char *dptr, const char *limit, int *shift);
-static int epaddr_len(const char *dptr, const char *limit, int *shift);
-static int skp_digits_len(const char *dptr, const char *limit, int *shift);
-static int skp_epaddr_len(const char *dptr, const char *limit, int *shift);
-
-struct sip_header_nfo {
-       const char      *lname;
-       const char      *sname;
-       const char      *ln_str;
-       size_t          lnlen;
-       size_t          snlen;
-       size_t          ln_strlen;
-       int             case_sensitive;
-       int             (*match_len)(const char *, const char *, int *);
-};
-
-static struct sip_header_nfo ct_sip_hdrs[] = {
-       [POS_REG_REQ_URI] = {   /* SIP REGISTER request URI */
-               .lname          = "sip:",
-               .lnlen          = sizeof("sip:") - 1,
-               .ln_str         = ":",
-               .ln_strlen      = sizeof(":") - 1,
-               .match_len      = epaddr_len
-       },
-       [POS_REQ_URI] = {       /* SIP request URI */
-               .lname          = "sip:",
-               .lnlen          = sizeof("sip:") - 1,
-               .ln_str         = "@",
-               .ln_strlen      = sizeof("@") - 1,
-               .match_len      = epaddr_len
-       },
-       [POS_FROM] = {          /* SIP From header */
-               .lname          = "From:",
-               .lnlen          = sizeof("From:") - 1,
-               .sname          = "\r\nf:",
-               .snlen          = sizeof("\r\nf:") - 1,
-               .ln_str         = "sip:",
-               .ln_strlen      = sizeof("sip:") - 1,
-               .match_len      = skp_epaddr_len,
-       },
-       [POS_TO] = {            /* SIP To header */
-               .lname          = "To:",
-               .lnlen          = sizeof("To:") - 1,
-               .sname          = "\r\nt:",
-               .snlen          = sizeof("\r\nt:") - 1,
-               .ln_str         = "sip:",
-               .ln_strlen      = sizeof("sip:") - 1,
-               .match_len      = skp_epaddr_len,
-       },
-       [POS_VIA] = {           /* SIP Via header */
-               .lname          = "Via:",
-               .lnlen          = sizeof("Via:") - 1,
-               .sname          = "\r\nv:",
-               .snlen          = sizeof("\r\nv:") - 1, /* rfc3261 "\r\n" */
-               .ln_str         = "UDP ",
-               .ln_strlen      = sizeof("UDP ") - 1,
-               .match_len      = epaddr_len,
-       },
-       [POS_CONTACT] = {       /* SIP Contact header */
-               .lname          = "Contact:",
-               .lnlen          = sizeof("Contact:") - 1,
-               .sname          = "\r\nm:",
-               .snlen          = sizeof("\r\nm:") - 1,
-               .ln_str         = "sip:",
-               .ln_strlen      = sizeof("sip:") - 1,
-               .match_len      = skp_epaddr_len
-       },
-       [POS_CONTENT] = {       /* SIP Content length header */
-               .lname          = "Content-Length:",
-               .lnlen          = sizeof("Content-Length:") - 1,
-               .sname          = "\r\nl:",
-               .snlen          = sizeof("\r\nl:") - 1,
-               .ln_str         = ":",
-               .ln_strlen      = sizeof(":") - 1,
-               .match_len      = skp_digits_len
-       },
-       [POS_MEDIA] = {         /* SDP media info */
-               .case_sensitive = 1,
-               .lname          = "\nm=",
-               .lnlen          = sizeof("\nm=") - 1,
-               .sname          = "\rm=",
-               .snlen          = sizeof("\rm=") - 1,
-               .ln_str         = "audio ",
-               .ln_strlen      = sizeof("audio ") - 1,
-               .match_len      = digits_len
-       },
-       [POS_OWNER] = {         /* SDP owner address */
-               .case_sensitive = 1,
-               .lname          = "\no=",
-               .lnlen          = sizeof("\no=") - 1,
-               .sname          = "\ro=",
-               .snlen          = sizeof("\ro=") - 1,
-               .ln_str         = "IN IP4 ",
-               .ln_strlen      = sizeof("IN IP4 ") - 1,
-               .match_len      = epaddr_len
-       },
-       [POS_CONNECTION] = {    /* SDP connection info */
-               .case_sensitive = 1,
-               .lname          = "\nc=",
-               .lnlen          = sizeof("\nc=") - 1,
-               .sname          = "\rc=",
-               .snlen          = sizeof("\rc=") - 1,
-               .ln_str         = "IN IP4 ",
-               .ln_strlen      = sizeof("IN IP4 ") - 1,
-               .match_len      = epaddr_len
-       },
-       [POS_SDP_HEADER] = {    /* SDP version header */
-               .case_sensitive = 1,
-               .lname          = "\nv=",
-               .lnlen          = sizeof("\nv=") - 1,
-               .sname          = "\rv=",
-               .snlen          = sizeof("\rv=") - 1,
-               .ln_str         = "=",
-               .ln_strlen      = sizeof("=") - 1,
-               .match_len      = digits_len
-       }
-};
-
-/* get line length until the first CR or LF is seen. */
-int ct_sip_lnlen(const char *line, const char *limit)
-{
-       const char *k = line;
-
-       while ((line <= limit) && (*line == '\r' || *line == '\n'))
-               line++;
-
-       while (line <= limit) {
-               if (*line == '\r' || *line == '\n')
-                       break;
-               line++;
-       }
-       return line - k;
-}
-EXPORT_SYMBOL_GPL(ct_sip_lnlen);
-
-/* Linear string search; case sensitivity is selectable. */
-const char *ct_sip_search(const char *needle, const char *haystack,
-                         size_t needle_len, size_t haystack_len,
-                         int case_sensitive)
-{
-       const char *limit = haystack + (haystack_len - needle_len);
-
-       while (haystack <= limit) {
-               if (case_sensitive) {
-                       if (strncmp(haystack, needle, needle_len) == 0)
-                               return haystack;
-               } else {
-                       if (strnicmp(haystack, needle, needle_len) == 0)
-                               return haystack;
-               }
-               haystack++;
-       }
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(ct_sip_search);
-
-static int digits_len(const char *dptr, const char *limit, int *shift)
-{
-       int len = 0;
-       while (dptr <= limit && isdigit(*dptr)) {
-               dptr++;
-               len++;
-       }
-       return len;
-}
-
-/* get digits length, skipping blank spaces. */
-static int skp_digits_len(const char *dptr, const char *limit, int *shift)
-{
-       for (; dptr <= limit && *dptr == ' '; dptr++)
-               (*shift)++;
-
-       return digits_len(dptr, limit, shift);
-}
-
-/* Simple ipaddr parser. */
-static int parse_ipaddr(const char *cp,        const char **endp,
-                       __be32 *ipaddr, const char *limit)
-{
-       unsigned long int val;
-       int i, digit = 0;
-
-       for (i = 0, *ipaddr = 0; cp <= limit && i < 4; i++) {
-               digit = 0;
-               if (!isdigit(*cp))
-                       break;
-
-               val = simple_strtoul(cp, (char **)&cp, 10);
-               if (val > 0xFF)
-                       return -1;
-
-               ((u_int8_t *)ipaddr)[i] = val;
-               digit = 1;
-
-               if (*cp != '.')
-                       break;
-               cp++;
-       }
-       if (!digit)
-               return -1;
-
-       if (endp)
-               *endp = cp;
-
-       return 0;
-}
-
-/* skip ip address. returns its length. */
-static int epaddr_len(const char *dptr, const char *limit, int *shift)
-{
-       const char *aux = dptr;
-       __be32 ip;
-
-       if (parse_ipaddr(dptr, &dptr, &ip, limit) < 0) {
-               DEBUGP("ip: %s parse failed!\n", dptr);
-               return 0;
-       }
-
-       /* Port number */
-       if (*dptr == ':') {
-               dptr++;
-               dptr += digits_len(dptr, limit, shift);
-       }
-       return dptr - aux;
-}
-
-/* get address length, skipping user info. */
-static int skp_epaddr_len(const char *dptr, const char *limit, int *shift)
-{
-       int s = *shift;
-
-       /* Search for @, but stop at the end of the line.
-        * We are inside a sip: URI, so we don't need to worry about
-        * continuation lines. */
-       while (dptr <= limit &&
-              *dptr != '@' && *dptr != '\r' && *dptr != '\n') {
-               (*shift)++;
-               dptr++;
-       }
-
-       if (dptr <= limit && *dptr == '@') {
-               dptr++;
-               (*shift)++;
-       } else
-               *shift = s;
-
-       return epaddr_len(dptr, limit, shift);
-}
-
-/* Returns 1 if found, 0 if not found, -1 on parse error. */
-int ct_sip_get_info(const char *dptr, size_t dlen,
-                   unsigned int *matchoff,
-                   unsigned int *matchlen,
-                   enum sip_header_pos pos)
-{
-       struct sip_header_nfo *hnfo = &ct_sip_hdrs[pos];
-       const char *limit, *aux, *k = dptr;
-       int shift = 0;
-
-       limit = dptr + (dlen - hnfo->lnlen);
-
-       while (dptr <= limit) {
-               if ((strncmp(dptr, hnfo->lname, hnfo->lnlen) != 0) &&
-                   (hnfo->sname == NULL ||
-                    strncmp(dptr, hnfo->sname, hnfo->snlen) != 0)) {
-                       dptr++;
-                       continue;
-               }
-               aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen,
-                                   ct_sip_lnlen(dptr, limit),
-                                   hnfo->case_sensitive);
-               if (!aux) {
-                       DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str,
-                              hnfo->lname);
-                       return -1;
-               }
-               aux += hnfo->ln_strlen;
-
-               *matchlen = hnfo->match_len(aux, limit, &shift);
-               if (!*matchlen)
-                       return -1;
-
-               *matchoff = (aux - k) + shift;
-
-               DEBUGP("%s match succeeded! - len: %u\n", hnfo->lname,
-                      *matchlen);
-               return 1;
-       }
-       DEBUGP("%s header not found.\n", hnfo->lname);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(ct_sip_get_info);
-
-static int set_expected_rtp(struct sk_buff **pskb,
-                           struct ip_conntrack *ct,
-                           enum ip_conntrack_info ctinfo,
-                           __be32 ipaddr, u_int16_t port,
-                           const char *dptr)
-{
-       struct ip_conntrack_expect *exp;
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       int ret;
-       typeof(ip_nat_sdp_hook) ip_nat_sdp;
-
-       exp = ip_conntrack_expect_alloc(ct);
-       if (exp == NULL)
-               return NF_DROP;
-
-       exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip;
-       exp->tuple.src.u.udp.port = 0;
-       exp->tuple.dst.ip = ipaddr;
-       exp->tuple.dst.u.udp.port = htons(port);
-       exp->tuple.dst.protonum = IPPROTO_UDP;
-
-       exp->mask.src.ip = htonl(0xFFFFFFFF);
-       exp->mask.src.u.udp.port = 0;
-       exp->mask.dst.ip = htonl(0xFFFFFFFF);
-       exp->mask.dst.u.udp.port = htons(0xFFFF);
-       exp->mask.dst.protonum = 0xFF;
-
-       exp->expectfn = NULL;
-       exp->flags = 0;
-
-       ip_nat_sdp = rcu_dereference(ip_nat_sdp_hook);
-       if (ip_nat_sdp)
-               ret = ip_nat_sdp(pskb, ctinfo, exp, dptr);
-       else {
-               if (ip_conntrack_expect_related(exp) != 0)
-                       ret = NF_DROP;
-               else
-                       ret = NF_ACCEPT;
-       }
-       ip_conntrack_expect_put(exp);
-
-       return ret;
-}
-
-static int sip_help(struct sk_buff **pskb,
-                   struct ip_conntrack *ct,
-                   enum ip_conntrack_info ctinfo)
-{
-       unsigned int dataoff, datalen;
-       const char *dptr;
-       int ret = NF_ACCEPT;
-       int matchoff, matchlen;
-       __be32 ipaddr;
-       u_int16_t port;
-       typeof(ip_nat_sip_hook) ip_nat_sip;
-
-       /* No data? */
-       dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
-       if (dataoff >= (*pskb)->len) {
-               DEBUGP("skb->len = %u\n", (*pskb)->len);
-               return NF_ACCEPT;
-       }
-
-       ip_ct_refresh(ct, *pskb, sip_timeout * HZ);
-
-       if (!skb_is_nonlinear(*pskb))
-               dptr = (*pskb)->data + dataoff;
-       else {
-               DEBUGP("Copy of skbuff not supported yet.\n");
-               goto out;
-       }
-
-       ip_nat_sip = rcu_dereference(ip_nat_sip_hook);
-       if (ip_nat_sip) {
-               if (!ip_nat_sip(pskb, ctinfo, ct, &dptr)) {
-                       ret = NF_DROP;
-                       goto out;
-               }
-       }
-
-       /* After this point NAT could have mangled the skb, so
-          we need to recalculate the payload length. */
-       datalen = (*pskb)->len - dataoff;
-
-       if (datalen < (sizeof("SIP/2.0 200") - 1))
-               goto out;
-
-       /* RTP info only in some SDP pkts */
-       if (memcmp(dptr, "INVITE", sizeof("INVITE") - 1) != 0 &&
-           memcmp(dptr, "SIP/2.0 200", sizeof("SIP/2.0 200") - 1) != 0) {
-               goto out;
-       }
-       /* Get ip and port address from SDP packet. */
-       if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen,
-                           POS_CONNECTION) > 0) {
-
-               /* We'll drop only if there are parse problems. */
-               if (parse_ipaddr(dptr + matchoff, NULL, &ipaddr,
-                                dptr + datalen) < 0) {
-                       ret = NF_DROP;
-                       goto out;
-               }
-               if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen,
-                                   POS_MEDIA) > 0) {
-
-                       port = simple_strtoul(dptr + matchoff, NULL, 10);
-                       if (port < 1024) {
-                               ret = NF_DROP;
-                               goto out;
-                       }
-                       ret = set_expected_rtp(pskb, ct, ctinfo,
-                                              ipaddr, port, dptr);
-               }
-       }
-out:
-       return ret;
-}
-
-static struct ip_conntrack_helper sip[MAX_PORTS];
-static char sip_names[MAX_PORTS][10];
-
-static void fini(void)
-{
-       int i;
-       for (i = 0; i < ports_c; i++) {
-               DEBUGP("unregistering helper for port %d\n", ports[i]);
-               ip_conntrack_helper_unregister(&sip[i]);
-       }
-}
-
-static int __init init(void)
-{
-       int i, ret;
-       char *tmpname;
-
-       if (ports_c == 0)
-               ports[ports_c++] = SIP_PORT;
-
-       for (i = 0; i < ports_c; i++) {
-               /* Create helper structure */
-               memset(&sip[i], 0, sizeof(struct ip_conntrack_helper));
-
-               sip[i].tuple.dst.protonum = IPPROTO_UDP;
-               sip[i].tuple.src.u.udp.port = htons(ports[i]);
-               sip[i].mask.src.u.udp.port = htons(0xFFFF);
-               sip[i].mask.dst.protonum = 0xFF;
-               sip[i].max_expected = 2;
-               sip[i].timeout = 3 * 60; /* 3 minutes */
-               sip[i].me = THIS_MODULE;
-               sip[i].help = sip_help;
-
-               tmpname = &sip_names[i][0];
-               if (ports[i] == SIP_PORT)
-                       sprintf(tmpname, "sip");
-               else
-                       sprintf(tmpname, "sip-%d", i);
-               sip[i].name = tmpname;
-
-               DEBUGP("port #%d: %d\n", i, ports[i]);
-
-               ret = ip_conntrack_helper_register(&sip[i]);
-               if (ret) {
-                       printk("ERROR registering helper for port %d\n",
-                               ports[i]);
-                       fini();
-                       return ret;
-               }
-       }
-       return 0;
-}
-
-module_init(init);
-module_exit(fini);
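
The helper above locates SIP headers positionally: ct_sip_get_info() walks the payload for a header's long or compact name, then hands the rest of the line to the per-header match_len callback. A minimal sketch of the call pattern, using only symbols visible in the removed file (the function name and log text are illustrative):

static void example_log_contact(const char *dptr, size_t datalen)
{
	unsigned int matchoff, matchlen;

	/* ct_sip_get_info() returns 1 when the header is found, 0 when it
	 * is absent and -1 on a parse error; on success matchoff/matchlen
	 * delimit the endpoint address inside the Contact: (or compact
	 * "m:") header. */
	if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen,
			    POS_CONTACT) > 0)
		DEBUGP("contact endpoint at offset %u, len %u\n",
		       matchoff, matchlen);
}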
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
deleted file mode 100644 (file)
index 56b2f75..0000000
+++ /dev/null
@@ -1,962 +0,0 @@
-/* This file contains all the functions required for the standalone
-   ip_conntrack module.
-
-   These are not required by the compatibility layer.
-*/
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/percpu.h>
-#ifdef CONFIG_SYSCTL
-#include <linux/sysctl.h>
-#endif
-#include <net/checksum.h>
-#include <net/ip.h>
-#include <net/route.h>
-
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-MODULE_LICENSE("GPL");
-
-extern atomic_t ip_conntrack_count;
-DECLARE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
-
-static int kill_proto(struct ip_conntrack *i, void *data)
-{
-       return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
-                       *((u_int8_t *) data));
-}
-
-#ifdef CONFIG_PROC_FS
-static int
-print_tuple(struct seq_file *s, const struct ip_conntrack_tuple *tuple,
-           struct ip_conntrack_protocol *proto)
-{
-       seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ",
-                  NIPQUAD(tuple->src.ip), NIPQUAD(tuple->dst.ip));
-       return proto->print_tuple(s, tuple);
-}
-
-#ifdef CONFIG_IP_NF_CT_ACCT
-static unsigned int
-seq_print_counters(struct seq_file *s,
-                  const struct ip_conntrack_counter *counter)
-{
-       return seq_printf(s, "packets=%llu bytes=%llu ",
-                         (unsigned long long)counter->packets,
-                         (unsigned long long)counter->bytes);
-}
-#else
-#define seq_print_counters(x, y)       0
-#endif
-
-struct ct_iter_state {
-       unsigned int bucket;
-};
-
-static struct list_head *ct_get_first(struct seq_file *seq)
-{
-       struct ct_iter_state *st = seq->private;
-
-       for (st->bucket = 0;
-            st->bucket < ip_conntrack_htable_size;
-            st->bucket++) {
-               if (!list_empty(&ip_conntrack_hash[st->bucket]))
-                       return ip_conntrack_hash[st->bucket].next;
-       }
-       return NULL;
-}
-
-static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head)
-{
-       struct ct_iter_state *st = seq->private;
-
-       head = head->next;
-       while (head == &ip_conntrack_hash[st->bucket]) {
-               if (++st->bucket >= ip_conntrack_htable_size)
-                       return NULL;
-               head = ip_conntrack_hash[st->bucket].next;
-       }
-       return head;
-}
-
-static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos)
-{
-       struct list_head *head = ct_get_first(seq);
-
-       if (head)
-               while (pos && (head = ct_get_next(seq, head)))
-                       pos--;
-       return pos ? NULL : head;
-}
-
-static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       read_lock_bh(&ip_conntrack_lock);
-       return ct_get_idx(seq, *pos);
-}
-
-static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
-       (*pos)++;
-       return ct_get_next(s, v);
-}
-
-static void ct_seq_stop(struct seq_file *s, void *v)
-{
-       read_unlock_bh(&ip_conntrack_lock);
-}
-
-static int ct_seq_show(struct seq_file *s, void *v)
-{
-       const struct ip_conntrack_tuple_hash *hash = v;
-       const struct ip_conntrack *conntrack = tuplehash_to_ctrack(hash);
-       struct ip_conntrack_protocol *proto;
-
-       IP_NF_ASSERT(conntrack);
-
-       /* we only want to print DIR_ORIGINAL */
-       if (DIRECTION(hash))
-               return 0;
-
-       proto = __ip_conntrack_proto_find(conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
-       IP_NF_ASSERT(proto);
-
-       if (seq_printf(s, "%-8s %u %ld ",
-                     proto->name,
-                     conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
-                     timer_pending(&conntrack->timeout)
-                     ? (long)(conntrack->timeout.expires - jiffies)/HZ
-                     : 0) != 0)
-               return -ENOSPC;
-
-       if (proto->print_conntrack(s, conntrack))
-               return -ENOSPC;
-
-       if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-                       proto))
-               return -ENOSPC;
-
-       if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL]))
-               return -ENOSPC;
-
-       if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)))
-               if (seq_printf(s, "[UNREPLIED] "))
-                       return -ENOSPC;
-
-       if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
-                       proto))
-               return -ENOSPC;
-
-       if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY]))
-               return -ENOSPC;
-
-       if (test_bit(IPS_ASSURED_BIT, &conntrack->status))
-               if (seq_printf(s, "[ASSURED] "))
-                       return -ENOSPC;
-
-#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
-       if (seq_printf(s, "mark=%u ", conntrack->mark))
-               return -ENOSPC;
-#endif
-
-#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK
-       if (seq_printf(s, "secmark=%u ", conntrack->secmark))
-               return -ENOSPC;
-#endif
-
-       if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use)))
-               return -ENOSPC;
-
-       return 0;
-}
-
-static struct seq_operations ct_seq_ops = {
-       .start = ct_seq_start,
-       .next  = ct_seq_next,
-       .stop  = ct_seq_stop,
-       .show  = ct_seq_show
-};
-
-static int ct_open(struct inode *inode, struct file *file)
-{
-       struct seq_file *seq;
-       struct ct_iter_state *st;
-       int ret;
-
-       st = kmalloc(sizeof(struct ct_iter_state), GFP_KERNEL);
-       if (st == NULL)
-               return -ENOMEM;
-       ret = seq_open(file, &ct_seq_ops);
-       if (ret)
-               goto out_free;
-       seq          = file->private_data;
-       seq->private = st;
-       memset(st, 0, sizeof(struct ct_iter_state));
-       return ret;
-out_free:
-       kfree(st);
-       return ret;
-}
-
-static const struct file_operations ct_file_ops = {
-       .owner   = THIS_MODULE,
-       .open    = ct_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release_private,
-};
-
-/* expects */
-static void *exp_seq_start(struct seq_file *s, loff_t *pos)
-{
-       struct list_head *e = &ip_conntrack_expect_list;
-       loff_t i;
-
-       /* The seq_file API calls stop even if start fails, so we
-        * need to grab the lock here since stop is what unlocks. */
-       read_lock_bh(&ip_conntrack_lock);
-
-       if (list_empty(e))
-               return NULL;
-
-       for (i = 0; i <= *pos; i++) {
-               e = e->next;
-               if (e == &ip_conntrack_expect_list)
-                       return NULL;
-       }
-       return e;
-}
-
-static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
-       struct list_head *e = v;
-
-       ++*pos;
-       e = e->next;
-
-       if (e == &ip_conntrack_expect_list)
-               return NULL;
-
-       return e;
-}
-
-static void exp_seq_stop(struct seq_file *s, void *v)
-{
-       read_unlock_bh(&ip_conntrack_lock);
-}
-
-static int exp_seq_show(struct seq_file *s, void *v)
-{
-       struct ip_conntrack_expect *expect = v;
-
-       if (expect->timeout.function)
-               seq_printf(s, "%ld ", timer_pending(&expect->timeout)
-                          ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
-       else
-               seq_printf(s, "- ");
-
-       seq_printf(s, "proto=%u ", expect->tuple.dst.protonum);
-
-       print_tuple(s, &expect->tuple,
-                   __ip_conntrack_proto_find(expect->tuple.dst.protonum));
-       return seq_putc(s, '\n');
-}
-
-static struct seq_operations exp_seq_ops = {
-       .start = exp_seq_start,
-       .next = exp_seq_next,
-       .stop = exp_seq_stop,
-       .show = exp_seq_show
-};
-
-static int exp_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &exp_seq_ops);
-}
-
-static const struct file_operations exp_file_ops = {
-       .owner   = THIS_MODULE,
-       .open    = exp_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release
-};
-
-static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       int cpu;
-
-       if (*pos == 0)
-               return SEQ_START_TOKEN;
-
-       for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
-               if (!cpu_possible(cpu))
-                       continue;
-               *pos = cpu+1;
-               return &per_cpu(ip_conntrack_stat, cpu);
-       }
-
-       return NULL;
-}
-
-static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       int cpu;
-
-       for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
-               if (!cpu_possible(cpu))
-                       continue;
-               *pos = cpu+1;
-               return &per_cpu(ip_conntrack_stat, cpu);
-       }
-
-       return NULL;
-}
-
-static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
-{
-}
-
-static int ct_cpu_seq_show(struct seq_file *seq, void *v)
-{
-       unsigned int nr_conntracks = atomic_read(&ip_conntrack_count);
-       struct ip_conntrack_stat *st = v;
-
-       if (v == SEQ_START_TOKEN) {
-               seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete\n");
-               return 0;
-       }
-
-       seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
-                       "%08x %08x %08x %08x %08x  %08x %08x %08x \n",
-                  nr_conntracks,
-                  st->searched,
-                  st->found,
-                  st->new,
-                  st->invalid,
-                  st->ignore,
-                  st->delete,
-                  st->delete_list,
-                  st->insert,
-                  st->insert_failed,
-                  st->drop,
-                  st->early_drop,
-                  st->error,
-
-                  st->expect_new,
-                  st->expect_create,
-                  st->expect_delete
-               );
-       return 0;
-}
-
-static struct seq_operations ct_cpu_seq_ops = {
-       .start  = ct_cpu_seq_start,
-       .next   = ct_cpu_seq_next,
-       .stop   = ct_cpu_seq_stop,
-       .show   = ct_cpu_seq_show,
-};
-
-static int ct_cpu_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &ct_cpu_seq_ops);
-}
-
-static const struct file_operations ct_cpu_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = ct_cpu_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release_private,
-};
-#endif
-
-static unsigned int ip_confirm(unsigned int hooknum,
-                              struct sk_buff **pskb,
-                              const struct net_device *in,
-                              const struct net_device *out,
-                              int (*okfn)(struct sk_buff *))
-{
-       /* We've seen it coming out the other side: confirm it */
-       return ip_conntrack_confirm(pskb);
-}
-
-static unsigned int ip_conntrack_help(unsigned int hooknum,
-                                     struct sk_buff **pskb,
-                                     const struct net_device *in,
-                                     const struct net_device *out,
-                                     int (*okfn)(struct sk_buff *))
-{
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-
-       /* This is where we call the helper: as the packet goes out. */
-       ct = ip_conntrack_get(*pskb, &ctinfo);
-       if (ct && ct->helper && ctinfo != IP_CT_RELATED + IP_CT_IS_REPLY) {
-               unsigned int ret;
-               ret = ct->helper->help(pskb, ct, ctinfo);
-               if (ret != NF_ACCEPT)
-                       return ret;
-       }
-       return NF_ACCEPT;
-}
-
-static unsigned int ip_conntrack_defrag(unsigned int hooknum,
-                                       struct sk_buff **pskb,
-                                       const struct net_device *in,
-                                       const struct net_device *out,
-                                       int (*okfn)(struct sk_buff *))
-{
-#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
-       /* Previously seen (loopback)?  Ignore.  Do this before
-          fragment check. */
-       if ((*pskb)->nfct)
-               return NF_ACCEPT;
-#endif
-
-       /* Gather fragments. */
-       if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
-               *pskb = ip_ct_gather_frags(*pskb,
-                                          hooknum == NF_IP_PRE_ROUTING ?
-                                          IP_DEFRAG_CONNTRACK_IN :
-                                          IP_DEFRAG_CONNTRACK_OUT);
-               if (!*pskb)
-                       return NF_STOLEN;
-       }
-       return NF_ACCEPT;
-}
-
-static unsigned int ip_conntrack_local(unsigned int hooknum,
-                                      struct sk_buff **pskb,
-                                      const struct net_device *in,
-                                      const struct net_device *out,
-                                      int (*okfn)(struct sk_buff *))
-{
-       /* root is playing with raw sockets. */
-       if ((*pskb)->len < sizeof(struct iphdr)
-           || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
-               if (net_ratelimit())
-                       printk("ipt_hook: happy cracking.\n");
-               return NF_ACCEPT;
-       }
-       return ip_conntrack_in(hooknum, pskb, in, out, okfn);
-}
-
-/* Connection tracking may drop packets, but never alters them, so
-   make it the first hook. */
-static struct nf_hook_ops ip_conntrack_ops[] = {
-       {
-               .hook           = ip_conntrack_defrag,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_PRE_ROUTING,
-               .priority       = NF_IP_PRI_CONNTRACK_DEFRAG,
-       },
-       {
-               .hook           = ip_conntrack_in,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_PRE_ROUTING,
-               .priority       = NF_IP_PRI_CONNTRACK,
-       },
-       {
-               .hook           = ip_conntrack_defrag,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_LOCAL_OUT,
-               .priority       = NF_IP_PRI_CONNTRACK_DEFRAG,
-       },
-       {
-               .hook           = ip_conntrack_local,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_LOCAL_OUT,
-               .priority       = NF_IP_PRI_CONNTRACK,
-       },
-       {
-               .hook           = ip_conntrack_help,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_POST_ROUTING,
-               .priority       = NF_IP_PRI_CONNTRACK_HELPER,
-       },
-       {
-               .hook           = ip_conntrack_help,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_LOCAL_IN,
-               .priority       = NF_IP_PRI_CONNTRACK_HELPER,
-       },
-       {
-               .hook           = ip_confirm,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_POST_ROUTING,
-               .priority       = NF_IP_PRI_CONNTRACK_CONFIRM,
-       },
-       {
-               .hook           = ip_confirm,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_LOCAL_IN,
-               .priority       = NF_IP_PRI_CONNTRACK_CONFIRM,
-       },
-};
-
-/* Sysctl support */
-
-int ip_conntrack_checksum __read_mostly = 1;
-
-#ifdef CONFIG_SYSCTL
-
-/* From ip_conntrack_core.c */
-extern int ip_conntrack_max;
-extern unsigned int ip_conntrack_htable_size;
-
-/* From ip_conntrack_proto_tcp.c */
-extern unsigned int ip_ct_tcp_timeout_syn_sent;
-extern unsigned int ip_ct_tcp_timeout_syn_recv;
-extern unsigned int ip_ct_tcp_timeout_established;
-extern unsigned int ip_ct_tcp_timeout_fin_wait;
-extern unsigned int ip_ct_tcp_timeout_close_wait;
-extern unsigned int ip_ct_tcp_timeout_last_ack;
-extern unsigned int ip_ct_tcp_timeout_time_wait;
-extern unsigned int ip_ct_tcp_timeout_close;
-extern unsigned int ip_ct_tcp_timeout_max_retrans;
-extern int ip_ct_tcp_loose;
-extern int ip_ct_tcp_be_liberal;
-extern int ip_ct_tcp_max_retrans;
-
-/* From ip_conntrack_proto_udp.c */
-extern unsigned int ip_ct_udp_timeout;
-extern unsigned int ip_ct_udp_timeout_stream;
-
-/* From ip_conntrack_proto_icmp.c */
-extern unsigned int ip_ct_icmp_timeout;
-
-/* From ip_conntrack_proto_generic.c */
-extern unsigned int ip_ct_generic_timeout;
-
-/* Log invalid packets of a given protocol */
-static int log_invalid_proto_min = 0;
-static int log_invalid_proto_max = 255;
-
-static struct ctl_table_header *ip_ct_sysctl_header;
-
-static ctl_table ip_ct_sysctl_table[] = {
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_MAX,
-               .procname       = "ip_conntrack_max",
-               .data           = &ip_conntrack_max,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_COUNT,
-               .procname       = "ip_conntrack_count",
-               .data           = &ip_conntrack_count,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_BUCKETS,
-               .procname       = "ip_conntrack_buckets",
-               .data           = &ip_conntrack_htable_size,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0444,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_CHECKSUM,
-               .procname       = "ip_conntrack_checksum",
-               .data           = &ip_conntrack_checksum,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT,
-               .procname       = "ip_conntrack_tcp_timeout_syn_sent",
-               .data           = &ip_ct_tcp_timeout_syn_sent,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV,
-               .procname       = "ip_conntrack_tcp_timeout_syn_recv",
-               .data           = &ip_ct_tcp_timeout_syn_recv,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED,
-               .procname       = "ip_conntrack_tcp_timeout_established",
-               .data           = &ip_ct_tcp_timeout_established,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT,
-               .procname       = "ip_conntrack_tcp_timeout_fin_wait",
-               .data           = &ip_ct_tcp_timeout_fin_wait,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT,
-               .procname       = "ip_conntrack_tcp_timeout_close_wait",
-               .data           = &ip_ct_tcp_timeout_close_wait,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK,
-               .procname       = "ip_conntrack_tcp_timeout_last_ack",
-               .data           = &ip_ct_tcp_timeout_last_ack,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT,
-               .procname       = "ip_conntrack_tcp_timeout_time_wait",
-               .data           = &ip_ct_tcp_timeout_time_wait,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE,
-               .procname       = "ip_conntrack_tcp_timeout_close",
-               .data           = &ip_ct_tcp_timeout_close,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT,
-               .procname       = "ip_conntrack_udp_timeout",
-               .data           = &ip_ct_udp_timeout,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM,
-               .procname       = "ip_conntrack_udp_timeout_stream",
-               .data           = &ip_ct_udp_timeout_stream,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT,
-               .procname       = "ip_conntrack_icmp_timeout",
-               .data           = &ip_ct_icmp_timeout,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT,
-               .procname       = "ip_conntrack_generic_timeout",
-               .data           = &ip_ct_generic_timeout,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_LOG_INVALID,
-               .procname       = "ip_conntrack_log_invalid",
-               .data           = &ip_ct_log_invalid,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &log_invalid_proto_min,
-               .extra2         = &log_invalid_proto_max,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS,
-               .procname       = "ip_conntrack_tcp_timeout_max_retrans",
-               .data           = &ip_ct_tcp_timeout_max_retrans,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_LOOSE,
-               .procname       = "ip_conntrack_tcp_loose",
-               .data           = &ip_ct_tcp_loose,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL,
-               .procname       = "ip_conntrack_tcp_be_liberal",
-               .data           = &ip_ct_tcp_be_liberal,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS,
-               .procname       = "ip_conntrack_tcp_max_retrans",
-               .data           = &ip_ct_tcp_max_retrans,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
-       },
-       { .ctl_name = 0 }
-};
-
-#define NET_IP_CONNTRACK_MAX 2089
-
-static ctl_table ip_ct_netfilter_table[] = {
-       {
-               .ctl_name       = NET_IPV4_NETFILTER,
-               .procname       = "netfilter",
-               .mode           = 0555,
-               .child          = ip_ct_sysctl_table,
-       },
-       {
-               .ctl_name       = NET_IP_CONNTRACK_MAX,
-               .procname       = "ip_conntrack_max",
-               .data           = &ip_conntrack_max,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec
-       },
-       { .ctl_name = 0 }
-};
-
-static ctl_table ip_ct_ipv4_table[] = {
-       {
-               .ctl_name       = NET_IPV4,
-               .procname       = "ipv4",
-               .mode           = 0555,
-               .child          = ip_ct_netfilter_table,
-       },
-       { .ctl_name = 0 }
-};
-
-static ctl_table ip_ct_net_table[] = {
-       {
-               .ctl_name       = CTL_NET,
-               .procname       = "net",
-               .mode           = 0555,
-               .child          = ip_ct_ipv4_table,
-       },
-       { .ctl_name = 0 }
-};
-
-EXPORT_SYMBOL(ip_ct_log_invalid);
-#endif /* CONFIG_SYSCTL */
-
-/* FIXME: Allow NULL functions and sub in pointers to generic for
-   them. --RR */
-int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto)
-{
-       int ret = 0;
-
-       write_lock_bh(&ip_conntrack_lock);
-       if (ip_ct_protos[proto->proto] != &ip_conntrack_generic_protocol) {
-               ret = -EBUSY;
-               goto out;
-       }
-       rcu_assign_pointer(ip_ct_protos[proto->proto], proto);
- out:
-       write_unlock_bh(&ip_conntrack_lock);
-       return ret;
-}
-
-void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto)
-{
-       write_lock_bh(&ip_conntrack_lock);
-       rcu_assign_pointer(ip_ct_protos[proto->proto],
-                          &ip_conntrack_generic_protocol);
-       write_unlock_bh(&ip_conntrack_lock);
-       synchronize_rcu();
-
-       /* Remove all conntrack entries for this protocol */
-       ip_ct_iterate_cleanup(kill_proto, &proto->proto);
-}
-
-static int __init ip_conntrack_standalone_init(void)
-{
-#ifdef CONFIG_PROC_FS
-       struct proc_dir_entry *proc, *proc_exp, *proc_stat;
-#endif
-       int ret = 0;
-
-       ret = ip_conntrack_init();
-       if (ret < 0)
-               return ret;
-
-#ifdef CONFIG_PROC_FS
-       ret = -ENOMEM;
-       proc = proc_net_fops_create("ip_conntrack", 0440, &ct_file_ops);
-       if (!proc) goto cleanup_init;
-
-       proc_exp = proc_net_fops_create("ip_conntrack_expect", 0440,
-                                       &exp_file_ops);
-       if (!proc_exp) goto cleanup_proc;
-
-       proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, proc_net_stat);
-       if (!proc_stat)
-               goto cleanup_proc_exp;
-
-       proc_stat->proc_fops = &ct_cpu_seq_fops;
-       proc_stat->owner = THIS_MODULE;
-#endif
-
-       ret = nf_register_hooks(ip_conntrack_ops, ARRAY_SIZE(ip_conntrack_ops));
-       if (ret < 0) {
-               printk("ip_conntrack: can't register hooks.\n");
-               goto cleanup_proc_stat;
-       }
-#ifdef CONFIG_SYSCTL
-       ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table);
-       if (ip_ct_sysctl_header == NULL) {
-               printk("ip_conntrack: can't register to sysctl.\n");
-               ret = -ENOMEM;
-               goto cleanup_hooks;
-       }
-#endif
-       return ret;
-
-#ifdef CONFIG_SYSCTL
- cleanup_hooks:
-       nf_unregister_hooks(ip_conntrack_ops, ARRAY_SIZE(ip_conntrack_ops));
-#endif
- cleanup_proc_stat:
-#ifdef CONFIG_PROC_FS
-       remove_proc_entry("ip_conntrack", proc_net_stat);
- cleanup_proc_exp:
-       proc_net_remove("ip_conntrack_expect");
- cleanup_proc:
-       proc_net_remove("ip_conntrack");
- cleanup_init:
-#endif /* CONFIG_PROC_FS */
-       ip_conntrack_cleanup();
-       return ret;
-}
-
-static void __exit ip_conntrack_standalone_fini(void)
-{
-       synchronize_net();
-#ifdef CONFIG_SYSCTL
-       unregister_sysctl_table(ip_ct_sysctl_header);
-#endif
-       nf_unregister_hooks(ip_conntrack_ops, ARRAY_SIZE(ip_conntrack_ops));
-#ifdef CONFIG_PROC_FS
-       remove_proc_entry("ip_conntrack", proc_net_stat);
-       proc_net_remove("ip_conntrack_expect");
-       proc_net_remove("ip_conntrack");
-#endif /* CONFIG_PROC_FS */
-       ip_conntrack_cleanup();
-}
-
-module_init(ip_conntrack_standalone_init);
-module_exit(ip_conntrack_standalone_fini);
-
-/* Some modules need us, but don't depend directly on any symbol.
-   They should call this. */
-void need_conntrack(void)
-{
-}
-
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-EXPORT_SYMBOL_GPL(ip_conntrack_chain);
-EXPORT_SYMBOL_GPL(ip_conntrack_expect_chain);
-EXPORT_SYMBOL_GPL(ip_conntrack_register_notifier);
-EXPORT_SYMBOL_GPL(ip_conntrack_unregister_notifier);
-EXPORT_SYMBOL_GPL(__ip_ct_event_cache_init);
-EXPORT_PER_CPU_SYMBOL_GPL(ip_conntrack_ecache);
-#endif
-EXPORT_SYMBOL(ip_conntrack_protocol_register);
-EXPORT_SYMBOL(ip_conntrack_protocol_unregister);
-EXPORT_SYMBOL(ip_ct_get_tuple);
-EXPORT_SYMBOL(invert_tuplepr);
-EXPORT_SYMBOL(ip_conntrack_alter_reply);
-EXPORT_SYMBOL(ip_conntrack_destroyed);
-EXPORT_SYMBOL(need_conntrack);
-EXPORT_SYMBOL(ip_conntrack_helper_register);
-EXPORT_SYMBOL(ip_conntrack_helper_unregister);
-EXPORT_SYMBOL(ip_ct_iterate_cleanup);
-EXPORT_SYMBOL(__ip_ct_refresh_acct);
-
-EXPORT_SYMBOL(ip_conntrack_expect_alloc);
-EXPORT_SYMBOL(ip_conntrack_expect_put);
-EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find);
-EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get);
-EXPORT_SYMBOL(ip_conntrack_expect_related);
-EXPORT_SYMBOL(ip_conntrack_unexpect_related);
-EXPORT_SYMBOL_GPL(ip_conntrack_expect_list);
-EXPORT_SYMBOL_GPL(ip_ct_unlink_expect);
-
-EXPORT_SYMBOL(ip_conntrack_tuple_taken);
-EXPORT_SYMBOL(ip_ct_gather_frags);
-EXPORT_SYMBOL(ip_conntrack_htable_size);
-EXPORT_SYMBOL(ip_conntrack_lock);
-EXPORT_SYMBOL(ip_conntrack_hash);
-EXPORT_SYMBOL(ip_conntrack_untracked);
-EXPORT_SYMBOL_GPL(ip_conntrack_find_get);
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-EXPORT_SYMBOL(ip_conntrack_tcp_update);
-#endif
-
-EXPORT_SYMBOL_GPL(ip_conntrack_flush);
-EXPORT_SYMBOL_GPL(__ip_conntrack_find);
-
-EXPORT_SYMBOL_GPL(ip_conntrack_alloc);
-EXPORT_SYMBOL_GPL(ip_conntrack_free);
-EXPORT_SYMBOL_GPL(ip_conntrack_hash_insert);
-
-EXPORT_SYMBOL_GPL(ip_ct_remove_expectations);
-
-EXPORT_SYMBOL_GPL(ip_conntrack_helper_find_get);
-EXPORT_SYMBOL_GPL(ip_conntrack_helper_put);
-EXPORT_SYMBOL_GPL(__ip_conntrack_helper_find_byname);
-
-EXPORT_SYMBOL_GPL(ip_conntrack_proto_find_get);
-EXPORT_SYMBOL_GPL(ip_conntrack_proto_put);
-EXPORT_SYMBOL_GPL(__ip_conntrack_proto_find);
-EXPORT_SYMBOL_GPL(ip_conntrack_checksum);
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-EXPORT_SYMBOL_GPL(ip_ct_port_tuple_to_nfattr);
-EXPORT_SYMBOL_GPL(ip_ct_port_nfattr_to_tuple);
-#endif
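
The register/unregister pair removed above is the extension point other trackers used: registration swaps the handler into ip_ct_protos[] under ip_conntrack_lock, and unregistration restores the generic handler, waits for RCU readers, then flushes matching entries via ip_ct_iterate_cleanup(). A rough sketch of a module using that pair (the example_* names are placeholders, not real kernel symbols, and the callback fields would have to be filled in as in the per-protocol files above):

static struct ip_conntrack_protocol example_proto = {
	.proto = 253,		/* placeholder protocol number */
	.name  = "example",
	/* .pkt_to_tuple, .invert_tuple, .packet, .new, .error, ... */
};

static int __init example_init(void)
{
	/* fails with -EBUSY if another tracker already owns this protocol */
	return ip_conntrack_protocol_register(&example_proto);
}

static void __exit example_exit(void)
{
	/* restores the generic handler and drops existing entries */
	ip_conntrack_protocol_unregister(&example_proto);
}

module_init(example_init);
module_exit(example_exit);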
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c
deleted file mode 100644 (file)
index 76e175e..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-/* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Version: 0.0.7
- *
- * Thu 21 Mar 2002 Harald Welte <laforge@gnumonks.org>
- *     - port to newnat API
- *
- */
-
-#include <linux/module.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_tftp.h>
-#include <linux/moduleparam.h>
-
-MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
-MODULE_DESCRIPTION("tftp connection tracking helper");
-MODULE_LICENSE("GPL");
-
-#define MAX_PORTS 8
-static unsigned short ports[MAX_PORTS];
-static int ports_c;
-module_param_array(ports, ushort, &ports_c, 0400);
-MODULE_PARM_DESC(ports, "port numbers of tftp servers");
-
-#if 0
-#define DEBUGP(format, args...) printk("%s:%s:" format, \
-                                      __FILE__, __FUNCTION__ , ## args)
-#else
-#define DEBUGP(format, args...)
-#endif
-
-unsigned int (*ip_nat_tftp_hook)(struct sk_buff **pskb,
-                                enum ip_conntrack_info ctinfo,
-                                struct ip_conntrack_expect *exp);
-EXPORT_SYMBOL_GPL(ip_nat_tftp_hook);
-
-static int tftp_help(struct sk_buff **pskb,
-                    struct ip_conntrack *ct,
-                    enum ip_conntrack_info ctinfo)
-{
-       struct tftphdr _tftph, *tfh;
-       struct ip_conntrack_expect *exp;
-       unsigned int ret = NF_ACCEPT;
-       typeof(ip_nat_tftp_hook) ip_nat_tftp;
-
-       tfh = skb_header_pointer(*pskb,
-                                (*pskb)->nh.iph->ihl*4+sizeof(struct udphdr),
-                                sizeof(_tftph), &_tftph);
-       if (tfh == NULL)
-               return NF_ACCEPT;
-
-       switch (ntohs(tfh->opcode)) {
-       /* RRQ and WRQ work the same way */
-       case TFTP_OPCODE_READ:
-       case TFTP_OPCODE_WRITE:
-               DEBUGP("");
-               DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-               DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-
-               exp = ip_conntrack_expect_alloc(ct);
-               if (exp == NULL)
-                       return NF_DROP;
-
-               exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-               exp->mask.src.ip = htonl(0xffffffff);
-               exp->mask.src.u.udp.port = 0;
-               exp->mask.dst.ip = htonl(0xffffffff);
-               exp->mask.dst.u.udp.port = htons(0xffff);
-               exp->mask.dst.protonum = 0xff;
-               exp->expectfn = NULL;
-               exp->flags = 0;
-
-               DEBUGP("expect: ");
-               DUMP_TUPLE(&exp->tuple);
-               DUMP_TUPLE(&exp->mask);
-               ip_nat_tftp = rcu_dereference(ip_nat_tftp_hook);
-               if (ip_nat_tftp)
-                       ret = ip_nat_tftp(pskb, ctinfo, exp);
-               else if (ip_conntrack_expect_related(exp) != 0)
-                       ret = NF_DROP;
-               ip_conntrack_expect_put(exp);
-               break;
-       case TFTP_OPCODE_DATA:
-       case TFTP_OPCODE_ACK:
-               DEBUGP("Data/ACK opcode\n");
-               break;
-       case TFTP_OPCODE_ERROR:
-               DEBUGP("Error opcode\n");
-               break;
-       default:
-               DEBUGP("Unknown opcode\n");
-       }
-       return NF_ACCEPT;
-}
-
-static struct ip_conntrack_helper tftp[MAX_PORTS];
-static char tftp_names[MAX_PORTS][sizeof("tftp-65535")];
-
-static void ip_conntrack_tftp_fini(void)
-{
-       int i;
-
-       for (i = 0 ; i < ports_c; i++) {
-               DEBUGP("unregistering helper for port %d\n",
-                       ports[i]);
-               ip_conntrack_helper_unregister(&tftp[i]);
-       }
-}
-
-static int __init ip_conntrack_tftp_init(void)
-{
-       int i, ret;
-       char *tmpname;
-
-       if (ports_c == 0)
-               ports[ports_c++] = TFTP_PORT;
-
-       for (i = 0; i < ports_c; i++) {
-               /* Create helper structure */
-               memset(&tftp[i], 0, sizeof(struct ip_conntrack_helper));
-
-               tftp[i].tuple.dst.protonum = IPPROTO_UDP;
-               tftp[i].tuple.src.u.udp.port = htons(ports[i]);
-               tftp[i].mask.dst.protonum = 0xFF;
-               tftp[i].mask.src.u.udp.port = htons(0xFFFF);
-               tftp[i].max_expected = 1;
-               tftp[i].timeout = 5 * 60; /* 5 minutes */
-               tftp[i].me = THIS_MODULE;
-               tftp[i].help = tftp_help;
-
-               tmpname = &tftp_names[i][0];
-               if (ports[i] == TFTP_PORT)
-                       sprintf(tmpname, "tftp");
-               else
-                       sprintf(tmpname, "tftp-%d", i);
-               tftp[i].name = tmpname;
-
-               DEBUGP("port #%d: %d\n", i, ports[i]);
-
-               ret=ip_conntrack_helper_register(&tftp[i]);
-               if (ret) {
-                       printk("ERROR registering helper for port %d\n",
-                               ports[i]);
-                       ip_conntrack_tftp_fini();
-                       return(ret);
-               }
-       }
-       return(0);
-}
-
-module_init(ip_conntrack_tftp_init);
-module_exit(ip_conntrack_tftp_fini);
diff --git a/net/ipv4/netfilter/ip_nat_amanda.c b/net/ipv4/netfilter/ip_nat_amanda.c
deleted file mode 100644 (file)
index 85df1a9..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Amanda extension for TCP NAT alteration.
- * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca>
- * based on a copy of HW's ip_nat_irc.c as well as other modules
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- *
- *     Module load syntax:
- *     insmod ip_nat_amanda.o
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <net/tcp.h>
-#include <net/udp.h>
-
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_amanda.h>
-
-
-MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
-MODULE_DESCRIPTION("Amanda NAT helper");
-MODULE_LICENSE("GPL");
-
-static unsigned int help(struct sk_buff **pskb,
-                        enum ip_conntrack_info ctinfo,
-                        unsigned int matchoff,
-                        unsigned int matchlen,
-                        struct ip_conntrack_expect *exp)
-{
-       char buffer[sizeof("65535")];
-       u_int16_t port;
-       unsigned int ret;
-
-       /* Connection comes from client. */
-       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
-       exp->dir = IP_CT_DIR_ORIGINAL;
-
-       /* When you see the packet, we need to NAT it the same as
-        * this one (ie. same IP: it will be TCP and master is UDP). */
-       exp->expectfn = ip_nat_follow_master;
-
-       /* Try to get same port: if not, try to change it. */
-       for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
-               exp->tuple.dst.u.tcp.port = htons(port);
-               if (ip_conntrack_expect_related(exp) == 0)
-                       break;
-       }
-
-       if (port == 0)
-               return NF_DROP;
-
-       sprintf(buffer, "%u", port);
-       ret = ip_nat_mangle_udp_packet(pskb, exp->master, ctinfo,
-                                      matchoff, matchlen,
-                                      buffer, strlen(buffer));
-       if (ret != NF_ACCEPT)
-               ip_conntrack_unexpect_related(exp);
-       return ret;
-}
-
-static void __exit ip_nat_amanda_fini(void)
-{
-       rcu_assign_pointer(ip_nat_amanda_hook, NULL);
-       synchronize_rcu();
-}
-
-static int __init ip_nat_amanda_init(void)
-{
-       BUG_ON(rcu_dereference(ip_nat_amanda_hook));
-       rcu_assign_pointer(ip_nat_amanda_hook, help);
-       return 0;
-}
-
-module_init(ip_nat_amanda_init);
-module_exit(ip_nat_amanda_fini);
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
deleted file mode 100644 (file)
index 40737fd..0000000
+++ /dev/null
@@ -1,634 +0,0 @@
-/* NAT for netfilter; shared with compatibility layer. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/vmalloc.h>
-#include <net/checksum.h>
-#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/tcp.h>  /* For tcp_prot in getorigdst */
-#include <linux/icmp.h>
-#include <linux/udp.h>
-#include <linux/jhash.h>
-
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-#include <linux/netfilter_ipv4/ip_nat_core.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-DEFINE_RWLOCK(ip_nat_lock);
-
-/* Calculated at init based on memory size */
-static unsigned int ip_nat_htable_size;
-
-static struct list_head *bysource;
-
-#define MAX_IP_NAT_PROTO 256
-static struct ip_nat_protocol *ip_nat_protos[MAX_IP_NAT_PROTO];
-
-static inline struct ip_nat_protocol *
-__ip_nat_proto_find(u_int8_t protonum)
-{
-       return rcu_dereference(ip_nat_protos[protonum]);
-}
-
-struct ip_nat_protocol *
-ip_nat_proto_find_get(u_int8_t protonum)
-{
-       struct ip_nat_protocol *p;
-
-       rcu_read_lock();
-       p = __ip_nat_proto_find(protonum);
-       if (!try_module_get(p->me))
-               p = &ip_nat_unknown_protocol;
-       rcu_read_unlock();
-
-       return p;
-}
-EXPORT_SYMBOL_GPL(ip_nat_proto_find_get);
-
-void
-ip_nat_proto_put(struct ip_nat_protocol *p)
-{
-       module_put(p->me);
-}
-EXPORT_SYMBOL_GPL(ip_nat_proto_put);
-
-/* We keep an extra hash for each conntrack, for fast searching. */
-static inline unsigned int
-hash_by_src(const struct ip_conntrack_tuple *tuple)
-{
-       /* Original src, to ensure we map it consistently if poss. */
-       return jhash_3words((__force u32)tuple->src.ip, tuple->src.u.all,
-                           tuple->dst.protonum, 0) % ip_nat_htable_size;
-}
-
-/* No one is using conntrack by the time this is called. */
-static void ip_nat_cleanup_conntrack(struct ip_conntrack *conn)
-{
-       if (!(conn->status & IPS_NAT_DONE_MASK))
-               return;
-
-       write_lock_bh(&ip_nat_lock);
-       list_del(&conn->nat.info.bysource);
-       write_unlock_bh(&ip_nat_lock);
-}
-
-/* Is this tuple already taken? (not by us) */
-int
-ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple,
-                 const struct ip_conntrack *ignored_conntrack)
-{
-       /* Conntrack tracking doesn't keep track of outgoing tuples; only
-          incoming ones.  NAT means they don't have a fixed mapping,
-          so we invert the tuple and look for the incoming reply.
-
-          We could keep a separate hash if this proves too slow. */
-       struct ip_conntrack_tuple reply;
-
-       invert_tuplepr(&reply, tuple);
-       return ip_conntrack_tuple_taken(&reply, ignored_conntrack);
-}
-EXPORT_SYMBOL(ip_nat_used_tuple);
-
-/* If we source map this tuple so the reply looks like reply_tuple, will
- * that meet the constraints of range? */
-static int
-in_range(const struct ip_conntrack_tuple *tuple,
-        const struct ip_nat_range *range)
-{
-       struct ip_nat_protocol *proto;
-       int ret = 0;
-
-       /* If we are supposed to map IPs, then we must be in the
-          range specified, otherwise let this drag us onto a new src IP. */
-       if (range->flags & IP_NAT_RANGE_MAP_IPS) {
-               if (ntohl(tuple->src.ip) < ntohl(range->min_ip)
-                   || ntohl(tuple->src.ip) > ntohl(range->max_ip))
-                       return 0;
-       }
-
-       rcu_read_lock();
-       proto = __ip_nat_proto_find(tuple->dst.protonum);
-       if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
-           || proto->in_range(tuple, IP_NAT_MANIP_SRC,
-                              &range->min, &range->max))
-               ret = 1;
-       rcu_read_unlock();
-
-       return ret;
-}
-
-static inline int
-same_src(const struct ip_conntrack *ct,
-        const struct ip_conntrack_tuple *tuple)
-{
-       return (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum
-               == tuple->dst.protonum
-               && ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip
-               == tuple->src.ip
-               && ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all
-               == tuple->src.u.all);
-}
-
-/* Only called for SRC manip */
-static int
-find_appropriate_src(const struct ip_conntrack_tuple *tuple,
-                    struct ip_conntrack_tuple *result,
-                    const struct ip_nat_range *range)
-{
-       unsigned int h = hash_by_src(tuple);
-       struct ip_conntrack *ct;
-
-       read_lock_bh(&ip_nat_lock);
-       list_for_each_entry(ct, &bysource[h], nat.info.bysource) {
-               if (same_src(ct, tuple)) {
-                       /* Copy source part from reply tuple. */
-                       invert_tuplepr(result,
-                                      &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-                       result->dst = tuple->dst;
-
-                       if (in_range(result, range)) {
-                               read_unlock_bh(&ip_nat_lock);
-                               return 1;
-                       }
-               }
-       }
-       read_unlock_bh(&ip_nat_lock);
-       return 0;
-}
-
-/* For [FUTURE] fragmentation handling, we want the least-used
-   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
-   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
-   1-65535, we don't do pro-rata allocation based on ports; we choose
-   the ip with the lowest src-ip/dst-ip/proto usage.
-*/
-static void
-find_best_ips_proto(struct ip_conntrack_tuple *tuple,
-                   const struct ip_nat_range *range,
-                   const struct ip_conntrack *conntrack,
-                   enum ip_nat_manip_type maniptype)
-{
-       __be32 *var_ipp;
-       /* Host order */
-       u_int32_t minip, maxip, j;
-
-       /* No IP mapping?  Do nothing. */
-       if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
-               return;
-
-       if (maniptype == IP_NAT_MANIP_SRC)
-               var_ipp = &tuple->src.ip;
-       else
-               var_ipp = &tuple->dst.ip;
-
-       /* Fast path: only one choice. */
-       if (range->min_ip == range->max_ip) {
-               *var_ipp = range->min_ip;
-               return;
-       }
-
-       /* Hashing source and destination IPs gives a fairly even
-        * spread in practice (if there are a small number of IPs
-        * involved, there usually aren't that many connections
-        * anyway).  The consistency means that servers see the same
-        * client coming from the same IP (some Internet Banking sites
-        * like this), even across reboots. */
-       minip = ntohl(range->min_ip);
-       maxip = ntohl(range->max_ip);
-       j = jhash_2words((__force u32)tuple->src.ip, (__force u32)tuple->dst.ip, 0);
-       *var_ipp = htonl(minip + j % (maxip - minip + 1));
-}
-
-/* Manipulate the tuple into the range given.  For NF_IP_POST_ROUTING,
- * we change the source to map into the range.  For NF_IP_PRE_ROUTING
- * and NF_IP_LOCAL_OUT, we change the destination to map into the
- * range.  It might not be possible to get a unique tuple, but we try.
- * At worst (or if we race), we will end up with a final duplicate in
- * __ip_conntrack_confirm and drop the packet. */
-static void
-get_unique_tuple(struct ip_conntrack_tuple *tuple,
-                const struct ip_conntrack_tuple *orig_tuple,
-                const struct ip_nat_range *range,
-                struct ip_conntrack *conntrack,
-                enum ip_nat_manip_type maniptype)
-{
-       struct ip_nat_protocol *proto;
-
-       /* 1) If this srcip/proto/src-proto-part is currently mapped,
-          and that same mapping gives a unique tuple within the given
-          range, use that.
-
-          This is only required for source (ie. NAT/masq) mappings.
-          So far, we don't do local source mappings, so multiple
-          manips are not an issue.  */
-       if (maniptype == IP_NAT_MANIP_SRC) {
-               if (find_appropriate_src(orig_tuple, tuple, range)) {
-                       DEBUGP("get_unique_tuple: Found current src map\n");
-                       if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
-                               if (!ip_nat_used_tuple(tuple, conntrack))
-                                       return;
-               }
-       }
-
-       /* 2) Select the least-used IP/proto combination in the given
-          range. */
-       *tuple = *orig_tuple;
-       find_best_ips_proto(tuple, range, conntrack, maniptype);
-
-       /* 3) The per-protocol part of the manip is made to map into
-          the range to make a unique tuple. */
-
-       rcu_read_lock();
-       proto = __ip_nat_proto_find(orig_tuple->dst.protonum);
-
-       /* Change protocol info to have some randomization */
-       if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
-               proto->unique_tuple(tuple, range, maniptype, conntrack);
-               goto out;
-       }
-
-       /* Only bother mapping if it's not already in range and unique */
-       if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
-            || proto->in_range(tuple, maniptype, &range->min, &range->max))
-           && !ip_nat_used_tuple(tuple, conntrack))
-               goto out;
-
-       /* Last chance: get protocol to try to obtain a unique tuple. */
-       proto->unique_tuple(tuple, range, maniptype, conntrack);
-out:
-       rcu_read_unlock();
-}
-
-unsigned int
-ip_nat_setup_info(struct ip_conntrack *conntrack,
-                 const struct ip_nat_range *range,
-                 unsigned int hooknum)
-{
-       struct ip_conntrack_tuple curr_tuple, new_tuple;
-       struct ip_nat_info *info = &conntrack->nat.info;
-       int have_to_hash = !(conntrack->status & IPS_NAT_DONE_MASK);
-       enum ip_nat_manip_type maniptype = HOOK2MANIP(hooknum);
-
-       IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING
-                    || hooknum == NF_IP_POST_ROUTING
-                    || hooknum == NF_IP_LOCAL_IN
-                    || hooknum == NF_IP_LOCAL_OUT);
-       BUG_ON(ip_nat_initialized(conntrack, maniptype));
-
-       /* What we've got will look like inverse of reply. Normally
-          this is what is in the conntrack, except for prior
-          manipulations (future optimization: if num_manips == 0,
-          orig_tp =
-          conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
-       invert_tuplepr(&curr_tuple,
-                      &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple);
-
-       get_unique_tuple(&new_tuple, &curr_tuple, range, conntrack, maniptype);
-
-       if (!ip_ct_tuple_equal(&new_tuple, &curr_tuple)) {
-               struct ip_conntrack_tuple reply;
-
-               /* Alter conntrack table so will recognize replies. */
-               invert_tuplepr(&reply, &new_tuple);
-               ip_conntrack_alter_reply(conntrack, &reply);
-
-               /* Non-atomic: we own this at the moment. */
-               if (maniptype == IP_NAT_MANIP_SRC)
-                       conntrack->status |= IPS_SRC_NAT;
-               else
-                       conntrack->status |= IPS_DST_NAT;
-       }
-
-       /* Place in source hash if this is the first time. */
-       if (have_to_hash) {
-               unsigned int srchash
-                       = hash_by_src(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
-                                     .tuple);
-               write_lock_bh(&ip_nat_lock);
-               list_add(&info->bysource, &bysource[srchash]);
-               write_unlock_bh(&ip_nat_lock);
-       }
-
-       /* It's done. */
-       if (maniptype == IP_NAT_MANIP_DST)
-               set_bit(IPS_DST_NAT_DONE_BIT, &conntrack->status);
-       else
-               set_bit(IPS_SRC_NAT_DONE_BIT, &conntrack->status);
-
-       return NF_ACCEPT;
-}
-EXPORT_SYMBOL(ip_nat_setup_info);
-
-/* Returns true if succeeded. */
-static int
-manip_pkt(u_int16_t proto,
-         struct sk_buff **pskb,
-         unsigned int iphdroff,
-         const struct ip_conntrack_tuple *target,
-         enum ip_nat_manip_type maniptype)
-{
-       struct iphdr *iph;
-       struct ip_nat_protocol *p;
-
-       if (!skb_make_writable(pskb, iphdroff + sizeof(*iph)))
-               return 0;
-
-       iph = (void *)(*pskb)->data + iphdroff;
-
-       /* Manipulate the protocol part. */
-
-       /* rcu_read_lock()ed by nf_hook_slow */
-       p = __ip_nat_proto_find(proto);
-       if (!p->manip_pkt(pskb, iphdroff, target, maniptype))
-               return 0;
-
-       iph = (void *)(*pskb)->data + iphdroff;
-
-       if (maniptype == IP_NAT_MANIP_SRC) {
-               nf_csum_replace4(&iph->check, iph->saddr, target->src.ip);
-               iph->saddr = target->src.ip;
-       } else {
-               nf_csum_replace4(&iph->check, iph->daddr, target->dst.ip);
-               iph->daddr = target->dst.ip;
-       }
-       return 1;
-}
-
-/* Do packet manipulations according to ip_nat_setup_info. */
-unsigned int ip_nat_packet(struct ip_conntrack *ct,
-                          enum ip_conntrack_info ctinfo,
-                          unsigned int hooknum,
-                          struct sk_buff **pskb)
-{
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       unsigned long statusbit;
-       enum ip_nat_manip_type mtype = HOOK2MANIP(hooknum);
-
-       if (mtype == IP_NAT_MANIP_SRC)
-               statusbit = IPS_SRC_NAT;
-       else
-               statusbit = IPS_DST_NAT;
-
-       /* Invert if this is reply dir. */
-       if (dir == IP_CT_DIR_REPLY)
-               statusbit ^= IPS_NAT_MASK;
-
-       /* Non-atomic: these bits don't change. */
-       if (ct->status & statusbit) {
-               struct ip_conntrack_tuple target;
-
-               /* We are aiming to look like inverse of other direction. */
-               invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
-
-               if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype))
-                       return NF_DROP;
-       }
-       return NF_ACCEPT;
-}
-EXPORT_SYMBOL_GPL(ip_nat_packet);
-
-/* Dir is direction ICMP is coming from (opposite to packet it contains) */
-int ip_nat_icmp_reply_translation(struct ip_conntrack *ct,
-                                 enum ip_conntrack_info ctinfo,
-                                 unsigned int hooknum,
-                                 struct sk_buff **pskb)
-{
-       struct {
-               struct icmphdr icmp;
-               struct iphdr ip;
-       } *inside;
-       struct ip_conntrack_protocol *proto;
-       struct ip_conntrack_tuple inner, target;
-       int hdrlen = (*pskb)->nh.iph->ihl * 4;
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       unsigned long statusbit;
-       enum ip_nat_manip_type manip = HOOK2MANIP(hooknum);
-
-       if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
-               return 0;
-
-       inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
-
-       /* We're actually going to mangle it beyond trivial checksum
-          adjustment, so make sure the current checksum is correct. */
-       if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0))
-               return 0;
-
-       /* Must be RELATED */
-       IP_NF_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED ||
-                    (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
-
-       /* Redirects on non-null nats must be dropped, else they'll
-          start talking to each other without our translation, and be
-          confused... --RR */
-       if (inside->icmp.type == ICMP_REDIRECT) {
-               /* If NAT isn't finished, assume it and drop. */
-               if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
-                       return 0;
-
-               if (ct->status & IPS_NAT_MASK)
-                       return 0;
-       }
-
-       DEBUGP("icmp_reply_translation: translating error %p manp %u dir %s\n",
-              *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
-
-       /* rcu_read_lock()ed by nf_hook_slow */
-       proto = __ip_conntrack_proto_find(inside->ip.protocol);
-       if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 +
-                            sizeof(struct icmphdr) + inside->ip.ihl*4,
-                            &inner, proto))
-               return 0;
-
-       /* Change inner back to look like the incoming packet.  We do the
-          opposite of the normal manip on this hook, because it might not
-          pass all hooks (locally-generated ICMP).  Consider incoming
-          packet: PREROUTING (DST manip), routing produces ICMP, goes
-          through POSTROUTING (which must correct the DST manip). */
-       if (!manip_pkt(inside->ip.protocol, pskb,
-                      (*pskb)->nh.iph->ihl*4
-                      + sizeof(inside->icmp),
-                      &ct->tuplehash[!dir].tuple,
-                      !manip))
-               return 0;
-
-       if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
-               /* Reload "inside" here, since manip_pkt above may have changed the skb data. */
-               inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
-               inside->icmp.checksum = 0;
-               inside->icmp.checksum = csum_fold(skb_checksum(*pskb, hdrlen,
-                                                              (*pskb)->len - hdrlen,
-                                                              0));
-       }
-
-       /* Change outer to look like the reply to an incoming packet
-        * (proto 0 means don't invert per-proto part). */
-       if (manip == IP_NAT_MANIP_SRC)
-               statusbit = IPS_SRC_NAT;
-       else
-               statusbit = IPS_DST_NAT;
-
-       /* Invert if this is reply dir. */
-       if (dir == IP_CT_DIR_REPLY)
-               statusbit ^= IPS_NAT_MASK;
-
-       if (ct->status & statusbit) {
-               invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
-               if (!manip_pkt(0, pskb, 0, &target, manip))
-                       return 0;
-       }
-
-       return 1;
-}
-EXPORT_SYMBOL_GPL(ip_nat_icmp_reply_translation);
-
-/* Protocol registration. */
-int ip_nat_protocol_register(struct ip_nat_protocol *proto)
-{
-       int ret = 0;
-
-       write_lock_bh(&ip_nat_lock);
-       if (ip_nat_protos[proto->protonum] != &ip_nat_unknown_protocol) {
-               ret = -EBUSY;
-               goto out;
-       }
-       rcu_assign_pointer(ip_nat_protos[proto->protonum], proto);
- out:
-       write_unlock_bh(&ip_nat_lock);
-       return ret;
-}
-EXPORT_SYMBOL(ip_nat_protocol_register);
-
-/* No one stores the protocol anywhere; simply delete it. */
-void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
-{
-       write_lock_bh(&ip_nat_lock);
-       rcu_assign_pointer(ip_nat_protos[proto->protonum],
-                          &ip_nat_unknown_protocol);
-       write_unlock_bh(&ip_nat_lock);
-       synchronize_rcu();
-}
-EXPORT_SYMBOL(ip_nat_protocol_unregister);
-
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-int
-ip_nat_port_range_to_nfattr(struct sk_buff *skb,
-                           const struct ip_nat_range *range)
-{
-       NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
-               &range->min.tcp.port);
-       NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16),
-               &range->max.tcp.port);
-
-       return 0;
-
-nfattr_failure:
-       return -1;
-}
-
-int
-ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range)
-{
-       int ret = 0;
-
-       /* we have to return whether we actually parsed something or not */
-
-       if (tb[CTA_PROTONAT_PORT_MIN-1]) {
-               ret = 1;
-               range->min.tcp.port =
-                       *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
-       }
-
-       if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
-               if (ret)
-                       range->max.tcp.port = range->min.tcp.port;
-       } else {
-               ret = 1;
-               range->max.tcp.port =
-                       *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(ip_nat_port_nfattr_to_range);
-EXPORT_SYMBOL_GPL(ip_nat_port_range_to_nfattr);
-#endif
-
-static int __init ip_nat_init(void)
-{
-       size_t i;
-
-       /* Leave them the same for the moment. */
-       ip_nat_htable_size = ip_conntrack_htable_size;
-
-       /* One vmalloc for both hash tables */
-       bysource = vmalloc(sizeof(struct list_head) * ip_nat_htable_size);
-       if (!bysource)
-               return -ENOMEM;
-
-       /* Sew in builtin protocols. */
-       write_lock_bh(&ip_nat_lock);
-       for (i = 0; i < MAX_IP_NAT_PROTO; i++)
-               rcu_assign_pointer(ip_nat_protos[i], &ip_nat_unknown_protocol);
-       rcu_assign_pointer(ip_nat_protos[IPPROTO_TCP], &ip_nat_protocol_tcp);
-       rcu_assign_pointer(ip_nat_protos[IPPROTO_UDP], &ip_nat_protocol_udp);
-       rcu_assign_pointer(ip_nat_protos[IPPROTO_ICMP], &ip_nat_protocol_icmp);
-       write_unlock_bh(&ip_nat_lock);
-
-       for (i = 0; i < ip_nat_htable_size; i++) {
-               INIT_LIST_HEAD(&bysource[i]);
-       }
-
-       /* FIXME: Man, this is a hack.  <SIGH> */
-       IP_NF_ASSERT(rcu_dereference(ip_conntrack_destroyed) == NULL);
-       rcu_assign_pointer(ip_conntrack_destroyed, ip_nat_cleanup_conntrack);
-
-       /* Initialize fake conntrack so that NAT will skip it */
-       ip_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
-       return 0;
-}
-
-/* Clear NAT section of all conntracks, in case we're loaded again. */
-static int clean_nat(struct ip_conntrack *i, void *data)
-{
-       memset(&i->nat, 0, sizeof(i->nat));
-       i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
-       return 0;
-}
-
-static void __exit ip_nat_cleanup(void)
-{
-       ip_ct_iterate_cleanup(&clean_nat, NULL);
-       rcu_assign_pointer(ip_conntrack_destroyed, NULL);
-       synchronize_rcu();
-       vfree(bysource);
-}
-
-MODULE_LICENSE("GPL");
-
-module_init(ip_nat_init);
-module_exit(ip_nat_cleanup);
diff --git a/net/ipv4/netfilter/ip_nat_ftp.c b/net/ipv4/netfilter/ip_nat_ftp.c
deleted file mode 100644 (file)
index 32e01d8..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-/* FTP extension for TCP NAT alteration. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/moduleparam.h>
-#include <net/tcp.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
-MODULE_DESCRIPTION("ftp NAT helper");
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-/* FIXME: Time out? --RR */
-
-static int
-mangle_rfc959_packet(struct sk_buff **pskb,
-                    __be32 newip,
-                    u_int16_t port,
-                    unsigned int matchoff,
-                    unsigned int matchlen,
-                    struct ip_conntrack *ct,
-                    enum ip_conntrack_info ctinfo,
-                    u32 *seq)
-{
-       char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
-
-       sprintf(buffer, "%u,%u,%u,%u,%u,%u",
-               NIPQUAD(newip), port>>8, port&0xFF);
-
-       DEBUGP("calling ip_nat_mangle_tcp_packet\n");
-
-       *seq += strlen(buffer) - matchlen;
-       return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
-                                       matchlen, buffer, strlen(buffer));
-}
-
-/* |1|132.235.1.2|6275| */
-static int
-mangle_eprt_packet(struct sk_buff **pskb,
-                  __be32 newip,
-                  u_int16_t port,
-                  unsigned int matchoff,
-                  unsigned int matchlen,
-                  struct ip_conntrack *ct,
-                  enum ip_conntrack_info ctinfo,
-                  u32 *seq)
-{
-       char buffer[sizeof("|1|255.255.255.255|65535|")];
-
-       sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port);
-
-       DEBUGP("calling ip_nat_mangle_tcp_packet\n");
-
-       *seq += strlen(buffer) - matchlen;
-       return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
-                                       matchlen, buffer, strlen(buffer));
-}
-
-/* |1|132.235.1.2|6275| */
-static int
-mangle_epsv_packet(struct sk_buff **pskb,
-                  __be32 newip,
-                  u_int16_t port,
-                  unsigned int matchoff,
-                  unsigned int matchlen,
-                  struct ip_conntrack *ct,
-                  enum ip_conntrack_info ctinfo,
-                  u32 *seq)
-{
-       char buffer[sizeof("|||65535|")];
-
-       sprintf(buffer, "|||%u|", port);
-
-       DEBUGP("calling ip_nat_mangle_tcp_packet\n");
-
-       *seq += strlen(buffer) - matchlen;
-       return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
-                                       matchlen, buffer, strlen(buffer));
-}
-
-static int (*mangle[])(struct sk_buff **, __be32, u_int16_t,
-                    unsigned int,
-                    unsigned int,
-                    struct ip_conntrack *,
-                    enum ip_conntrack_info,
-                    u32 *seq)
-= { [IP_CT_FTP_PORT] = mangle_rfc959_packet,
-    [IP_CT_FTP_PASV] = mangle_rfc959_packet,
-    [IP_CT_FTP_EPRT] = mangle_eprt_packet,
-    [IP_CT_FTP_EPSV] = mangle_epsv_packet
-};
-
-/* So, this packet has hit the connection tracking matching code.
-   Mangle it, and change the expectation to match the new version. */
-static unsigned int ip_nat_ftp(struct sk_buff **pskb,
-                              enum ip_conntrack_info ctinfo,
-                              enum ip_ct_ftp_type type,
-                              unsigned int matchoff,
-                              unsigned int matchlen,
-                              struct ip_conntrack_expect *exp,
-                              u32 *seq)
-{
-       __be32 newip;
-       u_int16_t port;
-       int dir = CTINFO2DIR(ctinfo);
-       struct ip_conntrack *ct = exp->master;
-
-       DEBUGP("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
-
-       /* Connection will come from wherever this packet goes, hence !dir */
-       newip = ct->tuplehash[!dir].tuple.dst.ip;
-       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
-       exp->dir = !dir;
-
-       /* When you see the packet, we need to NAT it the same as
-        * this one. */
-       exp->expectfn = ip_nat_follow_master;
-
-       /* Try to get same port: if not, try to change it. */
-       for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
-               exp->tuple.dst.u.tcp.port = htons(port);
-               if (ip_conntrack_expect_related(exp) == 0)
-                       break;
-       }
-
-       if (port == 0)
-               return NF_DROP;
-
-       if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo,
-                         seq)) {
-               ip_conntrack_unexpect_related(exp);
-               return NF_DROP;
-       }
-       return NF_ACCEPT;
-}
-
-static void __exit ip_nat_ftp_fini(void)
-{
-       rcu_assign_pointer(ip_nat_ftp_hook, NULL);
-       synchronize_rcu();
-}
-
-static int __init ip_nat_ftp_init(void)
-{
-       BUG_ON(rcu_dereference(ip_nat_ftp_hook));
-       rcu_assign_pointer(ip_nat_ftp_hook, ip_nat_ftp);
-       return 0;
-}
-
-/* Prior to 2.6.11, we had a ports param.  No longer, but don't break users. */
-static int warn_set(const char *val, struct kernel_param *kp)
-{
-       printk(KERN_INFO KBUILD_MODNAME
-              ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
-       return 0;
-}
-module_param_call(ports, warn_set, NULL, NULL, 0);
-
-module_init(ip_nat_ftp_init);
-module_exit(ip_nat_ftp_fini);
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c
deleted file mode 100644 (file)
index dc778cf..0000000
+++ /dev/null
@@ -1,436 +0,0 @@
-/* ip_nat_helper.c - generic support functions for NAT helpers
- *
- * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
- * (C) 2003-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *     14 Jan 2002 Harald Welte <laforge@gnumonks.org>:
- *             - add support for SACK adjustment
- *     14 Mar 2002 Harald Welte <laforge@gnumonks.org>:
- *             - merge SACK support into newnat API
- *     16 Aug 2002 Brian J. Murrell <netfilter@interlinx.bc.ca>:
- *             - make ip_nat_resize_packet more generic (TCP and UDP)
- *             - add ip_nat_mangle_udp_packet
- */
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4.h>
-#include <net/checksum.h>
-#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/tcp.h>
-#include <net/udp.h>
-
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-#include <linux/netfilter_ipv4/ip_nat_core.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-
-#if 0
-#define DEBUGP printk
-#define DUMP_OFFSET(x) printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos);
-#else
-#define DEBUGP(format, args...)
-#define DUMP_OFFSET(x)
-#endif
-
-static DEFINE_SPINLOCK(ip_nat_seqofs_lock);
-
-/* Setup TCP sequence correction given this change at this sequence */
-static inline void
-adjust_tcp_sequence(u32 seq,
-                   int sizediff,
-                   struct ip_conntrack *ct,
-                   enum ip_conntrack_info ctinfo)
-{
-       int dir;
-       struct ip_nat_seq *this_way, *other_way;
-
-       DEBUGP("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
-               seq, sizediff);
-
-       dir = CTINFO2DIR(ctinfo);
-
-       this_way = &ct->nat.info.seq[dir];
-       other_way = &ct->nat.info.seq[!dir];
-
-       DEBUGP("ip_nat_resize_packet: Seq_offset before: ");
-       DUMP_OFFSET(this_way);
-
-       spin_lock_bh(&ip_nat_seqofs_lock);
-
-       /* SYN adjust. If it's uninitialized, or this is after last
-        * correction, record it: we don't handle more than one
-        * adjustment in the window, but do deal with common case of a
-        * retransmit */
-       if (this_way->offset_before == this_way->offset_after
-           || before(this_way->correction_pos, seq)) {
-                   this_way->correction_pos = seq;
-                   this_way->offset_before = this_way->offset_after;
-                   this_way->offset_after += sizediff;
-       }
-       spin_unlock_bh(&ip_nat_seqofs_lock);
-
-       DEBUGP("ip_nat_resize_packet: Seq_offset after: ");
-       DUMP_OFFSET(this_way);
-}
-
-/* Frobs data inside this packet, which is linear. */
-static void mangle_contents(struct sk_buff *skb,
-                           unsigned int dataoff,
-                           unsigned int match_offset,
-                           unsigned int match_len,
-                           const char *rep_buffer,
-                           unsigned int rep_len)
-{
-       unsigned char *data;
-
-       BUG_ON(skb_is_nonlinear(skb));
-       data = (unsigned char *)skb->nh.iph + dataoff;
-
-       /* move post-replacement */
-       memmove(data + match_offset + rep_len,
-               data + match_offset + match_len,
-               skb->tail - (data + match_offset + match_len));
-
-       /* insert data from buffer */
-       memcpy(data + match_offset, rep_buffer, rep_len);
-
-       /* update skb info */
-       if (rep_len > match_len) {
-               DEBUGP("ip_nat_mangle_packet: Extending packet by "
-                       "%u from %u bytes\n", rep_len - match_len,
-                      skb->len);
-               skb_put(skb, rep_len - match_len);
-       } else {
-               DEBUGP("ip_nat_mangle_packet: Shrinking packet by "
-                       "%u from %u bytes\n", match_len - rep_len,
-                      skb->len);
-               __skb_trim(skb, skb->len + rep_len - match_len);
-       }
-
-       /* fix IP hdr checksum information */
-       skb->nh.iph->tot_len = htons(skb->len);
-       ip_send_check(skb->nh.iph);
-}
-
-/* Unusual, but possible case. */
-static int enlarge_skb(struct sk_buff **pskb, unsigned int extra)
-{
-       struct sk_buff *nskb;
-
-       if ((*pskb)->len + extra > 65535)
-               return 0;
-
-       nskb = skb_copy_expand(*pskb, skb_headroom(*pskb), extra, GFP_ATOMIC);
-       if (!nskb)
-               return 0;
-
-       /* Transfer socket to new skb. */
-       if ((*pskb)->sk)
-               skb_set_owner_w(nskb, (*pskb)->sk);
-       kfree_skb(*pskb);
-       *pskb = nskb;
-       return 1;
-}
-
-/* Generic function for mangling variable-length address changes inside
- * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
- * command in FTP).
- *
- * Takes care of all the nasty sequence number changes, checksumming,
- * skb enlargement, ...
- *
- * */
-int
-ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
-                        struct ip_conntrack *ct,
-                        enum ip_conntrack_info ctinfo,
-                        unsigned int match_offset,
-                        unsigned int match_len,
-                        const char *rep_buffer,
-                        unsigned int rep_len)
-{
-       struct iphdr *iph;
-       struct tcphdr *tcph;
-       int oldlen, datalen;
-
-       if (!skb_make_writable(pskb, (*pskb)->len))
-               return 0;
-
-       if (rep_len > match_len
-           && rep_len - match_len > skb_tailroom(*pskb)
-           && !enlarge_skb(pskb, rep_len - match_len))
-               return 0;
-
-       SKB_LINEAR_ASSERT(*pskb);
-
-       iph = (*pskb)->nh.iph;
-       tcph = (void *)iph + iph->ihl*4;
-
-       oldlen = (*pskb)->len - iph->ihl*4;
-       mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4,
-                       match_offset, match_len, rep_buffer, rep_len);
-
-       datalen = (*pskb)->len - iph->ihl*4;
-       if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
-               tcph->check = 0;
-               tcph->check = tcp_v4_check(datalen,
-                                          iph->saddr, iph->daddr,
-                                          csum_partial((char *)tcph,
-                                                       datalen, 0));
-       } else
-               nf_proto_csum_replace2(&tcph->check, *pskb,
-                                       htons(oldlen), htons(datalen), 1);
-
-       if (rep_len != match_len) {
-               set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
-               adjust_tcp_sequence(ntohl(tcph->seq),
-                                   (int)rep_len - (int)match_len,
-                                   ct, ctinfo);
-               /* Tell TCP window tracking about seq change */
-               ip_conntrack_tcp_update(*pskb, ct, CTINFO2DIR(ctinfo));
-       }
-       return 1;
-}
-EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
-
-/* Generic function for mangling variable-length address changes inside
- * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
- * command in the Amanda protocol)
- *
- * Takes care of all the nasty sequence number changes, checksumming,
- * skb enlargement, ...
- *
- * XXX - This function could be merged with ip_nat_mangle_tcp_packet which
- *       should be fairly easy to do.
- */
-int
-ip_nat_mangle_udp_packet(struct sk_buff **pskb,
-                        struct ip_conntrack *ct,
-                        enum ip_conntrack_info ctinfo,
-                        unsigned int match_offset,
-                        unsigned int match_len,
-                        const char *rep_buffer,
-                        unsigned int rep_len)
-{
-       struct iphdr *iph;
-       struct udphdr *udph;
-       int datalen, oldlen;
-
-       /* UDP helpers might accidentally mangle the wrong packet */
-       iph = (*pskb)->nh.iph;
-       if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
-                              match_offset + match_len)
-               return 0;
-
-       if (!skb_make_writable(pskb, (*pskb)->len))
-               return 0;
-
-       if (rep_len > match_len
-           && rep_len - match_len > skb_tailroom(*pskb)
-           && !enlarge_skb(pskb, rep_len - match_len))
-               return 0;
-
-       iph = (*pskb)->nh.iph;
-       udph = (void *)iph + iph->ihl*4;
-
-       oldlen = (*pskb)->len - iph->ihl*4;
-       mangle_contents(*pskb, iph->ihl*4 + sizeof(*udph),
-                       match_offset, match_len, rep_buffer, rep_len);
-
-       /* update the length of the UDP packet */
-       datalen = (*pskb)->len - iph->ihl*4;
-       udph->len = htons(datalen);
-
-       /* fix udp checksum if udp checksum was previously calculated */
-       if (!udph->check && (*pskb)->ip_summed != CHECKSUM_PARTIAL)
-               return 1;
-
-       if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
-               udph->check = 0;
-               udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                               datalen, IPPROTO_UDP,
-                                               csum_partial((char *)udph,
-                                                            datalen, 0));
-               if (!udph->check)
-                       udph->check = CSUM_MANGLED_0;
-       } else
-               nf_proto_csum_replace2(&udph->check, *pskb,
-                                       htons(oldlen), htons(datalen), 1);
-       return 1;
-}
-EXPORT_SYMBOL(ip_nat_mangle_udp_packet);
-
-/* Adjust one found SACK option including checksum correction */
-static void
-sack_adjust(struct sk_buff *skb,
-           struct tcphdr *tcph,
-           unsigned int sackoff,
-           unsigned int sackend,
-           struct ip_nat_seq *natseq)
-{
-       while (sackoff < sackend) {
-               struct tcp_sack_block_wire *sack;
-               __be32 new_start_seq, new_end_seq;
-
-               sack = (void *)skb->data + sackoff;
-               if (after(ntohl(sack->start_seq) - natseq->offset_before,
-                         natseq->correction_pos))
-                       new_start_seq = htonl(ntohl(sack->start_seq)
-                                       - natseq->offset_after);
-               else
-                       new_start_seq = htonl(ntohl(sack->start_seq)
-                                       - natseq->offset_before);
-
-               if (after(ntohl(sack->end_seq) - natseq->offset_before,
-                         natseq->correction_pos))
-                       new_end_seq = htonl(ntohl(sack->end_seq)
-                                     - natseq->offset_after);
-               else
-                       new_end_seq = htonl(ntohl(sack->end_seq)
-                                     - natseq->offset_before);
-
-               DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
-                       ntohl(sack->start_seq), new_start_seq,
-                       ntohl(sack->end_seq), new_end_seq);
-
-               nf_proto_csum_replace4(&tcph->check, skb,
-                                       sack->start_seq, new_start_seq, 0);
-               nf_proto_csum_replace4(&tcph->check, skb,
-                                       sack->end_seq, new_end_seq, 0);
-               sack->start_seq = new_start_seq;
-               sack->end_seq = new_end_seq;
-               sackoff += sizeof(*sack);
-       }
-}
-
-/* TCP SACK sequence number adjustment */
-static inline unsigned int
-ip_nat_sack_adjust(struct sk_buff **pskb,
-                  struct tcphdr *tcph,
-                  struct ip_conntrack *ct,
-                  enum ip_conntrack_info ctinfo)
-{
-       unsigned int dir, optoff, optend;
-
-       optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr);
-       optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4;
-
-       if (!skb_make_writable(pskb, optend))
-               return 0;
-
-       dir = CTINFO2DIR(ctinfo);
-
-       while (optoff < optend) {
-               /* Usually: option, length. */
-               unsigned char *op = (*pskb)->data + optoff;
-
-               switch (op[0]) {
-               case TCPOPT_EOL:
-                       return 1;
-               case TCPOPT_NOP:
-                       optoff++;
-                       continue;
-               default:
-                       /* no partial options */
-                       if (optoff + 1 == optend
-                           || optoff + op[1] > optend
-                           || op[1] < 2)
-                               return 0;
-                       if (op[0] == TCPOPT_SACK
-                           && op[1] >= 2+TCPOLEN_SACK_PERBLOCK
-                           && ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
-                               sack_adjust(*pskb, tcph, optoff+2,
-                                           optoff+op[1],
-                                           &ct->nat.info.seq[!dir]);
-                       optoff += op[1];
-               }
-       }
-       return 1;
-}
-
-/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure */
-int
-ip_nat_seq_adjust(struct sk_buff **pskb,
-                 struct ip_conntrack *ct,
-                 enum ip_conntrack_info ctinfo)
-{
-       struct tcphdr *tcph;
-       int dir;
-       __be32 newseq, newack;
-       struct ip_nat_seq *this_way, *other_way;
-
-       dir = CTINFO2DIR(ctinfo);
-
-       this_way = &ct->nat.info.seq[dir];
-       other_way = &ct->nat.info.seq[!dir];
-
-       if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
-               return 0;
-
-       tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
-       if (after(ntohl(tcph->seq), this_way->correction_pos))
-               newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
-       else
-               newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);
-
-       if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
-                 other_way->correction_pos))
-               newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
-       else
-               newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
-
-       nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0);
-       nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0);
-
-       DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n",
-               ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
-               ntohl(newack));
-
-       tcph->seq = newseq;
-       tcph->ack_seq = newack;
-
-       if (!ip_nat_sack_adjust(pskb, tcph, ct, ctinfo))
-               return 0;
-
-       ip_conntrack_tcp_update(*pskb, ct, dir);
-
-       return 1;
-}
-EXPORT_SYMBOL(ip_nat_seq_adjust);
-
-/* Setup NAT on this expected conntrack so it follows master. */
-/* If we fail to get a free NAT slot, we'll get dropped on confirm */
-void ip_nat_follow_master(struct ip_conntrack *ct,
-                         struct ip_conntrack_expect *exp)
-{
-       struct ip_nat_range range;
-
-       /* This must be a fresh one. */
-       BUG_ON(ct->status & IPS_NAT_DONE_MASK);
-
-       /* Change src to where master sends to */
-       range.flags = IP_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.dst.ip;
-       /* hook doesn't matter, but it has to do source manip */
-       ip_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
-
-       /* For DST manip, map port here to where it's expected. */
-       range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = exp->saved_proto;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.src.ip;
-       /* hook doesn't matter, but it has to do destination manip */
-       ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
-}
-EXPORT_SYMBOL(ip_nat_follow_master);
diff --git a/net/ipv4/netfilter/ip_nat_helper_h323.c b/net/ipv4/netfilter/ip_nat_helper_h323.c
deleted file mode 100644 (file)
index bdc99ef..0000000
+++ /dev/null
@@ -1,611 +0,0 @@
-/*
- * H.323 extension for NAT alteration.
- *
- * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
- *
- * This source code is licensed under General Public License version 2.
- *
- * Based on the 'brute force' H.323 NAT module by
- * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
- */
-
-#include <linux/module.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/moduleparam.h>
-#include <net/tcp.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
-#include <linux/netfilter_ipv4/ip_conntrack_h323.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-/****************************************************************************/
-static int set_addr(struct sk_buff **pskb,
-                   unsigned char **data, int dataoff,
-                   unsigned int addroff, __be32 ip, u_int16_t port)
-{
-       enum ip_conntrack_info ctinfo;
-       struct ip_conntrack *ct = ip_conntrack_get(*pskb, &ctinfo);
-       struct {
-               __be32 ip;
-               __be16 port;
-       } __attribute__ ((__packed__)) buf;
-       struct tcphdr _tcph, *th;
-
-       buf.ip = ip;
-       buf.port = htons(port);
-       addroff += dataoff;
-
-       if ((*pskb)->nh.iph->protocol == IPPROTO_TCP) {
-               if (!ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
-                                             addroff, sizeof(buf),
-                                             (char *) &buf, sizeof(buf))) {
-                       if (net_ratelimit())
-                               printk("ip_nat_h323: ip_nat_mangle_tcp_packet"
-                                      " error\n");
-                       return -1;
-               }
-
-               /* Relocate data pointer */
-               th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4,
-                                       sizeof(_tcph), &_tcph);
-               if (th == NULL)
-                       return -1;
-               *data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 +
-                   th->doff * 4 + dataoff;
-       } else {
-               if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo,
-                                             addroff, sizeof(buf),
-                                             (char *) &buf, sizeof(buf))) {
-                       if (net_ratelimit())
-                               printk("ip_nat_h323: ip_nat_mangle_udp_packet"
-                                      " error\n");
-                       return -1;
-               }
-               /* ip_nat_mangle_udp_packet uses skb_make_writable() to copy
-                * or pull everything in a linear buffer, so we can safely
-                * use the skb pointers now */
-               *data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 +
-                   sizeof(struct udphdr);
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int set_h225_addr(struct sk_buff **pskb,
-                        unsigned char **data, int dataoff,
-                        TransportAddress * addr,
-                        __be32 ip, u_int16_t port)
-{
-       return set_addr(pskb, data, dataoff, addr->ipAddress.ip, ip, port);
-}
-
-/****************************************************************************/
-static int set_h245_addr(struct sk_buff **pskb,
-                        unsigned char **data, int dataoff,
-                        H245_TransportAddress * addr,
-                        __be32 ip, u_int16_t port)
-{
-       return set_addr(pskb, data, dataoff,
-                       addr->unicastAddress.iPAddress.network, ip, port);
-}
-
-/****************************************************************************/
-static int set_sig_addr(struct sk_buff **pskb, struct ip_conntrack *ct,
-                       enum ip_conntrack_info ctinfo,
-                       unsigned char **data,
-                       TransportAddress * addr, int count)
-{
-       struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
-       int dir = CTINFO2DIR(ctinfo);
-       int i;
-       __be32 ip;
-       u_int16_t port;
-
-       for (i = 0; i < count; i++) {
-               if (get_h225_addr(*data, &addr[i], &ip, &port)) {
-                       if (ip == ct->tuplehash[dir].tuple.src.ip &&
-                           port == info->sig_port[dir]) {
-                               /* GW->GK */
-
-                               /* Fix for Gnomemeeting */
-                               if (i > 0 &&
-                                   get_h225_addr(*data, &addr[0],
-                                                 &ip, &port) &&
-                                   (ntohl(ip) & 0xff000000) == 0x7f000000)
-                                       i = 0;
-
-                               DEBUGP
-                                   ("ip_nat_ras: set signal address "
-                                    "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                                    NIPQUAD(ip), port,
-                                    NIPQUAD(ct->tuplehash[!dir].tuple.dst.
-                                            ip), info->sig_port[!dir]);
-                               return set_h225_addr(pskb, data, 0, &addr[i],
-                                                    ct->tuplehash[!dir].
-                                                    tuple.dst.ip,
-                                                    info->sig_port[!dir]);
-                       } else if (ip == ct->tuplehash[dir].tuple.dst.ip &&
-                                  port == info->sig_port[dir]) {
-                               /* GK->GW */
-                               DEBUGP
-                                   ("ip_nat_ras: set signal address "
-                                    "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                                    NIPQUAD(ip), port,
-                                    NIPQUAD(ct->tuplehash[!dir].tuple.src.
-                                            ip), info->sig_port[!dir]);
-                               return set_h225_addr(pskb, data, 0, &addr[i],
-                                                    ct->tuplehash[!dir].
-                                                    tuple.src.ip,
-                                                    info->sig_port[!dir]);
-                       }
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int set_ras_addr(struct sk_buff **pskb, struct ip_conntrack *ct,
-                       enum ip_conntrack_info ctinfo,
-                       unsigned char **data,
-                       TransportAddress * addr, int count)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       int i;
-       __be32 ip;
-       u_int16_t port;
-
-       for (i = 0; i < count; i++) {
-               if (get_h225_addr(*data, &addr[i], &ip, &port) &&
-                   ip == ct->tuplehash[dir].tuple.src.ip &&
-                   port == ntohs(ct->tuplehash[dir].tuple.src.u.udp.port)) {
-                       DEBUGP("ip_nat_ras: set rasAddress "
-                              "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-                              NIPQUAD(ip), port,
-                              NIPQUAD(ct->tuplehash[!dir].tuple.dst.ip),
-                              ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.
-                                    port));
-                       return set_h225_addr(pskb, data, 0, &addr[i],
-                                            ct->tuplehash[!dir].tuple.dst.ip,
-                                            ntohs(ct->tuplehash[!dir].tuple.
-                                                  dst.u.udp.port));
-               }
-       }
-
-       return 0;
-}
-
-/****************************************************************************/
-static int nat_rtp_rtcp(struct sk_buff **pskb, struct ip_conntrack *ct,
-                       enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff,
-                       H245_TransportAddress * addr,
-                       u_int16_t port, u_int16_t rtp_port,
-                       struct ip_conntrack_expect *rtp_exp,
-                       struct ip_conntrack_expect *rtcp_exp)
-{
-       struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
-       int dir = CTINFO2DIR(ctinfo);
-       int i;
-       u_int16_t nated_port;
-
-       /* Set expectations for NAT */
-       rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
-       rtp_exp->expectfn = ip_nat_follow_master;
-       rtp_exp->dir = !dir;
-       rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
-       rtcp_exp->expectfn = ip_nat_follow_master;
-       rtcp_exp->dir = !dir;
-
-       /* Lookup existing expects */
-       for (i = 0; i < H323_RTP_CHANNEL_MAX; i++) {
-               if (info->rtp_port[i][dir] == rtp_port) {
-                       /* Expected */
-
-                       /* Use allocated ports first. This will refresh
-                        * the expects */
-                       rtp_exp->tuple.dst.u.udp.port =
-                           htons(info->rtp_port[i][dir]);
-                       rtcp_exp->tuple.dst.u.udp.port =
-                           htons(info->rtp_port[i][dir] + 1);
-                       break;
-               } else if (info->rtp_port[i][dir] == 0) {
-                       /* Not expected */
-                       break;
-               }
-       }
-
-       /* Run out of expectations */
-       if (i >= H323_RTP_CHANNEL_MAX) {
-               if (net_ratelimit())
-                       printk("ip_nat_h323: out of expectations\n");
-               return 0;
-       }
-
-       /* Try to get a pair of ports. */
-       for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port);
-            nated_port != 0; nated_port += 2) {
-               rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
-               if (ip_conntrack_expect_related(rtp_exp) == 0) {
-                       rtcp_exp->tuple.dst.u.udp.port =
-                           htons(nated_port + 1);
-                       if (ip_conntrack_expect_related(rtcp_exp) == 0)
-                               break;
-                       ip_conntrack_unexpect_related(rtp_exp);
-               }
-       }
-
-       if (nated_port == 0) {  /* No port available */
-               if (net_ratelimit())
-                       printk("ip_nat_h323: out of RTP ports\n");
-               return 0;
-       }
-
-       /* Modify signal */
-       if (set_h245_addr(pskb, data, dataoff, addr,
-                         ct->tuplehash[!dir].tuple.dst.ip,
-                         (port & 1) ? nated_port + 1 : nated_port) == 0) {
-               /* Save ports */
-               info->rtp_port[i][dir] = rtp_port;
-               info->rtp_port[i][!dir] = nated_port;
-       } else {
-               ip_conntrack_unexpect_related(rtp_exp);
-               ip_conntrack_unexpect_related(rtcp_exp);
-               return -1;
-       }
-
-       /* Success */
-       DEBUGP("ip_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-              NIPQUAD(rtp_exp->tuple.src.ip),
-              ntohs(rtp_exp->tuple.src.u.udp.port),
-              NIPQUAD(rtp_exp->tuple.dst.ip),
-              ntohs(rtp_exp->tuple.dst.u.udp.port));
-       DEBUGP("ip_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-              NIPQUAD(rtcp_exp->tuple.src.ip),
-              ntohs(rtcp_exp->tuple.src.u.udp.port),
-              NIPQUAD(rtcp_exp->tuple.dst.ip),
-              ntohs(rtcp_exp->tuple.dst.u.udp.port));
-
-       return 0;
-}
-
-/****************************************************************************/
-static int nat_t120(struct sk_buff **pskb, struct ip_conntrack *ct,
-                   enum ip_conntrack_info ctinfo,
-                   unsigned char **data, int dataoff,
-                   H245_TransportAddress * addr, u_int16_t port,
-                   struct ip_conntrack_expect *exp)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       u_int16_t nated_port = port;
-
-       /* Set expectations for NAT */
-       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
-       exp->expectfn = ip_nat_follow_master;
-       exp->dir = !dir;
-
-       /* Try to get same port: if not, try to change it. */
-       for (; nated_port != 0; nated_port++) {
-               exp->tuple.dst.u.tcp.port = htons(nated_port);
-               if (ip_conntrack_expect_related(exp) == 0)
-                       break;
-       }
-
-       if (nated_port == 0) {  /* No port available */
-               if (net_ratelimit())
-                       printk("ip_nat_h323: out of TCP ports\n");
-               return 0;
-       }
-
-       /* Modify signal */
-       if (set_h245_addr(pskb, data, dataoff, addr,
-                         ct->tuplehash[!dir].tuple.dst.ip, nated_port) < 0) {
-               ip_conntrack_unexpect_related(exp);
-               return -1;
-       }
-
-       DEBUGP("ip_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-              NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port),
-              NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port));
-
-       return 0;
-}
-
-/****************************************************************************
- * This conntrack expect function replaces ip_conntrack_h245_expect()
- * which was set by ip_conntrack_helper_h323.c. It calls both
- * ip_nat_follow_master() and ip_conntrack_h245_expect()
- ****************************************************************************/
-static void ip_nat_h245_expect(struct ip_conntrack *new,
-                              struct ip_conntrack_expect *this)
-{
-       ip_nat_follow_master(new, this);
-       ip_conntrack_h245_expect(new, this);
-}
-
-/****************************************************************************/
-static int nat_h245(struct sk_buff **pskb, struct ip_conntrack *ct,
-                   enum ip_conntrack_info ctinfo,
-                   unsigned char **data, int dataoff,
-                   TransportAddress * addr, u_int16_t port,
-                   struct ip_conntrack_expect *exp)
-{
-       struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
-       int dir = CTINFO2DIR(ctinfo);
-       u_int16_t nated_port = port;
-
-       /* Set expectations for NAT */
-       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
-       exp->expectfn = ip_nat_h245_expect;
-       exp->dir = !dir;
-
-       /* Check existing expects */
-       if (info->sig_port[dir] == port)
-               nated_port = info->sig_port[!dir];
-
-       /* Try to get same port: if not, try to change it. */
-       for (; nated_port != 0; nated_port++) {
-               exp->tuple.dst.u.tcp.port = htons(nated_port);
-               if (ip_conntrack_expect_related(exp) == 0)
-                       break;
-       }
-
-       if (nated_port == 0) {  /* No port available */
-               if (net_ratelimit())
-                       printk("ip_nat_q931: out of TCP ports\n");
-               return 0;
-       }
-
-       /* Modify signal */
-       if (set_h225_addr(pskb, data, dataoff, addr,
-                         ct->tuplehash[!dir].tuple.dst.ip,
-                         nated_port) == 0) {
-               /* Save ports */
-               info->sig_port[dir] = port;
-               info->sig_port[!dir] = nated_port;
-       } else {
-               ip_conntrack_unexpect_related(exp);
-               return -1;
-       }
-
-       DEBUGP("ip_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-              NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port),
-              NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port));
-
-       return 0;
-}
-
-/****************************************************************************
- * This conntrack expect function replaces ip_conntrack_q931_expect()
- * which was set by ip_conntrack_helper_h323.c.
- ****************************************************************************/
-static void ip_nat_q931_expect(struct ip_conntrack *new,
-                              struct ip_conntrack_expect *this)
-{
-       struct ip_nat_range range;
-
-       if (this->tuple.src.ip != 0) {  /* Only accept calls from GK */
-               ip_nat_follow_master(new, this);
-               goto out;
-       }
-
-       /* This must be a fresh one. */
-       BUG_ON(new->status & IPS_NAT_DONE_MASK);
-
-       /* Change src to where master sends to */
-       range.flags = IP_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.ip;
-
-       /* hook doesn't matter, but it has to do source manip */
-       ip_nat_setup_info(new, &range, NF_IP_POST_ROUTING);
-
-       /* For DST manip, map port here to where it's expected. */
-       range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = this->saved_proto;
-       range.min_ip = range.max_ip =
-           new->master->tuplehash[!this->dir].tuple.src.ip;
-
-       /* hook doesn't matter, but it has to do destination manip */
-       ip_nat_setup_info(new, &range, NF_IP_PRE_ROUTING);
-
-      out:
-       ip_conntrack_q931_expect(new, this);
-}
-
-/****************************************************************************/
-static int nat_q931(struct sk_buff **pskb, struct ip_conntrack *ct,
-                   enum ip_conntrack_info ctinfo,
-                   unsigned char **data, TransportAddress * addr, int idx,
-                   u_int16_t port, struct ip_conntrack_expect *exp)
-{
-       struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
-       int dir = CTINFO2DIR(ctinfo);
-       u_int16_t nated_port = port;
-       __be32 ip;
-
-       /* Set expectations for NAT */
-       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
-       exp->expectfn = ip_nat_q931_expect;
-       exp->dir = !dir;
-
-       /* Check existing expects */
-       if (info->sig_port[dir] == port)
-               nated_port = info->sig_port[!dir];
-
-       /* Try to get same port: if not, try to change it. */
-       for (; nated_port != 0; nated_port++) {
-               exp->tuple.dst.u.tcp.port = htons(nated_port);
-               if (ip_conntrack_expect_related(exp) == 0)
-                       break;
-       }
-
-       if (nated_port == 0) {  /* No port available */
-               if (net_ratelimit())
-                       printk("ip_nat_ras: out of TCP ports\n");
-               return 0;
-       }
-
-       /* Modify signal */
-       if (set_h225_addr(pskb, data, 0, &addr[idx],
-                         ct->tuplehash[!dir].tuple.dst.ip,
-                         nated_port) == 0) {
-               /* Save ports */
-               info->sig_port[dir] = port;
-               info->sig_port[!dir] = nated_port;
-
-               /* Fix for Gnomemeeting */
-               if (idx > 0 &&
-                   get_h225_addr(*data, &addr[0], &ip, &port) &&
-                   (ntohl(ip) & 0xff000000) == 0x7f000000) {
-                       set_h225_addr_hook(pskb, data, 0, &addr[0],
-                                          ct->tuplehash[!dir].tuple.dst.ip,
-                                          info->sig_port[!dir]);
-               }
-       } else {
-               ip_conntrack_unexpect_related(exp);
-               return -1;
-       }
-
-       /* Success */
-       DEBUGP("ip_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-              NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port),
-              NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port));
-
-       return 0;
-}
-
-/****************************************************************************/
-static void ip_nat_callforwarding_expect(struct ip_conntrack *new,
-                                        struct ip_conntrack_expect *this)
-{
-       struct ip_nat_range range;
-
-       /* This must be a fresh one. */
-       BUG_ON(new->status & IPS_NAT_DONE_MASK);
-
-       /* Change src to where master sends to */
-       range.flags = IP_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.ip;
-
-       /* hook doesn't matter, but it has to do source manip */
-       ip_nat_setup_info(new, &range, NF_IP_POST_ROUTING);
-
-       /* For DST manip, map port here to where it's expected. */
-       range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = this->saved_proto;
-       range.min_ip = range.max_ip = this->saved_ip;
-
-       /* hook doesn't matter, but it has to do destination manip */
-       ip_nat_setup_info(new, &range, NF_IP_PRE_ROUTING);
-
-       ip_conntrack_q931_expect(new, this);
-}
-
-/****************************************************************************/
-static int nat_callforwarding(struct sk_buff **pskb, struct ip_conntrack *ct,
-                             enum ip_conntrack_info ctinfo,
-                             unsigned char **data, int dataoff,
-                             TransportAddress * addr, u_int16_t port,
-                             struct ip_conntrack_expect *exp)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       u_int16_t nated_port;
-
-       /* Set expectations for NAT */
-       exp->saved_ip = exp->tuple.dst.ip;
-       exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
-       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
-       exp->expectfn = ip_nat_callforwarding_expect;
-       exp->dir = !dir;
-
-       /* Try to get same port: if not, try to change it. */
-       for (nated_port = port; nated_port != 0; nated_port++) {
-               exp->tuple.dst.u.tcp.port = htons(nated_port);
-               if (ip_conntrack_expect_related(exp) == 0)
-                       break;
-       }
-
-       if (nated_port == 0) {  /* No port available */
-               if (net_ratelimit())
-                       printk("ip_nat_q931: out of TCP ports\n");
-               return 0;
-       }
-
-       /* Modify signal */
-       if (set_h225_addr(pskb, data, dataoff, addr,
-                         ct->tuplehash[!dir].tuple.dst.ip,
-                         nated_port) != 0) {
-               ip_conntrack_unexpect_related(exp);
-               return -1;
-       }
-
-       /* Success */
-       DEBUGP("ip_nat_q931: expect Call Forwarding "
-              "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
-              NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port),
-              NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port));
-
-       return 0;
-}
-
-/****************************************************************************/
-static int __init init(void)
-{
-       BUG_ON(rcu_dereference(set_h245_addr_hook) != NULL);
-       BUG_ON(rcu_dereference(set_h225_addr_hook) != NULL);
-       BUG_ON(rcu_dereference(set_sig_addr_hook) != NULL);
-       BUG_ON(rcu_dereference(set_ras_addr_hook) != NULL);
-       BUG_ON(rcu_dereference(nat_rtp_rtcp_hook) != NULL);
-       BUG_ON(rcu_dereference(nat_t120_hook) != NULL);
-       BUG_ON(rcu_dereference(nat_h245_hook) != NULL);
-       BUG_ON(rcu_dereference(nat_callforwarding_hook) != NULL);
-       BUG_ON(rcu_dereference(nat_q931_hook) != NULL);
-
-       rcu_assign_pointer(set_h245_addr_hook, set_h245_addr);
-       rcu_assign_pointer(set_h225_addr_hook, set_h225_addr);
-       rcu_assign_pointer(set_sig_addr_hook, set_sig_addr);
-       rcu_assign_pointer(set_ras_addr_hook, set_ras_addr);
-       rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp);
-       rcu_assign_pointer(nat_t120_hook, nat_t120);
-       rcu_assign_pointer(nat_h245_hook, nat_h245);
-       rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding);
-       rcu_assign_pointer(nat_q931_hook, nat_q931);
-
-       DEBUGP("ip_nat_h323: init success\n");
-       return 0;
-}
-
-/****************************************************************************/
-static void __exit fini(void)
-{
-       rcu_assign_pointer(set_h245_addr_hook, NULL);
-       rcu_assign_pointer(set_h225_addr_hook, NULL);
-       rcu_assign_pointer(set_sig_addr_hook, NULL);
-       rcu_assign_pointer(set_ras_addr_hook, NULL);
-       rcu_assign_pointer(nat_rtp_rtcp_hook, NULL);
-       rcu_assign_pointer(nat_t120_hook, NULL);
-       rcu_assign_pointer(nat_h245_hook, NULL);
-       rcu_assign_pointer(nat_callforwarding_hook, NULL);
-       rcu_assign_pointer(nat_q931_hook, NULL);
-       synchronize_rcu();
-}
-
-/****************************************************************************/
-module_init(init);
-module_exit(fini);
-
-MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
-MODULE_DESCRIPTION("H.323 NAT helper");
-MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c
deleted file mode 100644 (file)
index 24ce4a5..0000000
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * ip_nat_pptp.c       - Version 3.0
- *
- * NAT support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a protocol for creating virtual private networks.
- * It is a specification defined by Microsoft and some vendors
- * working with Microsoft.  PPTP is built on top of a modified
- * version of the Internet Generic Routing Encapsulation Protocol.
- * GRE is defined in RFC 1701 and RFC 1702.  Documentation of
- * PPTP can be found in RFC 2637
- *
- * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- * TODO: - NAT to a unique tuple, not to TCP source port
- *        (needs netfilter tuple reservation)
- *
- * Changes:
- *     2002-02-10 - Version 1.3
- *       - Use ip_nat_mangle_tcp_packet() because of cloned skb's
- *        in local connections (Philip Craig <philipc@snapgear.com>)
- *       - add checks for magicCookie and pptp version
- *       - make argument list of pptp_{out,in}bound_packet() shorter
- *       - move to C99 style initializers
- *       - print version number at module loadtime
- *     2003-09-22 - Version 1.5
- *       - use SNATed tcp sourceport as callid, since we get called before
- *        TCP header is mangled (Philip Craig <philipc@snapgear.com>)
- *     2004-10-22 - Version 2.0
- *       - kernel 2.6.x version
- *     2005-06-10 - Version 3.0
- *       - kernel >= 2.6.11 version,
- *        funded by Oxcoda NetBox Blue (http://www.netboxblue.com/)
- *
- */
-
-#include <linux/module.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <net/tcp.h>
-
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_nat_pptp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
-
-#define IP_NAT_PPTP_VERSION "3.0"
-
-#define REQ_CID(req, off)              (*(__be16 *)((char *)(req) + (off)))
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP");
-
-
-#if 0
-extern const char *pptp_msg_name[];
-#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \
-                                      __FUNCTION__, ## args)
-#else
-#define DEBUGP(format, args...)
-#endif
-
-static void pptp_nat_expected(struct ip_conntrack *ct,
-                             struct ip_conntrack_expect *exp)
-{
-       struct ip_conntrack *master = ct->master;
-       struct ip_conntrack_expect *other_exp;
-       struct ip_conntrack_tuple t;
-       struct ip_ct_pptp_master *ct_pptp_info;
-       struct ip_nat_pptp *nat_pptp_info;
-       struct ip_nat_range range;
-
-       ct_pptp_info = &master->help.ct_pptp_info;
-       nat_pptp_info = &master->nat.help.nat_pptp_info;
-
-       /* And here goes the grand finale of corrosion... */
-
-       if (exp->dir == IP_CT_DIR_ORIGINAL) {
-               DEBUGP("we are PNS->PAC\n");
-               /* therefore, build tuple for PAC->PNS */
-               t.src.ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip;
-               t.src.u.gre.key = master->help.ct_pptp_info.pac_call_id;
-               t.dst.ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
-               t.dst.u.gre.key = master->help.ct_pptp_info.pns_call_id;
-               t.dst.protonum = IPPROTO_GRE;
-       } else {
-               DEBUGP("we are PAC->PNS\n");
-               /* build tuple for PNS->PAC */
-               t.src.ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
-               t.src.u.gre.key = master->nat.help.nat_pptp_info.pns_call_id;
-               t.dst.ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
-               t.dst.u.gre.key = master->nat.help.nat_pptp_info.pac_call_id;
-               t.dst.protonum = IPPROTO_GRE;
-       }
-
-       DEBUGP("trying to unexpect other dir: ");
-       DUMP_TUPLE(&t);
-       other_exp = ip_conntrack_expect_find_get(&t);
-       if (other_exp) {
-               ip_conntrack_unexpect_related(other_exp);
-               ip_conntrack_expect_put(other_exp);
-               DEBUGP("success\n");
-       } else {
-               DEBUGP("not found!\n");
-       }
-
-       /* This must be a fresh one. */
-       BUG_ON(ct->status & IPS_NAT_DONE_MASK);
-
-       /* Change src to where master sends to */
-       range.flags = IP_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.dst.ip;
-       if (exp->dir == IP_CT_DIR_ORIGINAL) {
-               range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
-               range.min = range.max = exp->saved_proto;
-       }
-       /* hook doesn't matter, but it has to do source manip */
-       ip_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
-
-       /* For DST manip, map port here to where it's expected. */
-       range.flags = IP_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.src.ip;
-       if (exp->dir == IP_CT_DIR_REPLY) {
-               range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
-               range.min = range.max = exp->saved_proto;
-       }
-       /* hook doesn't matter, but it has to do destination manip */
-       ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
-}
-
-/* outbound packets == from PNS to PAC */
-static int
-pptp_outbound_pkt(struct sk_buff **pskb,
-                 struct ip_conntrack *ct,
-                 enum ip_conntrack_info ctinfo,
-                 struct PptpControlHeader *ctlh,
-                 union pptp_ctrl_union *pptpReq)
-
-{
-       struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info;
-       struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
-       u_int16_t msg;
-       __be16 new_callid;
-       unsigned int cid_off;
-
-       new_callid = ct_pptp_info->pns_call_id;
-
-       switch (msg = ntohs(ctlh->messageType)) {
-       case PPTP_OUT_CALL_REQUEST:
-               cid_off = offsetof(union pptp_ctrl_union, ocreq.callID);
-               /* FIXME: ideally we would want to reserve a call ID
-                * here.  current netfilter NAT core is not able to do
-                * this :( For now we use TCP source port. This breaks
-                * multiple calls within one control session */
-
-               /* save original call ID in nat_info */
-               nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id;
-
-               /* don't use tcph->source since we are at a DSTmanip
-                * hook (e.g. PREROUTING) and pkt is not mangled yet */
-               new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
-
-               /* save new call ID in ct info */
-               ct_pptp_info->pns_call_id = new_callid;
-               break;
-       case PPTP_IN_CALL_REPLY:
-               cid_off = offsetof(union pptp_ctrl_union, icack.callID);
-               break;
-       case PPTP_CALL_CLEAR_REQUEST:
-               cid_off = offsetof(union pptp_ctrl_union, clrreq.callID);
-               break;
-       default:
-               DEBUGP("unknown outbound packet 0x%04x:%s\n", msg,
-                     (msg <= PPTP_MSG_MAX)?
-                     pptp_msg_name[msg]:pptp_msg_name[0]);
-               /* fall through */
-
-       case PPTP_SET_LINK_INFO:
-               /* only need to NAT in case PAC is behind NAT box */
-       case PPTP_START_SESSION_REQUEST:
-       case PPTP_START_SESSION_REPLY:
-       case PPTP_STOP_SESSION_REQUEST:
-       case PPTP_STOP_SESSION_REPLY:
-       case PPTP_ECHO_REQUEST:
-       case PPTP_ECHO_REPLY:
-               /* no need to alter packet */
-               return NF_ACCEPT;
-       }
-
-       /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
-        * down to here */
-       DEBUGP("altering call id from 0x%04x to 0x%04x\n",
-               ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid));
-
-       /* mangle packet */
-       if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
-                                    cid_off + sizeof(struct pptp_pkt_hdr) +
-                                    sizeof(struct PptpControlHeader),
-                                    sizeof(new_callid), (char *)&new_callid,
-                                    sizeof(new_callid)) == 0)
-               return NF_DROP;
-
-       return NF_ACCEPT;
-}
-
-static void
-pptp_exp_gre(struct ip_conntrack_expect *expect_orig,
-            struct ip_conntrack_expect *expect_reply)
-{
-       struct ip_conntrack *ct = expect_orig->master;
-       struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info;
-       struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
-
-       /* save original PAC call ID in nat_info */
-       nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;
-
-       /* alter expectation for PNS->PAC direction */
-       expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id;
-       expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id;
-       expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id;
-       expect_orig->dir = IP_CT_DIR_ORIGINAL;
-
-       /* alter expectation for PAC->PNS direction */
-       expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id;
-       expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id;
-       expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id;
-       expect_reply->dir = IP_CT_DIR_REPLY;
-}
-
-/* inbound packets == from PAC to PNS */
-static int
-pptp_inbound_pkt(struct sk_buff **pskb,
-                struct ip_conntrack *ct,
-                enum ip_conntrack_info ctinfo,
-                struct PptpControlHeader *ctlh,
-                union pptp_ctrl_union *pptpReq)
-{
-       struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
-       u_int16_t msg;
-       __be16 new_pcid;
-       unsigned int pcid_off;
-
-       new_pcid = nat_pptp_info->pns_call_id;
-
-       switch (msg = ntohs(ctlh->messageType)) {
-       case PPTP_OUT_CALL_REPLY:
-               pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID);
-               break;
-       case PPTP_IN_CALL_CONNECT:
-               pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID);
-               break;
-       case PPTP_IN_CALL_REQUEST:
-               /* only need to nat in case PAC is behind NAT box */
-               return NF_ACCEPT;
-       case PPTP_WAN_ERROR_NOTIFY:
-               pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID);
-               break;
-       case PPTP_CALL_DISCONNECT_NOTIFY:
-               pcid_off = offsetof(union pptp_ctrl_union, disc.callID);
-               break;
-       case PPTP_SET_LINK_INFO:
-               pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
-               break;
-
-       default:
-               DEBUGP("unknown inbound packet %s\n", (msg <= PPTP_MSG_MAX)?
-                       pptp_msg_name[msg]:pptp_msg_name[0]);
-               /* fall through */
-
-       case PPTP_START_SESSION_REQUEST:
-       case PPTP_START_SESSION_REPLY:
-       case PPTP_STOP_SESSION_REQUEST:
-       case PPTP_STOP_SESSION_REPLY:
-       case PPTP_ECHO_REQUEST:
-       case PPTP_ECHO_REPLY:
-               /* no need to alter packet */
-               return NF_ACCEPT;
-       }
-
-       /* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST,
-        * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */
-
-       /* mangle packet */
-       DEBUGP("altering peer call id from 0x%04x to 0x%04x\n",
-               ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));
-
-       if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
-                                    pcid_off + sizeof(struct pptp_pkt_hdr) +
-                                    sizeof(struct PptpControlHeader),
-                                    sizeof(new_pcid), (char *)&new_pcid,
-                                    sizeof(new_pcid)) == 0)
-               return NF_DROP;
-       return NF_ACCEPT;
-}
-
-
-extern int __init ip_nat_proto_gre_init(void);
-extern void __exit ip_nat_proto_gre_fini(void);
-
-static int __init ip_nat_helper_pptp_init(void)
-{
-       int ret;
-
-       DEBUGP("%s: registering NAT helper\n", __FILE__);
-
-       ret = ip_nat_proto_gre_init();
-       if (ret < 0)
-               return ret;
-
-       BUG_ON(rcu_dereference(ip_nat_pptp_hook_outbound));
-       rcu_assign_pointer(ip_nat_pptp_hook_outbound, pptp_outbound_pkt);
-
-       BUG_ON(rcu_dereference(ip_nat_pptp_hook_inbound));
-       rcu_assign_pointer(ip_nat_pptp_hook_inbound, pptp_inbound_pkt);
-
-       BUG_ON(rcu_dereference(ip_nat_pptp_hook_exp_gre));
-       rcu_assign_pointer(ip_nat_pptp_hook_exp_gre, pptp_exp_gre);
-
-       BUG_ON(rcu_dereference(ip_nat_pptp_hook_expectfn));
-       rcu_assign_pointer(ip_nat_pptp_hook_expectfn, pptp_nat_expected);
-
-       printk("ip_nat_pptp version %s loaded\n", IP_NAT_PPTP_VERSION);
-       return 0;
-}
-
-static void __exit ip_nat_helper_pptp_fini(void)
-{
-       DEBUGP("cleanup_module\n" );
-
-       rcu_assign_pointer(ip_nat_pptp_hook_expectfn, NULL);
-       rcu_assign_pointer(ip_nat_pptp_hook_exp_gre, NULL);
-       rcu_assign_pointer(ip_nat_pptp_hook_inbound, NULL);
-       rcu_assign_pointer(ip_nat_pptp_hook_outbound, NULL);
-       synchronize_rcu();
-
-       ip_nat_proto_gre_fini();
-
-       printk("ip_nat_pptp version %s unloaded\n", IP_NAT_PPTP_VERSION);
-}
-
-module_init(ip_nat_helper_pptp_init);
-module_exit(ip_nat_helper_pptp_fini);
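
As a rough illustration of how pptp_outbound_pkt()/pptp_inbound_pkt() above locate the call-ID field, here is a self-contained sketch of the offsetof()-based in-place rewrite. The structures are trimmed stand-ins for the kernel's pptp_ctrl_union and the concrete values are made up; only the addressing pattern (REQ_CID plus a field offset) mirrors the code above.

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ocall_request {			/* trimmed Outgoing-Call-Request body */
	uint16_t callID;
	uint16_t callSerialNumber;
} __attribute__((packed));

union ctrl_union {			/* stand-in for union pptp_ctrl_union */
	struct ocall_request ocreq;
};

#define REQ_CID(req, off)	(*(uint16_t *)((char *)(req) + (off)))

int main(void)
{
	union ctrl_union msg = { .ocreq = { .callID = htons(1) } };
	size_t cid_off = offsetof(union ctrl_union, ocreq.callID);

	/* overwrite the call ID in place, as the helper does after it has
	 * picked a new (NATed) value */
	REQ_CID(&msg, cid_off) = htons(40123);

	printf("call ID is now %u\n", ntohs(msg.ocreq.callID));
	return 0;
}
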
diff --git a/net/ipv4/netfilter/ip_nat_irc.c b/net/ipv4/netfilter/ip_nat_irc.c
deleted file mode 100644 (file)
index cfaeea3..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-/* IRC extension for TCP NAT alteration.
- * (C) 2000-2001 by Harald Welte <laforge@gnumonks.org>
- * (C) 2004 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
- * based on a copy of RR's ip_nat_ftp.c
- *
- * ip_nat_irc.c,v 1.16 2001/12/06 07:42:10 laforge Exp
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/kernel.h>
-#include <net/tcp.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_conntrack_irc.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/moduleparam.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("IRC (DCC) NAT helper");
-MODULE_LICENSE("GPL");
-
-static unsigned int help(struct sk_buff **pskb,
-                        enum ip_conntrack_info ctinfo,
-                        unsigned int matchoff,
-                        unsigned int matchlen,
-                        struct ip_conntrack_expect *exp)
-{
-       u_int16_t port;
-       unsigned int ret;
-
-       /* "4294967295 65535 " */
-       char buffer[18];
-
-       DEBUGP("IRC_NAT: info (seq %u + %u) in %u\n",
-              expect->seq, exp_irc_info->len,
-              ntohl(tcph->seq));
-
-       /* Reply comes from server. */
-       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
-       exp->dir = IP_CT_DIR_REPLY;
-
-       /* When you see the packet, we need to NAT it the same way as
-        * this one. */
-       exp->expectfn = ip_nat_follow_master;
-
-       /* Try to get same port: if not, try to change it. */
-       for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
-               exp->tuple.dst.u.tcp.port = htons(port);
-               if (ip_conntrack_expect_related(exp) == 0)
-                       break;
-       }
-
-       if (port == 0)
-               return NF_DROP;
-
-       /*      strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
-        *      strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
-        *      strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
-        *      strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
-        *      strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
-        *              AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
-        *                      255.255.255.255==4294967295, 10 digits)
-        *              P:         bound port (min 1 d, max 5d (65535))
-        *              F:         filename   (min 1 d )
-        *              S:         size       (min 1 d )
-        *              0x01, \n:  terminators
-        */
-
-       /* AAA = "us", ie. where server normally talks to. */
-       sprintf(buffer, "%u %u",
-               ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip),
-               port);
-       DEBUGP("ip_nat_irc: Inserting '%s' == %u.%u.%u.%u, port %u\n",
-              buffer, NIPQUAD(exp->tuple.src.ip), port);
-
-       ret = ip_nat_mangle_tcp_packet(pskb, exp->master, ctinfo,
-                                      matchoff, matchlen, buffer,
-                                      strlen(buffer));
-       if (ret != NF_ACCEPT)
-               ip_conntrack_unexpect_related(exp);
-       return ret;
-}
-
-static void __exit ip_nat_irc_fini(void)
-{
-       rcu_assign_pointer(ip_nat_irc_hook, NULL);
-       synchronize_rcu();
-}
-
-static int __init ip_nat_irc_init(void)
-{
-       BUG_ON(rcu_dereference(ip_nat_irc_hook));
-       rcu_assign_pointer(ip_nat_irc_hook, help);
-       return 0;
-}
-
-/* Prior to 2.6.11, we had a ports param.  No longer, but don't break users. */
-static int warn_set(const char *val, struct kernel_param *kp)
-{
-       printk(KERN_INFO KBUILD_MODNAME
-              ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
-       return 0;
-}
-module_param_call(ports, warn_set, NULL, NULL, 0);
-
-module_init(ip_nat_irc_init);
-module_exit(ip_nat_irc_fini);
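
A small stand-alone sketch of the replacement string the help() routine above builds for DCC: the address travels as one host-order decimal integer followed by the port, which is why the buffer comment allows for 10 address digits plus 5 port digits. format_dcc_addr() is a hypothetical helper used only for this model; the kernel code writes straight into buffer[18] with sprintf().

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int format_dcc_addr(char *buf, size_t len,
			   uint32_t ip_host_order, uint16_t port)
{
	/* DCC carries the address as a single host-order decimal number */
	return snprintf(buf, len, "%u %u",
			(unsigned int)ip_host_order, (unsigned int)port);
}

int main(void)
{
	char buffer[18];
	struct in_addr a;

	inet_pton(AF_INET, "192.0.2.1", &a);
	format_dcc_addr(buffer, sizeof(buffer), ntohl(a.s_addr), 65535);
	printf("DCC payload fragment: \"%s\"\n", buffer);
	return 0;
}
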
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
deleted file mode 100644 (file)
index 9581020..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * ip_nat_proto_gre.c - Version 2.0
- *
- * NAT protocol helper module for GRE.
- *
- * GRE is a generic encapsulation protocol, which is generally not very
- * suited for NAT, as it has no protocol-specific part such as port numbers.
- *
- * It has an optional key field, which may help us distinguish two
- * connections between the same two hosts.
- *
- * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
- *
- * PPTP is built on top of a modified version of GRE, and has a mandatory
- * field called "CallID", which serves us for the same purpose as the key
- * field in plain GRE.
- *
- * Documentation about PPTP can be found in RFC 2637
- *
- * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- */
-
-#include <linux/module.h>
-#include <linux/ip.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
-
-#if 0
-#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \
-                                      __FUNCTION__, ## args)
-#else
-#define DEBUGP(x, args...)
-#endif
-
-/* is key in given range between min and max */
-static int
-gre_in_range(const struct ip_conntrack_tuple *tuple,
-            enum ip_nat_manip_type maniptype,
-            const union ip_conntrack_manip_proto *min,
-            const union ip_conntrack_manip_proto *max)
-{
-       __be16 key;
-
-       if (maniptype == IP_NAT_MANIP_SRC)
-               key = tuple->src.u.gre.key;
-       else
-               key = tuple->dst.u.gre.key;
-
-       return ntohs(key) >= ntohs(min->gre.key)
-               && ntohs(key) <= ntohs(max->gre.key);
-}
-
-/* generate unique tuple ... */
-static int
-gre_unique_tuple(struct ip_conntrack_tuple *tuple,
-                const struct ip_nat_range *range,
-                enum ip_nat_manip_type maniptype,
-                const struct ip_conntrack *conntrack)
-{
-       static u_int16_t key;
-       __be16 *keyptr;
-       unsigned int min, i, range_size;
-
-       if (maniptype == IP_NAT_MANIP_SRC)
-               keyptr = &tuple->src.u.gre.key;
-       else
-               keyptr = &tuple->dst.u.gre.key;
-
-       if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
-               DEBUGP("%p: NATing GRE PPTP\n", conntrack);
-               min = 1;
-               range_size = 0xffff;
-       } else {
-               min = ntohs(range->min.gre.key);
-               range_size = ntohs(range->max.gre.key) - min + 1;
-       }
-
-       DEBUGP("min = %u, range_size = %u\n", min, range_size);
-
-       for (i = 0; i < range_size; i++, key++) {
-               *keyptr = htons(min + key % range_size);
-               if (!ip_nat_used_tuple(tuple, conntrack))
-                       return 1;
-       }
-
-       DEBUGP("%p: no NAT mapping\n", conntrack);
-
-       return 0;
-}
-
-/* manipulate a GRE packet according to maniptype */
-static int
-gre_manip_pkt(struct sk_buff **pskb,
-             unsigned int iphdroff,
-             const struct ip_conntrack_tuple *tuple,
-             enum ip_nat_manip_type maniptype)
-{
-       struct gre_hdr *greh;
-       struct gre_hdr_pptp *pgreh;
-       struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-
-       /* pgreh includes two optional 32bit fields which are not required
-        * to be there.  That's where the magic '8' comes from */
-       if (!skb_make_writable(pskb, hdroff + sizeof(*pgreh)-8))
-               return 0;
-
-       greh = (void *)(*pskb)->data + hdroff;
-       pgreh = (struct gre_hdr_pptp *) greh;
-
-       /* we only have destination manip of a packet, since 'source key'
-        * is not present in the packet itself */
-       if (maniptype == IP_NAT_MANIP_DST) {
-               /* key manipulation is always dest */
-               switch (greh->version) {
-               case 0:
-                       if (!greh->key) {
-                               DEBUGP("can't nat GRE w/o key\n");
-                               break;
-                       }
-                       if (greh->csum) {
-                               /* FIXME: Never tested this code... */
-                               nf_proto_csum_replace4(gre_csum(greh), *pskb,
-                                                       *(gre_key(greh)),
-                                                       tuple->dst.u.gre.key, 0);
-                       }
-                       *(gre_key(greh)) = tuple->dst.u.gre.key;
-                       break;
-               case GRE_VERSION_PPTP:
-                       DEBUGP("call_id -> 0x%04x\n",
-                               ntohs(tuple->dst.u.gre.key));
-                       pgreh->call_id = tuple->dst.u.gre.key;
-                       break;
-               default:
-                       DEBUGP("can't nat unknown GRE version\n");
-                       return 0;
-                       break;
-               }
-       }
-       return 1;
-}
-
-/* nat helper struct */
-static struct ip_nat_protocol gre = {
-       .name           = "GRE",
-       .protonum       = IPPROTO_GRE,
-       .manip_pkt      = gre_manip_pkt,
-       .in_range       = gre_in_range,
-       .unique_tuple   = gre_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-       .range_to_nfattr        = ip_nat_port_range_to_nfattr,
-       .nfattr_to_range        = ip_nat_port_nfattr_to_range,
-#endif
-};
-
-int __init ip_nat_proto_gre_init(void)
-{
-       return ip_nat_protocol_register(&gre);
-}
-
-void __exit ip_nat_proto_gre_fini(void)
-{
-       ip_nat_protocol_unregister(&gre);
-}
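
The key/port selection in gre_unique_tuple() above (and in the ICMP/TCP/UDP variants later in this diff) follows one pattern: walk the allowed range from a rolling starting point until ip_nat_used_tuple() reports a free value. Below is a compact user-space model of that search under assumed names; the used[] table and tuple_in_use-style check stand in for the conntrack lookup.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool used[65536];		/* stands in for the conntrack table */

/* Return a free key in [min, min + range_size) and mark it used,
 * or 0 if the whole range is occupied. */
static uint16_t pick_unique_key(uint16_t min, uint16_t range_size)
{
	static uint16_t rover;		/* persists across calls, like 'key' above */
	uint16_t i;

	for (i = 0; i < range_size; i++, rover++) {
		uint16_t candidate = min + rover % range_size;

		if (!used[candidate]) {
			used[candidate] = true;	/* the new binding occupies it */
			return candidate;
		}
	}
	return 0;			/* every value in the range is taken */
}

int main(void)
{
	used[1000] = used[1001] = used[1002] = true;	/* pre-existing bindings */
	printf("picked %u\n", pick_unique_key(1000, 16));
	printf("picked %u\n", pick_unique_key(1000, 16));
	return 0;
}

The static rover mirrors the static 'key'/'port'/'id' counters in the kernel functions: it remembers where the previous search left off, so later allocations do not always rescan the range from its start.
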
diff --git a/net/ipv4/netfilter/ip_nat_proto_icmp.c b/net/ipv4/netfilter/ip_nat_proto_icmp.c
deleted file mode 100644 (file)
index 22a528a..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <linux/icmp.h>
-#include <linux/if.h>
-
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_core.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-
-static int
-icmp_in_range(const struct ip_conntrack_tuple *tuple,
-             enum ip_nat_manip_type maniptype,
-             const union ip_conntrack_manip_proto *min,
-             const union ip_conntrack_manip_proto *max)
-{
-       return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
-              ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
-}
-
-static int
-icmp_unique_tuple(struct ip_conntrack_tuple *tuple,
-                 const struct ip_nat_range *range,
-                 enum ip_nat_manip_type maniptype,
-                 const struct ip_conntrack *conntrack)
-{
-       static u_int16_t id;
-       unsigned int range_size;
-       unsigned int i;
-
-       range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1;
-       /* If no range specified... */
-       if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
-               range_size = 0xFFFF;
-
-       for (i = 0; i < range_size; i++, id++) {
-               tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
-                                            (id % range_size));
-               if (!ip_nat_used_tuple(tuple, conntrack))
-                       return 1;
-       }
-       return 0;
-}
-
-static int
-icmp_manip_pkt(struct sk_buff **pskb,
-              unsigned int iphdroff,
-              const struct ip_conntrack_tuple *tuple,
-              enum ip_nat_manip_type maniptype)
-{
-       struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
-       struct icmphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-
-       if (!skb_make_writable(pskb, hdroff + sizeof(*hdr)))
-               return 0;
-
-       hdr = (struct icmphdr *)((*pskb)->data + hdroff);
-       nf_proto_csum_replace2(&hdr->checksum, *pskb,
-                              hdr->un.echo.id, tuple->src.u.icmp.id, 0);
-       hdr->un.echo.id = tuple->src.u.icmp.id;
-       return 1;
-}
-
-struct ip_nat_protocol ip_nat_protocol_icmp = {
-       .name                   = "ICMP",
-       .protonum               = IPPROTO_ICMP,
-       .me                     = THIS_MODULE,
-       .manip_pkt              = icmp_manip_pkt,
-       .in_range               = icmp_in_range,
-       .unique_tuple           = icmp_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-       .range_to_nfattr        = ip_nat_port_range_to_nfattr,
-       .nfattr_to_range        = ip_nat_port_nfattr_to_range,
-#endif
-};
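
icmp_manip_pkt() above leans on nf_proto_csum_replace2() to keep the ICMP checksum valid after the echo ID is rewritten. The underlying trick is the incremental Internet-checksum update of RFC 1624, HC' = ~(~HC + ~m + m'), sketched below in stand-alone form; csum_replace2() here is a model of that arithmetic, not the kernel helper, and the sample numbers are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* HC' = ~(~HC + ~m + m'), computed in one's-complement arithmetic */
static uint16_t csum_replace2(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t check = 0x1c46;		/* arbitrary example checksum */
	uint16_t patched = csum_replace2(check, 0x0001, 0xbeef);

	printf("checksum 0x%04x -> 0x%04x after rewriting 0x0001 to 0xbeef\n",
	       check, patched);
	return 0;
}

Patching the checksum this way needs only the old and new 16-bit values, which is why the helper can fix up a single header field without summing the rest of the packet again.
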
diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
deleted file mode 100644 (file)
index 14ff24f..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/random.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/if.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-#include <linux/netfilter_ipv4/ip_nat_core.h>
-
-static int
-tcp_in_range(const struct ip_conntrack_tuple *tuple,
-            enum ip_nat_manip_type maniptype,
-            const union ip_conntrack_manip_proto *min,
-            const union ip_conntrack_manip_proto *max)
-{
-       __be16 port;
-
-       if (maniptype == IP_NAT_MANIP_SRC)
-               port = tuple->src.u.tcp.port;
-       else
-               port = tuple->dst.u.tcp.port;
-
-       return ntohs(port) >= ntohs(min->tcp.port)
-               && ntohs(port) <= ntohs(max->tcp.port);
-}
-
-static int
-tcp_unique_tuple(struct ip_conntrack_tuple *tuple,
-                const struct ip_nat_range *range,
-                enum ip_nat_manip_type maniptype,
-                const struct ip_conntrack *conntrack)
-{
-       static u_int16_t port;
-       __be16 *portptr;
-       unsigned int range_size, min, i;
-
-       if (maniptype == IP_NAT_MANIP_SRC)
-               portptr = &tuple->src.u.tcp.port;
-       else
-               portptr = &tuple->dst.u.tcp.port;
-
-       /* If no range specified... */
-       if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
-               /* If it's dst rewrite, can't change port */
-               if (maniptype == IP_NAT_MANIP_DST)
-                       return 0;
-
-               /* Map privileged onto privileged. */
-               if (ntohs(*portptr) < 1024) {
-                       /* Loose convention: >> 512 is credential passing */
-                       if (ntohs(*portptr)<512) {
-                               min = 1;
-                               range_size = 511 - min + 1;
-                       } else {
-                               min = 600;
-                               range_size = 1023 - min + 1;
-                       }
-               } else {
-                       min = 1024;
-                       range_size = 65535 - 1024 + 1;
-               }
-       } else {
-               min = ntohs(range->min.tcp.port);
-               range_size = ntohs(range->max.tcp.port) - min + 1;
-       }
-
-       /* Start from random port to avoid prediction */
-       if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
-               port =  net_random();
-
-       for (i = 0; i < range_size; i++, port++) {
-               *portptr = htons(min + port % range_size);
-               if (!ip_nat_used_tuple(tuple, conntrack)) {
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-static int
-tcp_manip_pkt(struct sk_buff **pskb,
-             unsigned int iphdroff,
-             const struct ip_conntrack_tuple *tuple,
-             enum ip_nat_manip_type maniptype)
-{
-       struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
-       struct tcphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-       __be32 oldip, newip;
-       __be16 *portptr, newport, oldport;
-       int hdrsize = 8; /* TCP connection tracking guarantees this much */
-
-       /* this could be an inner header returned in an ICMP packet; in such
-          cases we cannot update the checksum field since it is outside of
-          the 8 bytes of transport layer headers we are guaranteed */
-       if ((*pskb)->len >= hdroff + sizeof(struct tcphdr))
-               hdrsize = sizeof(struct tcphdr);
-
-       if (!skb_make_writable(pskb, hdroff + hdrsize))
-               return 0;
-
-       iph = (struct iphdr *)((*pskb)->data + iphdroff);
-       hdr = (struct tcphdr *)((*pskb)->data + hdroff);
-
-       if (maniptype == IP_NAT_MANIP_SRC) {
-               /* Get rid of src ip and src pt */
-               oldip = iph->saddr;
-               newip = tuple->src.ip;
-               newport = tuple->src.u.tcp.port;
-               portptr = &hdr->source;
-       } else {
-               /* Get rid of dst ip and dst pt */
-               oldip = iph->daddr;
-               newip = tuple->dst.ip;
-               newport = tuple->dst.u.tcp.port;
-               portptr = &hdr->dest;
-       }
-
-       oldport = *portptr;
-       *portptr = newport;
-
-       if (hdrsize < sizeof(*hdr))
-               return 1;
-
-       nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
-       nf_proto_csum_replace2(&hdr->check, *pskb, oldport, newport, 0);
-       return 1;
-}
-
-struct ip_nat_protocol ip_nat_protocol_tcp = {
-       .name                   = "TCP",
-       .protonum               = IPPROTO_TCP,
-       .me                     = THIS_MODULE,
-       .manip_pkt              = tcp_manip_pkt,
-       .in_range               = tcp_in_range,
-       .unique_tuple           = tcp_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-       .range_to_nfattr        = ip_nat_port_range_to_nfattr,
-       .nfattr_to_range        = ip_nat_port_nfattr_to_range,
-#endif
-};
diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
deleted file mode 100644 (file)
index dfd5216..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/random.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/if.h>
-
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_core.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-
-static int
-udp_in_range(const struct ip_conntrack_tuple *tuple,
-            enum ip_nat_manip_type maniptype,
-            const union ip_conntrack_manip_proto *min,
-            const union ip_conntrack_manip_proto *max)
-{
-       __be16 port;
-
-       if (maniptype == IP_NAT_MANIP_SRC)
-               port = tuple->src.u.udp.port;
-       else
-               port = tuple->dst.u.udp.port;
-
-       return ntohs(port) >= ntohs(min->udp.port)
-               && ntohs(port) <= ntohs(max->udp.port);
-}
-
-static int
-udp_unique_tuple(struct ip_conntrack_tuple *tuple,
-                const struct ip_nat_range *range,
-                enum ip_nat_manip_type maniptype,
-                const struct ip_conntrack *conntrack)
-{
-       static u_int16_t port;
-       __be16 *portptr;
-       unsigned int range_size, min, i;
-
-       if (maniptype == IP_NAT_MANIP_SRC)
-               portptr = &tuple->src.u.udp.port;
-       else
-               portptr = &tuple->dst.u.udp.port;
-
-       /* If no range specified... */
-       if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
-               /* If it's dst rewrite, can't change port */
-               if (maniptype == IP_NAT_MANIP_DST)
-                       return 0;
-
-               if (ntohs(*portptr) < 1024) {
-                       /* Loose convention: >> 512 is credential passing */
-                       if (ntohs(*portptr)<512) {
-                               min = 1;
-                               range_size = 511 - min + 1;
-                       } else {
-                               min = 600;
-                               range_size = 1023 - min + 1;
-                       }
-               } else {
-                       min = 1024;
-                       range_size = 65535 - 1024 + 1;
-               }
-       } else {
-               min = ntohs(range->min.udp.port);
-               range_size = ntohs(range->max.udp.port) - min + 1;
-       }
-
-       /* Start from random port to avoid prediction */
-       if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
-               port = net_random();
-
-       for (i = 0; i < range_size; i++, port++) {
-               *portptr = htons(min + port % range_size);
-               if (!ip_nat_used_tuple(tuple, conntrack))
-                       return 1;
-       }
-       return 0;
-}
-
-static int
-udp_manip_pkt(struct sk_buff **pskb,
-             unsigned int iphdroff,
-             const struct ip_conntrack_tuple *tuple,
-             enum ip_nat_manip_type maniptype)
-{
-       struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
-       struct udphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-       __be32 oldip, newip;
-       __be16 *portptr, newport;
-
-       if (!skb_make_writable(pskb, hdroff + sizeof(*hdr)))
-               return 0;
-
-       iph = (struct iphdr *)((*pskb)->data + iphdroff);
-       hdr = (struct udphdr *)((*pskb)->data + hdroff);
-
-       if (maniptype == IP_NAT_MANIP_SRC) {
-               /* Get rid of src ip and src pt */
-               oldip = iph->saddr;
-               newip = tuple->src.ip;
-               newport = tuple->src.u.udp.port;
-               portptr = &hdr->source;
-       } else {
-               /* Get rid of dst ip and dst pt */
-               oldip = iph->daddr;
-               newip = tuple->dst.ip;
-               newport = tuple->dst.u.udp.port;
-               portptr = &hdr->dest;
-       }
-
-       if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) {
-               nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
-               nf_proto_csum_replace2(&hdr->check, *pskb, *portptr, newport, 0);
-               if (!hdr->check)
-                       hdr->check = CSUM_MANGLED_0;
-       }
-       *portptr = newport;
-       return 1;
-}
-
-struct ip_nat_protocol ip_nat_protocol_udp = {
-       .name                   = "UDP",
-       .protonum               = IPPROTO_UDP,
-       .me                     = THIS_MODULE,
-       .manip_pkt              = udp_manip_pkt,
-       .in_range               = udp_in_range,
-       .unique_tuple           = udp_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
-    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
-       .range_to_nfattr        = ip_nat_port_range_to_nfattr,
-       .nfattr_to_range        = ip_nat_port_nfattr_to_range,
-#endif
-};
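
Editor's note: a minimal userspace sketch (hypothetical helpers, illustration only, not part of this diff) of the search loop in udp_unique_tuple() above: choose a range from the original port class, optionally start at a random offset, then walk the range once looking for a port not already claimed by another tuple. port_in_use() stands in for ip_nat_used_tuple().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for ip_nat_used_tuple(): pretend a few ports are taken. */
static bool port_in_use(unsigned int port)
{
	return port >= 1024 && port < 1040;
}

/* Walk the range once, starting at 'start', and return the first free
 * port, or 0 if every port in the range is occupied. */
static unsigned int pick_port(unsigned int min, unsigned int range_size,
			      unsigned int start)
{
	unsigned int i, cursor = start;

	for (i = 0; i < range_size; i++, cursor++) {
		unsigned int candidate = min + cursor % range_size;

		if (!port_in_use(candidate))
			return candidate;
	}
	return 0;
}

int main(void)
{
	/* Mirrors the ">= 1024" case above; the random start offset plays
	 * the role of IP_NAT_RANGE_PROTO_RANDOM. */
	unsigned int port = pick_port(1024, 65535 - 1024 + 1,
				      (unsigned int)rand());

	printf("allocated port %u\n", port);
	return port ? 0 : 1;
}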
diff --git a/net/ipv4/netfilter/ip_nat_proto_unknown.c b/net/ipv4/netfilter/ip_nat_proto_unknown.c
deleted file mode 100644 (file)
index 3bf0495..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/* The "unknown" protocol.  This is what is used for protocols we
- * don't understand.  It's returned by ip_ct_find_proto().
- */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/netfilter.h>
-#include <linux/if.h>
-
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-
-static int unknown_in_range(const struct ip_conntrack_tuple *tuple,
-                           enum ip_nat_manip_type manip_type,
-                           const union ip_conntrack_manip_proto *min,
-                           const union ip_conntrack_manip_proto *max)
-{
-       return 1;
-}
-
-static int unknown_unique_tuple(struct ip_conntrack_tuple *tuple,
-                               const struct ip_nat_range *range,
-                               enum ip_nat_manip_type maniptype,
-                               const struct ip_conntrack *conntrack)
-{
-       /* Sorry: we can't help you; if it's not unique, we can't frob
-          anything. */
-       return 0;
-}
-
-static int
-unknown_manip_pkt(struct sk_buff **pskb,
-                 unsigned int iphdroff,
-                 const struct ip_conntrack_tuple *tuple,
-                 enum ip_nat_manip_type maniptype)
-{
-       return 1;
-}
-
-struct ip_nat_protocol ip_nat_unknown_protocol = {
-       .name                   = "unknown",
-       /* .me isn't set: getting a ref to this cannot fail. */
-       .manip_pkt              = unknown_manip_pkt,
-       .in_range               = unknown_in_range,
-       .unique_tuple           = unknown_unique_tuple,
-};
diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c
deleted file mode 100644 (file)
index 080eb1d..0000000
+++ /dev/null
@@ -1,314 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* Everything about the rules for NAT. */
-#include <linux/types.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/skbuff.h>
-#include <linux/proc_fs.h>
-#include <net/checksum.h>
-#include <net/route.h>
-#include <linux/bitops.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_core.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-#define NAT_VALID_HOOKS ((1<<NF_IP_PRE_ROUTING) | (1<<NF_IP_POST_ROUTING) | (1<<NF_IP_LOCAL_OUT))
-
-static struct
-{
-       struct ipt_replace repl;
-       struct ipt_standard entries[3];
-       struct ipt_error term;
-} nat_initial_table __initdata
-= { { "nat", NAT_VALID_HOOKS, 4,
-      sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
-      { [NF_IP_PRE_ROUTING] = 0,
-       [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard),
-       [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 },
-      { [NF_IP_PRE_ROUTING] = 0,
-       [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard),
-       [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 },
-      0, NULL, { } },
-    {
-           /* PRE_ROUTING */
-           { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
-               0,
-               sizeof(struct ipt_entry),
-               sizeof(struct ipt_standard),
-               0, { 0, 0 }, { } },
-             { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
-               -NF_ACCEPT - 1 } },
-           /* POST_ROUTING */
-           { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
-               0,
-               sizeof(struct ipt_entry),
-               sizeof(struct ipt_standard),
-               0, { 0, 0 }, { } },
-             { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
-               -NF_ACCEPT - 1 } },
-           /* LOCAL_OUT */
-           { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
-               0,
-               sizeof(struct ipt_entry),
-               sizeof(struct ipt_standard),
-               0, { 0, 0 }, { } },
-             { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
-               -NF_ACCEPT - 1 } }
-    },
-    /* ERROR */
-    { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
-       0,
-       sizeof(struct ipt_entry),
-       sizeof(struct ipt_error),
-       0, { 0, 0 }, { } },
-      { { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } },
-         { } },
-       "ERROR"
-      }
-    }
-};
-
-static struct xt_table nat_table = {
-       .name           = "nat",
-       .valid_hooks    = NAT_VALID_HOOKS,
-       .lock           = RW_LOCK_UNLOCKED,
-       .me             = THIS_MODULE,
-       .af             = AF_INET,
-};
-
-/* Source NAT */
-static unsigned int ipt_snat_target(struct sk_buff **pskb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   unsigned int hooknum,
-                                   const struct xt_target *target,
-                                   const void *targinfo)
-{
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-       const struct ip_nat_multi_range_compat *mr = targinfo;
-
-       IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING);
-
-       ct = ip_conntrack_get(*pskb, &ctinfo);
-
-       /* Connection must be valid and new. */
-       IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
-                           || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
-       IP_NF_ASSERT(out);
-
-       return ip_nat_setup_info(ct, &mr->range[0], hooknum);
-}
-
-/* Before 2.6.11 we did implicit source NAT if required. Warn about change. */
-static void warn_if_extra_mangle(__be32 dstip, __be32 srcip)
-{
-       static int warned = 0;
-       struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dstip } } };
-       struct rtable *rt;
-
-       if (ip_route_output_key(&rt, &fl) != 0)
-               return;
-
-       if (rt->rt_src != srcip && !warned) {
-               printk("NAT: no longer support implicit source local NAT\n");
-               printk("NAT: packet src %u.%u.%u.%u -> dst %u.%u.%u.%u\n",
-                      NIPQUAD(srcip), NIPQUAD(dstip));
-               warned = 1;
-       }
-       ip_rt_put(rt);
-}
-
-static unsigned int ipt_dnat_target(struct sk_buff **pskb,
-                                   const struct net_device *in,
-                                   const struct net_device *out,
-                                   unsigned int hooknum,
-                                   const struct xt_target *target,
-                                   const void *targinfo)
-{
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-       const struct ip_nat_multi_range_compat *mr = targinfo;
-
-       IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING
-                    || hooknum == NF_IP_LOCAL_OUT);
-
-       ct = ip_conntrack_get(*pskb, &ctinfo);
-
-       /* Connection must be valid and new. */
-       IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
-
-       if (hooknum == NF_IP_LOCAL_OUT
-           && mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)
-               warn_if_extra_mangle((*pskb)->nh.iph->daddr,
-                                    mr->range[0].min_ip);
-
-       return ip_nat_setup_info(ct, &mr->range[0], hooknum);
-}
-
-static int ipt_snat_checkentry(const char *tablename,
-                              const void *entry,
-                              const struct xt_target *target,
-                              void *targinfo,
-                              unsigned int hook_mask)
-{
-       struct ip_nat_multi_range_compat *mr = targinfo;
-
-       /* Must be a valid range */
-       if (mr->rangesize != 1) {
-               printk("SNAT: multiple ranges no longer supported\n");
-               return 0;
-       }
-       return 1;
-}
-
-static int ipt_dnat_checkentry(const char *tablename,
-                              const void *entry,
-                              const struct xt_target *target,
-                              void *targinfo,
-                              unsigned int hook_mask)
-{
-       struct ip_nat_multi_range_compat *mr = targinfo;
-
-       /* Must be a valid range */
-       if (mr->rangesize != 1) {
-               printk("DNAT: multiple ranges no longer supported\n");
-               return 0;
-       }
-       if (mr->range[0].flags & IP_NAT_RANGE_PROTO_RANDOM) {
-               printk("DNAT: port randomization not supported\n");
-               return 0;
-       }
-       return 1;
-}
-
-inline unsigned int
-alloc_null_binding(struct ip_conntrack *conntrack,
-                  struct ip_nat_info *info,
-                  unsigned int hooknum)
-{
-       /* Force range to this IP; let proto decide mapping for
-          per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
-          Use reply in case it's already been mangled (eg local packet).
-       */
-       __be32 ip
-               = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
-                  ? conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip
-                  : conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip);
-       struct ip_nat_range range
-               = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } };
-
-       DEBUGP("Allocating NULL binding for %p (%u.%u.%u.%u)\n", conntrack,
-              NIPQUAD(ip));
-       return ip_nat_setup_info(conntrack, &range, hooknum);
-}
-
-unsigned int
-alloc_null_binding_confirmed(struct ip_conntrack *conntrack,
-                            struct ip_nat_info *info,
-                            unsigned int hooknum)
-{
-       __be32 ip
-               = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
-                  ? conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip
-                  : conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip);
-       u_int16_t all
-               = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
-                  ? conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all
-                  : conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.all);
-       struct ip_nat_range range
-               = { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } };
-
-       DEBUGP("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n",
-              conntrack, NIPQUAD(ip));
-       return ip_nat_setup_info(conntrack, &range, hooknum);
-}
-
-int ip_nat_rule_find(struct sk_buff **pskb,
-                    unsigned int hooknum,
-                    const struct net_device *in,
-                    const struct net_device *out,
-                    struct ip_conntrack *ct,
-                    struct ip_nat_info *info)
-{
-       int ret;
-
-       ret = ipt_do_table(pskb, hooknum, in, out, &nat_table);
-
-       if (ret == NF_ACCEPT) {
-               if (!ip_nat_initialized(ct, HOOK2MANIP(hooknum)))
-                       /* NUL mapping */
-                       ret = alloc_null_binding(ct, info, hooknum);
-       }
-       return ret;
-}
-
-static struct xt_target ipt_snat_reg = {
-       .name           = "SNAT",
-       .family         = AF_INET,
-       .target         = ipt_snat_target,
-       .targetsize     = sizeof(struct ip_nat_multi_range_compat),
-       .table          = "nat",
-       .hooks          = 1 << NF_IP_POST_ROUTING,
-       .checkentry     = ipt_snat_checkentry,
-};
-
-static struct xt_target ipt_dnat_reg = {
-       .name           = "DNAT",
-       .family         = AF_INET,
-       .target         = ipt_dnat_target,
-       .targetsize     = sizeof(struct ip_nat_multi_range_compat),
-       .table          = "nat",
-       .hooks          = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT),
-       .checkentry     = ipt_dnat_checkentry,
-};
-
-int __init ip_nat_rule_init(void)
-{
-       int ret;
-
-       ret = ipt_register_table(&nat_table, &nat_initial_table.repl);
-       if (ret != 0)
-               return ret;
-       ret = xt_register_target(&ipt_snat_reg);
-       if (ret != 0)
-               goto unregister_table;
-
-       ret = xt_register_target(&ipt_dnat_reg);
-       if (ret != 0)
-               goto unregister_snat;
-
-       return ret;
-
- unregister_snat:
-       xt_unregister_target(&ipt_snat_reg);
- unregister_table:
-       xt_unregister_table(&nat_table);
-
-       return ret;
-}
-
-void ip_nat_rule_cleanup(void)
-{
-       xt_unregister_target(&ipt_dnat_reg);
-       xt_unregister_target(&ipt_snat_reg);
-       ipt_unregister_table(&nat_table);
-}
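
Editor's note: ip_nat_rule_init() above follows the usual register-then-unwind idiom. The sketch below (hypothetical register_*/unregister_* stubs, illustration only, not part of this diff) isolates that shape: each successfully registered object is torn down in reverse order when a later registration fails.

#include <stdio.h>

static int register_table(void)    { puts("table registered"); return 0; }
static int register_snat_tgt(void) { puts("SNAT registered");  return 0; }
static int register_dnat_tgt(void) { puts("DNAT registered");  return -1; /* simulate failure */ }
static void unregister_snat_tgt(void) { puts("SNAT unregistered"); }
static void unregister_table(void)    { puts("table unregistered"); }

static int rule_init(void)
{
	int ret;

	ret = register_table();
	if (ret != 0)
		return ret;

	ret = register_snat_tgt();
	if (ret != 0)
		goto unreg_table;

	ret = register_dnat_tgt();
	if (ret != 0)
		goto unreg_snat;

	return 0;

 unreg_snat:
	unregister_snat_tgt();
 unreg_table:
	unregister_table();
	return ret;
}

int main(void)
{
	return rule_init() == 0 ? 0 : 1;
}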
diff --git a/net/ipv4/netfilter/ip_nat_sip.c b/net/ipv4/netfilter/ip_nat_sip.c
deleted file mode 100644 (file)
index 325c5a9..0000000
+++ /dev/null
@@ -1,282 +0,0 @@
-/* SIP extension for UDP NAT alteration.
- *
- * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
- * based on RR's ip_nat_ftp.c and other modules.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_sip.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
-MODULE_DESCRIPTION("SIP NAT helper");
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-struct addr_map {
-       struct {
-               char            src[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
-               char            dst[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
-               unsigned int    srclen, srciplen;
-               unsigned int    dstlen, dstiplen;
-       } addr[IP_CT_DIR_MAX];
-};
-
-static void addr_map_init(struct ip_conntrack *ct, struct addr_map *map)
-{
-       struct ip_conntrack_tuple *t;
-       enum ip_conntrack_dir dir;
-       unsigned int n;
-
-       for (dir = 0; dir < IP_CT_DIR_MAX; dir++) {
-               t = &ct->tuplehash[dir].tuple;
-
-               n = sprintf(map->addr[dir].src, "%u.%u.%u.%u",
-                           NIPQUAD(t->src.ip));
-               map->addr[dir].srciplen = n;
-               n += sprintf(map->addr[dir].src + n, ":%u",
-                            ntohs(t->src.u.udp.port));
-               map->addr[dir].srclen = n;
-
-               n = sprintf(map->addr[dir].dst, "%u.%u.%u.%u",
-                           NIPQUAD(t->dst.ip));
-               map->addr[dir].dstiplen = n;
-               n += sprintf(map->addr[dir].dst + n, ":%u",
-                            ntohs(t->dst.u.udp.port));
-               map->addr[dir].dstlen = n;
-       }
-}
-
-static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo,
-                       struct ip_conntrack *ct, const char **dptr, size_t dlen,
-                       enum sip_header_pos pos, struct addr_map *map)
-{
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       unsigned int matchlen, matchoff, addrlen;
-       char *addr;
-
-       if (ct_sip_get_info(*dptr, dlen, &matchoff, &matchlen, pos) <= 0)
-               return 1;
-
-       if ((matchlen == map->addr[dir].srciplen ||
-            matchlen == map->addr[dir].srclen) &&
-           memcmp(*dptr + matchoff, map->addr[dir].src, matchlen) == 0) {
-               addr    = map->addr[!dir].dst;
-               addrlen = map->addr[!dir].dstlen;
-       } else if ((matchlen == map->addr[dir].dstiplen ||
-                   matchlen == map->addr[dir].dstlen) &&
-                  memcmp(*dptr + matchoff, map->addr[dir].dst, matchlen) == 0) {
-               addr    = map->addr[!dir].src;
-               addrlen = map->addr[!dir].srclen;
-       } else
-               return 1;
-
-       if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo,
-                                     matchoff, matchlen, addr, addrlen))
-               return 0;
-       *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
-       return 1;
-
-}
-
-static unsigned int ip_nat_sip(struct sk_buff **pskb,
-                              enum ip_conntrack_info ctinfo,
-                              struct ip_conntrack *ct,
-                              const char **dptr)
-{
-       enum sip_header_pos pos;
-       struct addr_map map;
-       int dataoff, datalen;
-
-       dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
-       datalen = (*pskb)->len - dataoff;
-       if (datalen < sizeof("SIP/2.0") - 1)
-               return NF_DROP;
-
-       addr_map_init(ct, &map);
-
-       /* Basic rules: requests and responses. */
-       if (strncmp(*dptr, "SIP/2.0", sizeof("SIP/2.0") - 1) != 0) {
-               /* 10.2: Constructing the REGISTER Request:
-                *
-                * The "userinfo" and "@" components of the SIP URI MUST NOT
-                * be present.
-                */
-               if (datalen >= sizeof("REGISTER") - 1 &&
-                   strncmp(*dptr, "REGISTER", sizeof("REGISTER") - 1) == 0)
-                       pos = POS_REG_REQ_URI;
-               else
-                       pos = POS_REQ_URI;
-
-               if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, pos, &map))
-                       return NF_DROP;
-       }
-
-       if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_FROM, &map) ||
-           !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_TO, &map) ||
-           !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_VIA, &map) ||
-           !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_CONTACT, &map))
-               return NF_DROP;
-       return NF_ACCEPT;
-}
-
-static unsigned int mangle_sip_packet(struct sk_buff **pskb,
-                                     enum ip_conntrack_info ctinfo,
-                                     struct ip_conntrack *ct,
-                                     const char **dptr, size_t dlen,
-                                     char *buffer, int bufflen,
-                                     enum sip_header_pos pos)
-{
-       unsigned int matchlen, matchoff;
-
-       if (ct_sip_get_info(*dptr, dlen, &matchoff, &matchlen, pos) <= 0)
-               return 0;
-
-       if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo,
-                                     matchoff, matchlen, buffer, bufflen))
-               return 0;
-
-       /* We need to reload this. Thanks Patrick. */
-       *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
-       return 1;
-}
-
-static int mangle_content_len(struct sk_buff **pskb,
-                             enum ip_conntrack_info ctinfo,
-                             struct ip_conntrack *ct,
-                             const char *dptr)
-{
-       unsigned int dataoff, matchoff, matchlen;
-       char buffer[sizeof("65536")];
-       int bufflen;
-
-       dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
-
-       /* Get actual SDP length */
-       if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff,
-                           &matchlen, POS_SDP_HEADER) > 0) {
-
-               /* since ct_sip_get_info() gives us a pointer just past 'v=',
-                  we need to add 2 bytes to this count. */
-               int c_len = (*pskb)->len - dataoff - matchoff + 2;
-
-               /* Now, update SDP length */
-               if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff,
-                                   &matchlen, POS_CONTENT) > 0) {
-
-                       bufflen = sprintf(buffer, "%u", c_len);
-
-                       return ip_nat_mangle_udp_packet(pskb, ct, ctinfo,
-                                                       matchoff, matchlen,
-                                                       buffer, bufflen);
-               }
-       }
-       return 0;
-}
-
-static unsigned int mangle_sdp(struct sk_buff **pskb,
-                              enum ip_conntrack_info ctinfo,
-                              struct ip_conntrack *ct,
-                              __be32 newip, u_int16_t port,
-                              const char *dptr)
-{
-       char buffer[sizeof("nnn.nnn.nnn.nnn")];
-       unsigned int dataoff, bufflen;
-
-       dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
-
-       /* Mangle owner and contact info. */
-       bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip));
-       if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
-                              buffer, bufflen, POS_OWNER))
-               return 0;
-
-       if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
-                              buffer, bufflen, POS_CONNECTION))
-               return 0;
-
-       /* Mangle media port. */
-       bufflen = sprintf(buffer, "%u", port);
-       if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
-                              buffer, bufflen, POS_MEDIA))
-               return 0;
-
-       return mangle_content_len(pskb, ctinfo, ct, dptr);
-}
-
-/* So, this packet has hit the connection tracking matching code.
-   Mangle it, and change the expectation to match the new version. */
-static unsigned int ip_nat_sdp(struct sk_buff **pskb,
-                              enum ip_conntrack_info ctinfo,
-                              struct ip_conntrack_expect *exp,
-                              const char *dptr)
-{
-       struct ip_conntrack *ct = exp->master;
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       __be32 newip;
-       u_int16_t port;
-
-       DEBUGP("ip_nat_sdp():\n");
-
-       /* Connection will come from reply */
-       newip = ct->tuplehash[!dir].tuple.dst.ip;
-
-       exp->tuple.dst.ip = newip;
-       exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
-       exp->dir = !dir;
-
-       /* When you see the packet, we need to NAT it the same as
-          this one. */
-       exp->expectfn = ip_nat_follow_master;
-
-       /* Try to get same port: if not, try to change it. */
-       for (port = ntohs(exp->saved_proto.udp.port); port != 0; port++) {
-               exp->tuple.dst.u.udp.port = htons(port);
-               if (ip_conntrack_expect_related(exp) == 0)
-                       break;
-       }
-
-       if (port == 0)
-               return NF_DROP;
-
-       if (!mangle_sdp(pskb, ctinfo, ct, newip, port, dptr)) {
-               ip_conntrack_unexpect_related(exp);
-               return NF_DROP;
-       }
-       return NF_ACCEPT;
-}
-
-static void __exit fini(void)
-{
-       rcu_assign_pointer(ip_nat_sip_hook, NULL);
-       rcu_assign_pointer(ip_nat_sdp_hook, NULL);
-       synchronize_rcu();
-}
-
-static int __init init(void)
-{
-       BUG_ON(rcu_dereference(ip_nat_sip_hook));
-       BUG_ON(rcu_dereference(ip_nat_sdp_hook));
-       rcu_assign_pointer(ip_nat_sip_hook, ip_nat_sip);
-       rcu_assign_pointer(ip_nat_sdp_hook, ip_nat_sdp);
-       return 0;
-}
-
-module_init(init);
-module_exit(fini);
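
Editor's note: a small userspace sketch (hypothetical struct and helper names, illustration only, not part of this diff) of the string handling behind addr_map_init() above: each side of the connection is rendered as "a.b.c.d:port" text so the helper can search for and substitute those strings inside SIP headers.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct mini_tuple {
	uint32_t src_ip, dst_ip;	/* network byte order */
	uint16_t src_port, dst_port;	/* network byte order */
};

/* Same shape as the sprintf() calls in addr_map_init(). */
static int format_endpoint(char *buf, size_t len, uint32_t ip, uint16_t port)
{
	struct in_addr a = { .s_addr = ip };

	return snprintf(buf, len, "%s:%u", inet_ntoa(a), ntohs(port));
}

int main(void)
{
	struct mini_tuple t = {
		.src_ip = htonl(0xc0a80002), .src_port = htons(5060),	/* 192.168.0.2:5060 */
		.dst_ip = htonl(0x08080808), .dst_port = htons(5060),	/* 8.8.8.8:5060 */
	};
	char src[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
	char dst[sizeof("nnn.nnn.nnn.nnn:nnnnn")];

	format_endpoint(src, sizeof(src), t.src_ip, t.src_port);
	format_endpoint(dst, sizeof(dst), t.dst_ip, t.dst_port);
	printf("src=%s dst=%s\n", src, dst);
	return 0;
}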
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
deleted file mode 100644 (file)
index e41d0ef..0000000
+++ /dev/null
@@ -1,1333 +0,0 @@
-/*
- * ip_nat_snmp_basic.c
- *
- * Basic SNMP Application Layer Gateway
- *
- * This IP NAT module is intended for use with SNMP network
- * discovery and monitoring applications where target networks use
- * conflicting private address realms.
- *
- * Static NAT is used to remap the networks from the view of the network
- * management system at the IP layer, and this module remaps some application
- * layer addresses to match.
- *
- * The simplest form of ALG is performed, where only tagged IP addresses
- * are modified.  The module does not need to be MIB aware and only scans
- * messages at the ASN.1/BER level.
- *
- * Currently, only SNMPv1 and SNMPv2 are supported.
- *
- * More information on ALG and associated issues can be found in
- * RFC 2962
- *
- * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory
- * McLean & Jochen Friedrich, stripped down for use in the kernel.
- *
- * Copyright (c) 2000 RP Internet (www.rpi.net.au).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- *
- * Author: James Morris <jmorris@intercode.com.au>
- *
- * Updates:
- * 2000-08-06: Convert to new helper API (Harald Welte).
- *
- */
-#include <linux/in.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/moduleparam.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <net/checksum.h>
-#include <net/udp.h>
-#include <asm/uaccess.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway");
-
-#define SNMP_PORT 161
-#define SNMP_TRAP_PORT 162
-#define NOCT1(n) (*(u8 *)n)
-
-static int debug;
-static DEFINE_SPINLOCK(snmp_lock);
-
-/*
- * Application layer address mapping mimics the NAT mapping, but
- * only for the first octet in this case (a more flexible system
- * can be implemented if needed).
- */
-struct oct1_map
-{
-       u_int8_t from;
-       u_int8_t to;
-};
-
-
-/*****************************************************************************
- *
- * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse)
- *
- *****************************************************************************/
-
-/* Class */
-#define ASN1_UNI       0       /* Universal */
-#define ASN1_APL       1       /* Application */
-#define ASN1_CTX       2       /* Context */
-#define ASN1_PRV       3       /* Private */
-
-/* Tag */
-#define ASN1_EOC       0       /* End Of Contents */
-#define ASN1_BOL       1       /* Boolean */
-#define ASN1_INT       2       /* Integer */
-#define ASN1_BTS       3       /* Bit String */
-#define ASN1_OTS       4       /* Octet String */
-#define ASN1_NUL       5       /* Null */
-#define ASN1_OJI       6       /* Object Identifier  */
-#define ASN1_OJD       7       /* Object Description */
-#define ASN1_EXT       8       /* External */
-#define ASN1_SEQ       16      /* Sequence */
-#define ASN1_SET       17      /* Set */
-#define ASN1_NUMSTR    18      /* Numerical String */
-#define ASN1_PRNSTR    19      /* Printable String */
-#define ASN1_TEXSTR    20      /* Teletext String */
-#define ASN1_VIDSTR    21      /* Video String */
-#define ASN1_IA5STR    22      /* IA5 String */
-#define ASN1_UNITIM    23      /* Universal Time */
-#define ASN1_GENTIM    24      /* General Time */
-#define ASN1_GRASTR    25      /* Graphical String */
-#define ASN1_VISSTR    26      /* Visible String */
-#define ASN1_GENSTR    27      /* General String */
-
-/* Primitive / Constructed methods*/
-#define ASN1_PRI       0       /* Primitive */
-#define ASN1_CON       1       /* Constructed */
-
-/*
- * Error codes.
- */
-#define ASN1_ERR_NOERROR               0
-#define ASN1_ERR_DEC_EMPTY             2
-#define ASN1_ERR_DEC_EOC_MISMATCH      3
-#define ASN1_ERR_DEC_LENGTH_MISMATCH   4
-#define ASN1_ERR_DEC_BADVALUE          5
-
-/*
- * ASN.1 context.
- */
-struct asn1_ctx
-{
-       int error;                      /* Error condition */
-       unsigned char *pointer;         /* Octet just to be decoded */
-       unsigned char *begin;           /* First octet */
-       unsigned char *end;             /* Octet after last octet */
-};
-
-/*
- * Octet string (not null terminated)
- */
-struct asn1_octstr
-{
-       unsigned char *data;
-       unsigned int len;
-};
-
-static void asn1_open(struct asn1_ctx *ctx,
-                     unsigned char *buf,
-                     unsigned int len)
-{
-       ctx->begin = buf;
-       ctx->end = buf + len;
-       ctx->pointer = buf;
-       ctx->error = ASN1_ERR_NOERROR;
-}
-
-static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
-{
-       if (ctx->pointer >= ctx->end) {
-               ctx->error = ASN1_ERR_DEC_EMPTY;
-               return 0;
-       }
-       *ch = *(ctx->pointer)++;
-       return 1;
-}
-
-static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
-{
-       unsigned char ch;
-
-       *tag = 0;
-
-       do
-       {
-               if (!asn1_octet_decode(ctx, &ch))
-                       return 0;
-               *tag <<= 7;
-               *tag |= ch & 0x7F;
-       } while ((ch & 0x80) == 0x80);
-       return 1;
-}
-
-static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
-                                   unsigned int *cls,
-                                   unsigned int *con,
-                                   unsigned int *tag)
-{
-       unsigned char ch;
-
-       if (!asn1_octet_decode(ctx, &ch))
-               return 0;
-
-       *cls = (ch & 0xC0) >> 6;
-       *con = (ch & 0x20) >> 5;
-       *tag = (ch & 0x1F);
-
-       if (*tag == 0x1F) {
-               if (!asn1_tag_decode(ctx, tag))
-                       return 0;
-       }
-       return 1;
-}
-
-static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
-                                       unsigned int *def,
-                                       unsigned int *len)
-{
-       unsigned char ch, cnt;
-
-       if (!asn1_octet_decode(ctx, &ch))
-               return 0;
-
-       if (ch == 0x80)
-               *def = 0;
-       else {
-               *def = 1;
-
-               if (ch < 0x80)
-                       *len = ch;
-               else {
-                       cnt = (unsigned char) (ch & 0x7F);
-                       *len = 0;
-
-                       while (cnt > 0) {
-                               if (!asn1_octet_decode(ctx, &ch))
-                                       return 0;
-                               *len <<= 8;
-                               *len |= ch;
-                               cnt--;
-                       }
-               }
-       }
-       return 1;
-}
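
/*
 * Editor's note (illustration only, not part of this diff): what
 * asn1_length_decode() above computes.  BER encodes a length either in
 * short form (a single octet < 0x80) or in long form (0x80|n followed by
 * n big-endian length octets); the indefinite form (a bare 0x80) is left
 * out of this standalone userspace sketch, and all names are hypothetical.
 */
#include <stdio.h>

static unsigned int ber_length(const unsigned char *p, unsigned int *used)
{
	unsigned int len, cnt, i = 0;
	unsigned char ch = p[i++];

	if (ch < 0x80) {
		len = ch;		/* short form */
	} else {
		cnt = ch & 0x7F;	/* long form: cnt length octets follow */
		for (len = 0; cnt > 0; cnt--) {
			len <<= 8;
			len |= p[i++];
		}
	}
	*used = i;
	return len;
}

int main(void)
{
	const unsigned char short_form[] = { 0x05 };			/* length 5 */
	const unsigned char long_form[]  = { 0x82, 0x01, 0x23 };	/* length 0x0123 = 291 */
	unsigned int used;

	printf("short: %u\n", ber_length(short_form, &used));
	printf("long:  %u\n", ber_length(long_form, &used));
	return 0;
}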
-
-static unsigned char asn1_header_decode(struct asn1_ctx *ctx,
-                                       unsigned char **eoc,
-                                       unsigned int *cls,
-                                       unsigned int *con,
-                                       unsigned int *tag)
-{
-       unsigned int def, len;
-
-       if (!asn1_id_decode(ctx, cls, con, tag))
-               return 0;
-
-       def = len = 0;
-       if (!asn1_length_decode(ctx, &def, &len))
-               return 0;
-
-       if (def)
-               *eoc = ctx->pointer + len;
-       else
-               *eoc = NULL;
-       return 1;
-}
-
-static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc)
-{
-       unsigned char ch;
-
-       if (eoc == 0) {
-               if (!asn1_octet_decode(ctx, &ch))
-                       return 0;
-
-               if (ch != 0x00) {
-                       ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
-                       return 0;
-               }
-
-               if (!asn1_octet_decode(ctx, &ch))
-                       return 0;
-
-               if (ch != 0x00) {
-                       ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
-                       return 0;
-               }
-               return 1;
-       } else {
-               if (ctx->pointer != eoc) {
-                       ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH;
-                       return 0;
-               }
-               return 1;
-       }
-}
-
-static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc)
-{
-       ctx->pointer = eoc;
-       return 1;
-}
-
-static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
-                                     unsigned char *eoc,
-                                     long *integer)
-{
-       unsigned char ch;
-       unsigned int  len;
-
-       if (!asn1_octet_decode(ctx, &ch))
-               return 0;
-
-       *integer = (signed char) ch;
-       len = 1;
-
-       while (ctx->pointer < eoc) {
-               if (++len > sizeof (long)) {
-                       ctx->error = ASN1_ERR_DEC_BADVALUE;
-                       return 0;
-               }
-
-               if (!asn1_octet_decode(ctx, &ch))
-                       return 0;
-
-               *integer <<= 8;
-               *integer |= ch;
-       }
-       return 1;
-}
-
-static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
-                                     unsigned char *eoc,
-                                     unsigned int *integer)
-{
-       unsigned char ch;
-       unsigned int  len;
-
-       if (!asn1_octet_decode(ctx, &ch))
-               return 0;
-
-       *integer = ch;
-       if (ch == 0) len = 0;
-       else len = 1;
-
-       while (ctx->pointer < eoc) {
-               if (++len > sizeof (unsigned int)) {
-                       ctx->error = ASN1_ERR_DEC_BADVALUE;
-                       return 0;
-               }
-
-               if (!asn1_octet_decode(ctx, &ch))
-                       return 0;
-
-               *integer <<= 8;
-               *integer |= ch;
-       }
-       return 1;
-}
-
-static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
-                                      unsigned char *eoc,
-                                      unsigned long *integer)
-{
-       unsigned char ch;
-       unsigned int  len;
-
-       if (!asn1_octet_decode(ctx, &ch))
-               return 0;
-
-       *integer = ch;
-       if (ch == 0) len = 0;
-       else len = 1;
-
-       while (ctx->pointer < eoc) {
-               if (++len > sizeof (unsigned long)) {
-                       ctx->error = ASN1_ERR_DEC_BADVALUE;
-                       return 0;
-               }
-
-               if (!asn1_octet_decode(ctx, &ch))
-                       return 0;
-
-               *integer <<= 8;
-               *integer |= ch;
-       }
-       return 1;
-}
-
-static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
-                                       unsigned char *eoc,
-                                       unsigned char **octets,
-                                       unsigned int *len)
-{
-       unsigned char *ptr;
-
-       *len = 0;
-
-       *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
-       if (*octets == NULL) {
-               if (net_ratelimit())
-                       printk("OOM in bsalg (%d)\n", __LINE__);
-               return 0;
-       }
-
-       ptr = *octets;
-       while (ctx->pointer < eoc) {
-               if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) {
-                       kfree(*octets);
-                       *octets = NULL;
-                       return 0;
-               }
-               (*len)++;
-       }
-       return 1;
-}
-
-static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
-                                      unsigned long *subid)
-{
-       unsigned char ch;
-
-       *subid = 0;
-
-       do {
-               if (!asn1_octet_decode(ctx, &ch))
-                       return 0;
-
-               *subid <<= 7;
-               *subid |= ch & 0x7F;
-       } while ((ch & 0x80) == 0x80);
-       return 1;
-}
-
-static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
-                                    unsigned char *eoc,
-                                    unsigned long **oid,
-                                    unsigned int *len)
-{
-       unsigned long subid;
-       unsigned int  size;
-       unsigned long *optr;
-
-       size = eoc - ctx->pointer + 1;
-       *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
-       if (*oid == NULL) {
-               if (net_ratelimit())
-                       printk("OOM in bsalg (%d)\n", __LINE__);
-               return 0;
-       }
-
-       optr = *oid;
-
-       if (!asn1_subid_decode(ctx, &subid)) {
-               kfree(*oid);
-               *oid = NULL;
-               return 0;
-       }
-
-       if (subid < 40) {
-               optr [0] = 0;
-               optr [1] = subid;
-       } else if (subid < 80) {
-               optr [0] = 1;
-               optr [1] = subid - 40;
-       } else {
-               optr [0] = 2;
-               optr [1] = subid - 80;
-       }
-
-       *len = 2;
-       optr += 2;
-
-       while (ctx->pointer < eoc) {
-               if (++(*len) > size) {
-                       ctx->error = ASN1_ERR_DEC_BADVALUE;
-                       kfree(*oid);
-                       *oid = NULL;
-                       return 0;
-               }
-
-               if (!asn1_subid_decode(ctx, optr++)) {
-                       kfree(*oid);
-                       *oid = NULL;
-                       return 0;
-               }
-       }
-       return 1;
-}
-
-/*****************************************************************************
- *
- * SNMP decoding routines (gxsnmp author Dirk Wisse)
- *
- *****************************************************************************/
-
-/* SNMP Versions */
-#define SNMP_V1                                0
-#define SNMP_V2C                       1
-#define SNMP_V2                                2
-#define SNMP_V3                                3
-
-/* Default Sizes */
-#define SNMP_SIZE_COMM                 256
-#define SNMP_SIZE_OBJECTID             128
-#define SNMP_SIZE_BUFCHR               256
-#define SNMP_SIZE_BUFINT               128
-#define SNMP_SIZE_SMALLOBJECTID                16
-
-/* Requests */
-#define SNMP_PDU_GET                   0
-#define SNMP_PDU_NEXT                  1
-#define SNMP_PDU_RESPONSE              2
-#define SNMP_PDU_SET                   3
-#define SNMP_PDU_TRAP1                 4
-#define SNMP_PDU_BULK                  5
-#define SNMP_PDU_INFORM                        6
-#define SNMP_PDU_TRAP2                 7
-
-/* Errors */
-#define SNMP_NOERROR                   0
-#define SNMP_TOOBIG                    1
-#define SNMP_NOSUCHNAME                        2
-#define SNMP_BADVALUE                  3
-#define SNMP_READONLY                  4
-#define SNMP_GENERROR                  5
-#define SNMP_NOACCESS                  6
-#define SNMP_WRONGTYPE                 7
-#define SNMP_WRONGLENGTH               8
-#define SNMP_WRONGENCODING             9
-#define SNMP_WRONGVALUE                        10
-#define SNMP_NOCREATION                        11
-#define SNMP_INCONSISTENTVALUE         12
-#define SNMP_RESOURCEUNAVAILABLE       13
-#define SNMP_COMMITFAILED              14
-#define SNMP_UNDOFAILED                        15
-#define SNMP_AUTHORIZATIONERROR                16
-#define SNMP_NOTWRITABLE               17
-#define SNMP_INCONSISTENTNAME          18
-
-/* General SNMP V1 Traps */
-#define SNMP_TRAP_COLDSTART            0
-#define SNMP_TRAP_WARMSTART            1
-#define SNMP_TRAP_LINKDOWN             2
-#define SNMP_TRAP_LINKUP               3
-#define SNMP_TRAP_AUTFAILURE           4
-#define SNMP_TRAP_EQPNEIGHBORLOSS      5
-#define SNMP_TRAP_ENTSPECIFIC          6
-
-/* SNMPv1 Types */
-#define SNMP_NULL                0
-#define SNMP_INTEGER             1    /* l  */
-#define SNMP_OCTETSTR            2    /* c  */
-#define SNMP_DISPLAYSTR          2    /* c  */
-#define SNMP_OBJECTID            3    /* ul */
-#define SNMP_IPADDR              4    /* uc */
-#define SNMP_COUNTER             5    /* ul */
-#define SNMP_GAUGE               6    /* ul */
-#define SNMP_TIMETICKS           7    /* ul */
-#define SNMP_OPAQUE              8    /* c  */
-
-/* Additional SNMPv2 Types */
-#define SNMP_UINTEGER            5    /* ul */
-#define SNMP_BITSTR              9    /* uc */
-#define SNMP_NSAP               10    /* uc */
-#define SNMP_COUNTER64          11    /* ul */
-#define SNMP_NOSUCHOBJECT       12
-#define SNMP_NOSUCHINSTANCE     13
-#define SNMP_ENDOFMIBVIEW       14
-
-union snmp_syntax
-{
-       unsigned char uc[0];    /* 8 bit unsigned */
-       char c[0];              /* 8 bit signed */
-       unsigned long ul[0];    /* 32 bit unsigned */
-       long l[0];              /* 32 bit signed */
-};
-
-struct snmp_object
-{
-       unsigned long *id;
-       unsigned int id_len;
-       unsigned short type;
-       unsigned int syntax_len;
-       union snmp_syntax syntax;
-};
-
-struct snmp_request
-{
-       unsigned long id;
-       unsigned int error_status;
-       unsigned int error_index;
-};
-
-struct snmp_v1_trap
-{
-       unsigned long *id;
-       unsigned int id_len;
-       unsigned long ip_address;       /* pointer  */
-       unsigned int general;
-       unsigned int specific;
-       unsigned long time;
-};
-
-/* SNMP types */
-#define SNMP_IPA    0
-#define SNMP_CNT    1
-#define SNMP_GGE    2
-#define SNMP_TIT    3
-#define SNMP_OPQ    4
-#define SNMP_C64    6
-
-/* SNMP errors */
-#define SERR_NSO    0
-#define SERR_NSI    1
-#define SERR_EOM    2
-
-static inline void mangle_address(unsigned char *begin,
-                                 unsigned char *addr,
-                                 const struct oct1_map *map,
-                                 __sum16 *check);
-struct snmp_cnv
-{
-       unsigned int class;
-       unsigned int tag;
-       int syntax;
-};
-
-static struct snmp_cnv snmp_conv [] =
-{
-       {ASN1_UNI, ASN1_NUL, SNMP_NULL},
-       {ASN1_UNI, ASN1_INT, SNMP_INTEGER},
-       {ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR},
-       {ASN1_UNI, ASN1_OTS, SNMP_DISPLAYSTR},
-       {ASN1_UNI, ASN1_OJI, SNMP_OBJECTID},
-       {ASN1_APL, SNMP_IPA, SNMP_IPADDR},
-       {ASN1_APL, SNMP_CNT, SNMP_COUNTER},     /* Counter32 */
-       {ASN1_APL, SNMP_GGE, SNMP_GAUGE},       /* Gauge32 == Unsigned32  */
-       {ASN1_APL, SNMP_TIT, SNMP_TIMETICKS},
-       {ASN1_APL, SNMP_OPQ, SNMP_OPAQUE},
-
-       /* SNMPv2 data types and errors */
-       {ASN1_UNI, ASN1_BTS, SNMP_BITSTR},
-       {ASN1_APL, SNMP_C64, SNMP_COUNTER64},
-       {ASN1_CTX, SERR_NSO, SNMP_NOSUCHOBJECT},
-       {ASN1_CTX, SERR_NSI, SNMP_NOSUCHINSTANCE},
-       {ASN1_CTX, SERR_EOM, SNMP_ENDOFMIBVIEW},
-       {0,       0,       -1}
-};
-
-static unsigned char snmp_tag_cls2syntax(unsigned int tag,
-                                        unsigned int cls,
-                                        unsigned short *syntax)
-{
-       struct snmp_cnv *cnv;
-
-       cnv = snmp_conv;
-
-       while (cnv->syntax != -1) {
-               if (cnv->tag == tag && cnv->class == cls) {
-                       *syntax = cnv->syntax;
-                       return 1;
-               }
-               cnv++;
-       }
-       return 0;
-}
-
-static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
-                                       struct snmp_object **obj)
-{
-       unsigned int cls, con, tag, len, idlen;
-       unsigned short type;
-       unsigned char *eoc, *end, *p;
-       unsigned long *lp, *id;
-       unsigned long ul;
-       long l;
-
-       *obj = NULL;
-       id = NULL;
-
-       if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag))
-               return 0;
-
-       if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
-               return 0;
-
-       if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-               return 0;
-
-       if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
-               return 0;
-
-       if (!asn1_oid_decode(ctx, end, &id, &idlen))
-               return 0;
-
-       if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) {
-               kfree(id);
-               return 0;
-       }
-
-       if (con != ASN1_PRI) {
-               kfree(id);
-               return 0;
-       }
-
-       type = 0;
-       if (!snmp_tag_cls2syntax(tag, cls, &type)) {
-               kfree(id);
-               return 0;
-       }
-
-       l = 0;
-       switch (type) {
-               case SNMP_INTEGER:
-                       len = sizeof(long);
-                       if (!asn1_long_decode(ctx, end, &l)) {
-                               kfree(id);
-                               return 0;
-                       }
-                       *obj = kmalloc(sizeof(struct snmp_object) + len,
-                                      GFP_ATOMIC);
-                       if (*obj == NULL) {
-                               kfree(id);
-                               if (net_ratelimit())
-                                       printk("OOM in bsalg (%d)\n", __LINE__);
-                               return 0;
-                       }
-                       (*obj)->syntax.l[0] = l;
-                       break;
-               case SNMP_OCTETSTR:
-               case SNMP_OPAQUE:
-                       if (!asn1_octets_decode(ctx, end, &p, &len)) {
-                               kfree(id);
-                               return 0;
-                       }
-                       *obj = kmalloc(sizeof(struct snmp_object) + len,
-                                      GFP_ATOMIC);
-                       if (*obj == NULL) {
-                               kfree(id);
-                               if (net_ratelimit())
-                                       printk("OOM in bsalg (%d)\n", __LINE__);
-                               return 0;
-                       }
-                       memcpy((*obj)->syntax.c, p, len);
-                       kfree(p);
-                       break;
-               case SNMP_NULL:
-               case SNMP_NOSUCHOBJECT:
-               case SNMP_NOSUCHINSTANCE:
-               case SNMP_ENDOFMIBVIEW:
-                       len = 0;
-                       *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
-                       if (*obj == NULL) {
-                               kfree(id);
-                               if (net_ratelimit())
-                                       printk("OOM in bsalg (%d)\n", __LINE__);
-                               return 0;
-                       }
-                       if (!asn1_null_decode(ctx, end)) {
-                               kfree(id);
-                               kfree(*obj);
-                               *obj = NULL;
-                               return 0;
-                       }
-                       break;
-               case SNMP_OBJECTID:
-                       if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) {
-                               kfree(id);
-                               return 0;
-                       }
-                       len *= sizeof(unsigned long);
-                       *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
-                       if (*obj == NULL) {
-                               kfree(lp);
-                               kfree(id);
-                               if (net_ratelimit())
-                                       printk("OOM in bsalg (%d)\n", __LINE__);
-                               return 0;
-                       }
-                       memcpy((*obj)->syntax.ul, lp, len);
-                       kfree(lp);
-                       break;
-               case SNMP_IPADDR:
-                       if (!asn1_octets_decode(ctx, end, &p, &len)) {
-                               kfree(id);
-                               return 0;
-                       }
-                       if (len != 4) {
-                               kfree(p);
-                               kfree(id);
-                               return 0;
-                       }
-                       *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
-                       if (*obj == NULL) {
-                               kfree(p);
-                               kfree(id);
-                               if (net_ratelimit())
-                                       printk("OOM in bsalg (%d)\n", __LINE__);
-                               return 0;
-                       }
-                       memcpy((*obj)->syntax.uc, p, len);
-                       kfree(p);
-                       break;
-               case SNMP_COUNTER:
-               case SNMP_GAUGE:
-               case SNMP_TIMETICKS:
-                       len = sizeof(unsigned long);
-                       if (!asn1_ulong_decode(ctx, end, &ul)) {
-                               kfree(id);
-                               return 0;
-                       }
-                       *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
-                       if (*obj == NULL) {
-                               kfree(id);
-                               if (net_ratelimit())
-                                       printk("OOM in bsalg (%d)\n", __LINE__);
-                               return 0;
-                       }
-                       (*obj)->syntax.ul[0] = ul;
-                       break;
-               default:
-                       kfree(id);
-                       return 0;
-       }
-
-       (*obj)->syntax_len = len;
-       (*obj)->type = type;
-       (*obj)->id = id;
-       (*obj)->id_len = idlen;
-
-       if (!asn1_eoc_decode(ctx, eoc)) {
-               kfree(id);
-               kfree(*obj);
-               *obj = NULL;
-               return 0;
-       }
-       return 1;
-}
-
-static unsigned char snmp_request_decode(struct asn1_ctx *ctx,
-                                        struct snmp_request *request)
-{
-       unsigned int cls, con, tag;
-       unsigned char *end;
-
-       if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-               return 0;
-
-       if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-               return 0;
-
-       if (!asn1_ulong_decode(ctx, end, &request->id))
-               return 0;
-
-       if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-               return 0;
-
-       if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-               return 0;
-
-       if (!asn1_uint_decode(ctx, end, &request->error_status))
-               return 0;
-
-       if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-               return 0;
-
-       if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-               return 0;
-
-       if (!asn1_uint_decode(ctx, end, &request->error_index))
-               return 0;
-
-       return 1;
-}
-
-/*
- * Fast checksum update for possibly oddly-aligned UDP byte, from the
- * code example in the draft.
- */
-static void fast_csum(__sum16 *csum,
-                     const unsigned char *optr,
-                     const unsigned char *nptr,
-                     int offset)
-{
-       unsigned char s[4];
-
-       if (offset & 1) {
-               s[0] = s[2] = 0;
-               s[1] = ~*optr;
-               s[3] = *nptr;
-       } else {
-               s[1] = s[3] = 0;
-               s[0] = ~*optr;
-               s[2] = *nptr;
-       }
-
-       *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
-}
-
-/*
- * Mangle IP address.
- *     - begin points to the start of the SNMP message
- *     - addr points to the start of the address
- */
-static inline void mangle_address(unsigned char *begin,
-                                 unsigned char *addr,
-                                 const struct oct1_map *map,
-                                 __sum16 *check)
-{
-       if (map->from == NOCT1(addr)) {
-               u_int32_t old;
-
-               if (debug)
-                       memcpy(&old, (unsigned char *)addr, sizeof(old));
-
-               *addr = map->to;
-
-               /* Update UDP checksum if being used */
-               if (*check) {
-                       fast_csum(check,
-                                 &map->from, &map->to, addr - begin);
-               }
-
-               if (debug)
-                       printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
-                              "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
-       }
-}
-
-static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
-                                     struct snmp_v1_trap *trap,
-                                     const struct oct1_map *map,
-                                     __sum16 *check)
-{
-       unsigned int cls, con, tag, len;
-       unsigned char *end;
-
-       if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-               return 0;
-
-       if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
-               return 0;
-
-       if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len))
-               return 0;
-
-       if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-               goto err_id_free;
-
-       if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) ||
-             (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS)))
-               goto err_id_free;
-
-       if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len))
-               goto err_id_free;
-
-       /* IPv4 only */
-       if (len != 4)
-               goto err_addr_free;
-
-       mangle_address(ctx->begin, ctx->pointer - 4, map, check);
-
-       if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-               goto err_addr_free;
-
-       if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-               goto err_addr_free;
-
-       if (!asn1_uint_decode(ctx, end, &trap->general))
-               goto err_addr_free;
-
-       if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-               goto err_addr_free;
-
-       if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-               goto err_addr_free;
-
-       if (!asn1_uint_decode(ctx, end, &trap->specific))
-               goto err_addr_free;
-
-       if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
-               goto err_addr_free;
-
-       if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) ||
-             (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT)))
-               goto err_addr_free;
-
-       if (!asn1_ulong_decode(ctx, end, &trap->time))
-               goto err_addr_free;
-
-       return 1;
-
-err_addr_free:
-       kfree((unsigned long *)trap->ip_address);
-
-err_id_free:
-       kfree(trap->id);
-
-       return 0;
-}
-
-/*****************************************************************************
- *
- * Misc. routines
- *
- *****************************************************************************/
-
-static void hex_dump(unsigned char *buf, size_t len)
-{
-       size_t i;
-
-       for (i = 0; i < len; i++) {
-               if (i && !(i % 16))
-                       printk("\n");
-               printk("%02x ", *(buf + i));
-       }
-       printk("\n");
-}
-
-/*
- * Parse and mangle SNMP message according to mapping.
- * (And this is only the 'basic' method.)
- */
-static int snmp_parse_mangle(unsigned char *msg,
-                            u_int16_t len,
-                            const struct oct1_map *map,
-                            __sum16 *check)
-{
-       unsigned char *eoc, *end;
-       unsigned int cls, con, tag, vers, pdutype;
-       struct asn1_ctx ctx;
-       struct asn1_octstr comm;
-       struct snmp_object **obj;
-
-       if (debug > 1)
-               hex_dump(msg, len);
-
-       asn1_open(&ctx, msg, len);
-
-       /*
-        * Start of SNMP message.
-        */
-       if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
-               return 0;
-       if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
-               return 0;
-
-       /*
-        * Version 1 or 2 handled.
-        */
-       if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag))
-               return 0;
-       if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
-               return 0;
-       if (!asn1_uint_decode (&ctx, end, &vers))
-               return 0;
-       if (debug > 1)
-               printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1);
-       if (vers > 1)
-               return 1;
-
-       /*
-        * Community.
-        */
-       if (!asn1_header_decode (&ctx, &end, &cls, &con, &tag))
-               return 0;
-       if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OTS)
-               return 0;
-       if (!asn1_octets_decode(&ctx, end, &comm.data, &comm.len))
-               return 0;
-       if (debug > 1) {
-               unsigned int i;
-
-               printk(KERN_DEBUG "bsalg: community: ");
-               for (i = 0; i < comm.len; i++)
-                       printk("%c", comm.data[i]);
-               printk("\n");
-       }
-       kfree(comm.data);
-
-       /*
-        * PDU type
-        */
-       if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &pdutype))
-               return 0;
-       if (cls != ASN1_CTX || con != ASN1_CON)
-               return 0;
-       if (debug > 1) {
-               unsigned char *pdus[] = {
-                       [SNMP_PDU_GET] = "get",
-                       [SNMP_PDU_NEXT] = "get-next",
-                       [SNMP_PDU_RESPONSE] = "response",
-                       [SNMP_PDU_SET] = "set",
-                       [SNMP_PDU_TRAP1] = "trapv1",
-                       [SNMP_PDU_BULK] = "bulk",
-                       [SNMP_PDU_INFORM] = "inform",
-                       [SNMP_PDU_TRAP2] = "trapv2"
-               };
-
-               if (pdutype > SNMP_PDU_TRAP2)
-                       printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype);
-               else
-                       printk(KERN_DEBUG "bsalg: pdu: %s\n", pdus[pdutype]);
-       }
-       if (pdutype != SNMP_PDU_RESPONSE &&
-           pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2)
-               return 1;
-
-       /*
-        * Request header or v1 trap
-        */
-       if (pdutype == SNMP_PDU_TRAP1) {
-               struct snmp_v1_trap trap;
-               unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check);
-
-               if (ret) {
-                       kfree(trap.id);
-                       kfree((unsigned long *)trap.ip_address);
-               } else
-                       return ret;
-
-       } else {
-               struct snmp_request req;
-
-               if (!snmp_request_decode(&ctx, &req))
-                       return 0;
-
-               if (debug > 1)
-                       printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u "
-                       "error_index=%u\n", req.id, req.error_status,
-                       req.error_index);
-       }
-
-       /*
-        * Loop through objects, look for IP addresses to mangle.
-        */
-       if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
-               return 0;
-
-       if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
-               return 0;
-
-       obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
-       if (obj == NULL) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__);
-               return 0;
-       }
-
-       while (!asn1_eoc_decode(&ctx, eoc)) {
-               unsigned int i;
-
-               if (!snmp_object_decode(&ctx, obj)) {
-                       if (*obj) {
-                               kfree((*obj)->id);
-                               kfree(*obj);
-                       }
-                       kfree(obj);
-                       return 0;
-               }
-
-               if (debug > 1) {
-                       printk(KERN_DEBUG "bsalg: object: ");
-                       for (i = 0; i < (*obj)->id_len; i++) {
-                               if (i > 0)
-                                       printk(".");
-                               printk("%lu", (*obj)->id[i]);
-                       }
-                       printk(": type=%u\n", (*obj)->type);
-
-               }
-
-               if ((*obj)->type == SNMP_IPADDR)
-                       mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
-
-               kfree((*obj)->id);
-               kfree(*obj);
-       }
-       kfree(obj);
-
-       if (!asn1_eoc_decode(&ctx, eoc))
-               return 0;
-
-       return 1;
-}
-
-/*****************************************************************************
- *
- * NAT routines.
- *
- *****************************************************************************/
-
-/*
- * SNMP translation routine.
- */
-static int snmp_translate(struct ip_conntrack *ct,
-                         enum ip_conntrack_info ctinfo,
-                         struct sk_buff **pskb)
-{
-       struct iphdr *iph = (*pskb)->nh.iph;
-       struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
-       u_int16_t udplen = ntohs(udph->len);
-       u_int16_t paylen = udplen - sizeof(struct udphdr);
-       int dir = CTINFO2DIR(ctinfo);
-       struct oct1_map map;
-
-       /*
-        * Determine mapping for application-layer addresses based
-        * on NAT manipulations for the packet.
-        */
-       if (dir == IP_CT_DIR_ORIGINAL) {
-               /* SNAT traps */
-               map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip);
-               map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip);
-       } else {
-               /* DNAT replies */
-               map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip);
-               map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip);
-       }
-
-       if (map.from == map.to)
-               return NF_ACCEPT;
-
-       if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
-                              paylen, &map, &udph->check)) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "bsalg: parser failed\n");
-               return NF_DROP;
-       }
-       return NF_ACCEPT;
-}
-
-/* We don't actually set up expectations, just adjust internal IP
- * addresses if this is being NATted */
-static int help(struct sk_buff **pskb,
-               struct ip_conntrack *ct,
-               enum ip_conntrack_info ctinfo)
-{
-       int dir = CTINFO2DIR(ctinfo);
-       unsigned int ret;
-       struct iphdr *iph = (*pskb)->nh.iph;
-       struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
-
-       /* SNMP replies and originating SNMP traps get mangled */
-       if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
-               return NF_ACCEPT;
-       if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
-               return NF_ACCEPT;
-
-       /* No NAT? */
-       if (!(ct->status & IPS_NAT_MASK))
-               return NF_ACCEPT;
-
-       /*
-        * Make sure the packet length is ok.  So far, we were only guaranteed
-        * to have a valid length IP header plus 8 bytes, which means we have
-        * enough room for a UDP header.  Just verify the UDP length field so we
-        * can mess around with the payload.
-        */
-       if (ntohs(udph->len) != (*pskb)->len - (iph->ihl << 2)) {
-                if (net_ratelimit())
-                        printk(KERN_WARNING "SNMP: dropping malformed packet "
-                               "src=%u.%u.%u.%u dst=%u.%u.%u.%u\n",
-                               NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
-                return NF_DROP;
-       }
-
-       if (!skb_make_writable(pskb, (*pskb)->len))
-               return NF_DROP;
-
-       spin_lock_bh(&snmp_lock);
-       ret = snmp_translate(ct, ctinfo, pskb);
-       spin_unlock_bh(&snmp_lock);
-       return ret;
-}
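Note: the length check in help() above only trusts the IP header plus eight bytes, so it cross-checks the UDP length field against the IP payload length before any SNMP parsing touches the payload. A compact stand-alone sketch of that consistency check (illustrative names, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* Return 1 if the UDP length field exactly covers the IP payload. */
static int udp_length_ok(uint16_t ip_tot_len, uint8_t ihl_words, uint16_t udp_len)
{
        uint16_t ip_hdr_len = (uint16_t)ihl_words * 4;

        if (ihl_words < 5 || ip_tot_len < ip_hdr_len + 8)
                return 0;       /* bad IP header length, or no room for UDP */
        return udp_len == ip_tot_len - ip_hdr_len;
}

int main(void)
{
        /* 20-byte IP header + 8-byte UDP header + 50-byte SNMP payload */
        printf("%d\n", udp_length_ok(78, 5, 58));       /* 1: consistent     */
        printf("%d\n", udp_length_ok(78, 5, 60));       /* 0: lying UDP len  */
        return 0;
}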
-
-static struct ip_conntrack_helper snmp_helper = {
-       .max_expected = 0,
-       .timeout = 180,
-       .me = THIS_MODULE,
-       .help = help,
-       .name = "snmp",
-
-       .tuple = {.src = {.u = {.udp = {.port = __constant_htons(SNMP_PORT)}}},
-                 .dst = {.protonum = IPPROTO_UDP},
-       },
-       .mask = {.src = {.u = {0xFFFF}},
-                .dst = {.protonum = 0xFF},
-       },
-};
-
-static struct ip_conntrack_helper snmp_trap_helper = {
-       .max_expected = 0,
-       .timeout = 180,
-       .me = THIS_MODULE,
-       .help = help,
-       .name = "snmp_trap",
-
-       .tuple = {.src = {.u = {.udp = {.port = __constant_htons(SNMP_TRAP_PORT)}}},
-                 .dst = {.protonum = IPPROTO_UDP},
-       },
-       .mask = {.src = {.u = {0xFFFF}},
-                .dst = {.protonum = 0xFF},
-       },
-};
-
-/*****************************************************************************
- *
- * Module stuff.
- *
- *****************************************************************************/
-
-static int __init ip_nat_snmp_basic_init(void)
-{
-       int ret = 0;
-
-       ret = ip_conntrack_helper_register(&snmp_helper);
-       if (ret < 0)
-               return ret;
-       ret = ip_conntrack_helper_register(&snmp_trap_helper);
-       if (ret < 0) {
-               ip_conntrack_helper_unregister(&snmp_helper);
-               return ret;
-       }
-       return ret;
-}
-
-static void __exit ip_nat_snmp_basic_fini(void)
-{
-       ip_conntrack_helper_unregister(&snmp_helper);
-       ip_conntrack_helper_unregister(&snmp_trap_helper);
-}
-
-module_init(ip_nat_snmp_basic_init);
-module_exit(ip_nat_snmp_basic_fini);
-
-module_param(debug, int, 0600);
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
deleted file mode 100644 (file)
index 6bcfdf6..0000000
+++ /dev/null
@@ -1,388 +0,0 @@
-/* This file contains all the functions required for the standalone
-   ip_nat module.
-
-   These are not required by the compatibility layer.
-*/
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
- *     - new API and handling of conntrack/nat helpers
- *     - now capable of multiple expectations for one master
- * */
-
-#include <linux/types.h>
-#include <linux/icmp.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/proc_fs.h>
-#include <net/ip.h>
-#include <net/checksum.h>
-#include <linux/spinlock.h>
-
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-#include <linux/netfilter_ipv4/ip_nat_core.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-#ifdef CONFIG_XFRM
-static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
-{
-       struct ip_conntrack *ct;
-       struct ip_conntrack_tuple *t;
-       enum ip_conntrack_info ctinfo;
-       enum ip_conntrack_dir dir;
-       unsigned long statusbit;
-
-       ct = ip_conntrack_get(skb, &ctinfo);
-       if (ct == NULL)
-               return;
-       dir = CTINFO2DIR(ctinfo);
-       t = &ct->tuplehash[dir].tuple;
-
-       if (dir == IP_CT_DIR_ORIGINAL)
-               statusbit = IPS_DST_NAT;
-       else
-               statusbit = IPS_SRC_NAT;
-
-       if (ct->status & statusbit) {
-               fl->fl4_dst = t->dst.ip;
-               if (t->dst.protonum == IPPROTO_TCP ||
-                   t->dst.protonum == IPPROTO_UDP)
-                       fl->fl_ip_dport = t->dst.u.tcp.port;
-       }
-
-       statusbit ^= IPS_NAT_MASK;
-
-       if (ct->status & statusbit) {
-               fl->fl4_src = t->src.ip;
-               if (t->dst.protonum == IPPROTO_TCP ||
-                   t->dst.protonum == IPPROTO_UDP)
-                       fl->fl_ip_sport = t->src.u.tcp.port;
-       }
-}
-#endif
-
-static unsigned int
-ip_nat_fn(unsigned int hooknum,
-         struct sk_buff **pskb,
-         const struct net_device *in,
-         const struct net_device *out,
-         int (*okfn)(struct sk_buff *))
-{
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-       struct ip_nat_info *info;
-       /* maniptype == SRC for postrouting. */
-       enum ip_nat_manip_type maniptype = HOOK2MANIP(hooknum);
-
-       /* We never see fragments: conntrack defrags on pre-routing
-          and local-out, and ip_nat_out protects post-routing. */
-       IP_NF_ASSERT(!((*pskb)->nh.iph->frag_off
-                      & htons(IP_MF|IP_OFFSET)));
-
-       ct = ip_conntrack_get(*pskb, &ctinfo);
-       /* Can't track?  It's not due to stress, or conntrack would
-          have dropped it.  Hence it's the user's responsibility to
-          packet filter it out, or implement conntrack/NAT for that
-          protocol. 8) --RR */
-       if (!ct) {
-               /* Exception: ICMP redirect to new connection (not in
-                  hash table yet).  We must not let this through, in
-                  case we're doing NAT to the same network. */
-               if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
-                       struct icmphdr _hdr, *hp;
-
-                       hp = skb_header_pointer(*pskb,
-                                               (*pskb)->nh.iph->ihl*4,
-                                               sizeof(_hdr), &_hdr);
-                       if (hp != NULL &&
-                           hp->type == ICMP_REDIRECT)
-                               return NF_DROP;
-               }
-               return NF_ACCEPT;
-       }
-
-       /* Don't try to NAT if this packet is not conntracked */
-       if (ct == &ip_conntrack_untracked)
-               return NF_ACCEPT;
-
-       switch (ctinfo) {
-       case IP_CT_RELATED:
-       case IP_CT_RELATED+IP_CT_IS_REPLY:
-               if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
-                       if (!ip_nat_icmp_reply_translation(ct, ctinfo,
-                                                          hooknum, pskb))
-                               return NF_DROP;
-                       else
-                               return NF_ACCEPT;
-               }
-               /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
-       case IP_CT_NEW:
-               info = &ct->nat.info;
-
-               /* Seen it before?  This can happen for loopback, retrans,
-                  or local packets.. */
-               if (!ip_nat_initialized(ct, maniptype)) {
-                       unsigned int ret;
-
-                       if (unlikely(is_confirmed(ct)))
-                               /* NAT module was loaded late */
-                               ret = alloc_null_binding_confirmed(ct, info,
-                                                                  hooknum);
-                       else if (hooknum == NF_IP_LOCAL_IN)
-                               /* LOCAL_IN hook doesn't have a chain!  */
-                               ret = alloc_null_binding(ct, info, hooknum);
-                       else
-                               ret = ip_nat_rule_find(pskb, hooknum,
-                                                      in, out, ct,
-                                                      info);
-
-                       if (ret != NF_ACCEPT) {
-                               return ret;
-                       }
-               } else
-                       DEBUGP("Already setup manip %s for ct %p\n",
-                              maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
-                              ct);
-               break;
-
-       default:
-               /* ESTABLISHED */
-               IP_NF_ASSERT(ctinfo == IP_CT_ESTABLISHED
-                            || ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
-               info = &ct->nat.info;
-       }
-
-       IP_NF_ASSERT(info);
-       return ip_nat_packet(ct, ctinfo, hooknum, pskb);
-}
-
-static unsigned int
-ip_nat_in(unsigned int hooknum,
-         struct sk_buff **pskb,
-         const struct net_device *in,
-         const struct net_device *out,
-         int (*okfn)(struct sk_buff *))
-{
-       unsigned int ret;
-       __be32 daddr = (*pskb)->nh.iph->daddr;
-
-       ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
-       if (ret != NF_DROP && ret != NF_STOLEN
-           && daddr != (*pskb)->nh.iph->daddr) {
-               dst_release((*pskb)->dst);
-               (*pskb)->dst = NULL;
-       }
-       return ret;
-}
-
-static unsigned int
-ip_nat_out(unsigned int hooknum,
-          struct sk_buff **pskb,
-          const struct net_device *in,
-          const struct net_device *out,
-          int (*okfn)(struct sk_buff *))
-{
-#ifdef CONFIG_XFRM
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-#endif
-       unsigned int ret;
-
-       /* root is playing with raw sockets. */
-       if ((*pskb)->len < sizeof(struct iphdr)
-           || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr))
-               return NF_ACCEPT;
-
-       ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
-#ifdef CONFIG_XFRM
-       if (ret != NF_DROP && ret != NF_STOLEN
-           && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
-               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
-               if (ct->tuplehash[dir].tuple.src.ip !=
-                   ct->tuplehash[!dir].tuple.dst.ip
-                   || ct->tuplehash[dir].tuple.src.u.all !=
-                      ct->tuplehash[!dir].tuple.dst.u.all
-                   )
-                       return ip_xfrm_me_harder(pskb) == 0 ? ret : NF_DROP;
-       }
-#endif
-       return ret;
-}
-
-static unsigned int
-ip_nat_local_fn(unsigned int hooknum,
-               struct sk_buff **pskb,
-               const struct net_device *in,
-               const struct net_device *out,
-               int (*okfn)(struct sk_buff *))
-{
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-       unsigned int ret;
-
-       /* root is playing with raw sockets. */
-       if ((*pskb)->len < sizeof(struct iphdr)
-           || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr))
-               return NF_ACCEPT;
-
-       ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
-       if (ret != NF_DROP && ret != NF_STOLEN
-           && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
-               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
-               if (ct->tuplehash[dir].tuple.dst.ip !=
-                   ct->tuplehash[!dir].tuple.src.ip) {
-                       if (ip_route_me_harder(pskb, RTN_UNSPEC))
-                               ret = NF_DROP;
-               }
-#ifdef CONFIG_XFRM
-               else if (ct->tuplehash[dir].tuple.dst.u.all !=
-                        ct->tuplehash[!dir].tuple.src.u.all)
-                       if (ip_xfrm_me_harder(pskb))
-                               ret = NF_DROP;
-#endif
-
-       }
-       return ret;
-}
-
-static unsigned int
-ip_nat_adjust(unsigned int hooknum,
-             struct sk_buff **pskb,
-             const struct net_device *in,
-             const struct net_device *out,
-             int (*okfn)(struct sk_buff *))
-{
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-
-       ct = ip_conntrack_get(*pskb, &ctinfo);
-       if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
-               DEBUGP("ip_nat_standalone: adjusting sequence number\n");
-               if (!ip_nat_seq_adjust(pskb, ct, ctinfo))
-                       return NF_DROP;
-       }
-       return NF_ACCEPT;
-}
-
-/* We must be after connection tracking and before packet filtering. */
-
-static struct nf_hook_ops ip_nat_ops[] = {
-       /* Before packet filtering, change destination */
-       {
-               .hook           = ip_nat_in,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_PRE_ROUTING,
-               .priority       = NF_IP_PRI_NAT_DST,
-       },
-       /* After packet filtering, change source */
-       {
-               .hook           = ip_nat_out,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_POST_ROUTING,
-               .priority       = NF_IP_PRI_NAT_SRC,
-       },
-       /* After conntrack, adjust sequence number */
-       {
-               .hook           = ip_nat_adjust,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_POST_ROUTING,
-               .priority       = NF_IP_PRI_NAT_SEQ_ADJUST,
-       },
-       /* Before packet filtering, change destination */
-       {
-               .hook           = ip_nat_local_fn,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_LOCAL_OUT,
-               .priority       = NF_IP_PRI_NAT_DST,
-       },
-       /* After packet filtering, change source */
-       {
-               .hook           = ip_nat_fn,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_LOCAL_IN,
-               .priority       = NF_IP_PRI_NAT_SRC,
-       },
-       /* After conntrack, adjust sequence number */
-       {
-               .hook           = ip_nat_adjust,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_IP_LOCAL_IN,
-               .priority       = NF_IP_PRI_NAT_SEQ_ADJUST,
-       },
-};
-
-static int __init ip_nat_standalone_init(void)
-{
-       int ret = 0;
-
-       need_conntrack();
-
-#ifdef CONFIG_XFRM
-       BUG_ON(ip_nat_decode_session != NULL);
-       ip_nat_decode_session = nat_decode_session;
-#endif
-       ret = ip_nat_rule_init();
-       if (ret < 0) {
-               printk("ip_nat_init: can't setup rules.\n");
-               goto cleanup_decode_session;
-       }
-       ret = nf_register_hooks(ip_nat_ops, ARRAY_SIZE(ip_nat_ops));
-       if (ret < 0) {
-               printk("ip_nat_init: can't register hooks.\n");
-               goto cleanup_rule_init;
-       }
-       return ret;
-
- cleanup_rule_init:
-       ip_nat_rule_cleanup();
- cleanup_decode_session:
-#ifdef CONFIG_XFRM
-       ip_nat_decode_session = NULL;
-       synchronize_net();
-#endif
-       return ret;
-}
-
-static void __exit ip_nat_standalone_fini(void)
-{
-       nf_unregister_hooks(ip_nat_ops, ARRAY_SIZE(ip_nat_ops));
-       ip_nat_rule_cleanup();
-#ifdef CONFIG_XFRM
-       ip_nat_decode_session = NULL;
-       synchronize_net();
-#endif
-}
-
-module_init(ip_nat_standalone_init);
-module_exit(ip_nat_standalone_fini);
-
-MODULE_LICENSE("GPL");
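Note: ip_nat_standalone_init() above uses the usual kernel error-unwind idiom: acquire resources in order, and on failure jump to labels that release only what was already set up, in reverse order. A minimal user-space sketch of the same idiom, with placeholder setup and cleanup functions (nothing below is a real kernel or netfilter API):

#include <stdio.h>

static int setup_rules(void)       { puts("rules set up");     return 0; }
static void cleanup_rules(void)    { puts("rules cleaned up");           }
static int register_hooks(void)    { puts("hooks registered"); return -1; /* simulated failure */ }
static void unregister_hooks(void) { puts("hooks unregistered");         }

static int init_sketch(void)
{
        int ret;

        ret = setup_rules();
        if (ret < 0)
                goto out;
        ret = register_hooks();
        if (ret < 0)
                goto err_rules;         /* undo only what succeeded so far */
        return 0;

err_rules:
        cleanup_rules();
out:
        return ret;
}

static void exit_sketch(void)
{
        unregister_hooks();
        cleanup_rules();
}

int main(void)
{
        if (init_sketch() < 0)
                return 1;               /* partial state already rolled back */
        exit_sketch();
        return 0;
}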
diff --git a/net/ipv4/netfilter/ip_nat_tftp.c b/net/ipv4/netfilter/ip_nat_tftp.c
deleted file mode 100644 (file)
index 6047935..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Version: 0.0.7
- *
- * Thu 21 Mar 2002 Harald Welte <laforge@gnumonks.org>
- *     - Port to newnat API
- *
- * This module currently supports DNAT:
- * iptables -t nat -A PREROUTING -d x.x.x.x -j DNAT --to-dest x.x.x.y
- *
- * and SNAT:
- * iptables -t nat -A POSTROUTING { -j MASQUERADE , -j SNAT --to-source x.x.x.x }
- *
- * It has not been tested with
- * -j SNAT --to-source x.x.x.x-x.x.x.y since I only have one external IP.
- * If you do test this, please let me know whether it works.
- *
- */
-
-#include <linux/module.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_tftp.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/moduleparam.h>
-
-MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
-MODULE_DESCRIPTION("tftp NAT helper");
-MODULE_LICENSE("GPL");
-
-static unsigned int help(struct sk_buff **pskb,
-                        enum ip_conntrack_info ctinfo,
-                        struct ip_conntrack_expect *exp)
-{
-       struct ip_conntrack *ct = exp->master;
-
-       exp->saved_proto.udp.port
-               = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
-       exp->dir = IP_CT_DIR_REPLY;
-       exp->expectfn = ip_nat_follow_master;
-       if (ip_conntrack_expect_related(exp) != 0)
-               return NF_DROP;
-       return NF_ACCEPT;
-}
-
-static void __exit ip_nat_tftp_fini(void)
-{
-       rcu_assign_pointer(ip_nat_tftp_hook, NULL);
-       synchronize_rcu();
-}
-
-static int __init ip_nat_tftp_init(void)
-{
-       BUG_ON(rcu_dereference(ip_nat_tftp_hook));
-       rcu_assign_pointer(ip_nat_tftp_hook, help);
-       return 0;
-}
-
-module_init(ip_nat_tftp_init);
-module_exit(ip_nat_tftp_fini);
index a14798a..702d94d 100644 (file)
@@ -8,18 +8,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 2000-03-27: Simplified code (thanks to Andi Kleen for clues).
- * 2000-05-20: Fixed notifier problems (following Miguel Freitas' report).
- * 2000-06-19: Fixed so nfmark is copied to metadata (reported by Sebastian
- *             Zander).
- * 2000-08-01: Added Nick Williams' MAC support.
- * 2002-06-25: Code cleanup.
- * 2005-01-10: Added /proc counter for dropped packets; fixed so
- *             packets aren't delivered to user space if they're going
- *             to be dropped.
- * 2005-05-26: local_bh_{disable,enable} around nf_reinject (Harald Welte)
- *
  */
 #include <linux/module.h>
 #include <linux/skbuff.h>
@@ -191,12 +179,13 @@ ipq_flush(int verdict)
 static struct sk_buff *
 ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
 {
-       unsigned char *old_tail;
+       sk_buff_data_t old_tail;
        size_t size = 0;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct ipq_packet_msg *pmsg;
        struct nlmsghdr *nlh;
+       struct timeval tv;
 
        read_lock_bh(&queue_lock);
 
@@ -234,15 +223,16 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
        if (!skb)
                goto nlmsg_failure;
 
-       old_tail= skb->tail;
+       old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
        pmsg = NLMSG_DATA(nlh);
        memset(pmsg, 0, sizeof(*pmsg));
 
        pmsg->packet_id       = (unsigned long )entry;
        pmsg->data_len        = data_len;
-       pmsg->timestamp_sec   = entry->skb->tstamp.off_sec;
-       pmsg->timestamp_usec  = entry->skb->tstamp.off_usec;
+       tv = ktime_to_timeval(entry->skb->tstamp);
+       pmsg->timestamp_sec   = tv.tv_sec;
+       pmsg->timestamp_usec  = tv.tv_usec;
        pmsg->mark            = entry->skb->mark;
        pmsg->hook            = entry->info->hook;
        pmsg->hw_protocol     = entry->skb->protocol;
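Note: the hunk above replaces the old off_sec/off_usec timestamp pair with a ktime_to_timeval() conversion of the skb timestamp. For orientation, this is roughly the arithmetic involved for a non-negative nanosecond timestamp (user-space sketch; ns_to_timeval_sketch() and struct timeval_sketch are illustrative names, not kernel helpers):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct timeval_sketch {
        int64_t tv_sec;
        int64_t tv_usec;
};

/* Split a non-negative nanosecond timestamp into seconds and microseconds. */
static struct timeval_sketch ns_to_timeval_sketch(int64_t ns)
{
        struct timeval_sketch tv;

        tv.tv_sec = ns / 1000000000LL;
        tv.tv_usec = (ns % 1000000000LL) / 1000LL;
        return tv;
}

int main(void)
{
        struct timeval_sketch tv = ns_to_timeval_sketch(1177686000123456789LL);

        printf("sec=%" PRId64 " usec=%" PRId64 "\n", tv.tv_sec, tv.tv_usec);
        return 0;
}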
@@ -378,7 +368,7 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
        }
        if (!skb_make_writable(&e->skb, v->data_len))
                return -ENOMEM;
-       memcpy(e->skb->data, v->payload, v->data_len);
+       skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
 
        return 0;
@@ -495,7 +485,7 @@ ipq_rcv_skb(struct sk_buff *skb)
        if (skblen < sizeof(*nlh))
                return;
 
-       nlh = (struct nlmsghdr *)skb->data;
+       nlh = nlmsg_hdr(skb);
        nlmsglen = nlh->nlmsg_len;
        if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
                return;
@@ -678,7 +668,7 @@ static int __init ip_queue_init(void)
 
        netlink_register_notifier(&ipq_nl_notifier);
        ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk,
-                                     THIS_MODULE);
+                                     NULL, THIS_MODULE);
        if (ipqnl == NULL) {
                printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
index 50cc4b9..e3f83bf 100644 (file)
@@ -7,12 +7,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
- *     - increase module usage count as soon as we have rules inside
- *       a table
- * 08 Oct 2005 Harald Welte <lafore@netfilter.org>
- *     - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
  */
 #include <linux/cache.h>
 #include <linux/capability.h>
@@ -198,7 +192,7 @@ int do_match(struct ipt_entry_match *m,
 {
        /* Stop iteration if it doesn't match */
        if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
-                                     offset, skb->nh.iph->ihl*4, hotdrop))
+                                     offset, ip_hdrlen(skb), hotdrop))
                return 1;
        else
                return 0;
@@ -231,7 +225,7 @@ ipt_do_table(struct sk_buff **pskb,
        struct xt_table_info *private;
 
        /* Initialization */
-       ip = (*pskb)->nh.iph;
+       ip = ip_hdr(*pskb);
        datalen = (*pskb)->len - ip->ihl * 4;
        indev = in ? in->name : nulldevname;
        outdev = out ? out->name : nulldevname;
@@ -320,7 +314,7 @@ ipt_do_table(struct sk_buff **pskb,
                                        = 0x57acc001;
 #endif
                                /* Target might have changed stuff. */
-                               ip = (*pskb)->nh.iph;
+                               ip = ip_hdr(*pskb);
                                datalen = (*pskb)->len - ip->ihl * 4;
 
                                if (verdict == IPT_CONTINUE)
index 42b0802..40e2734 100644 (file)
 #include <linux/if_arp.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-
-#include <net/checksum.h>
-
 #include <linux/netfilter_arp.h>
-
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
-#include <net/netfilter/nf_conntrack_compat.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/checksum.h>
 
 #define CLUSTERIP_VERSION "0.8"
 
@@ -240,7 +237,7 @@ clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum)
 static inline u_int32_t
 clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config)
 {
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
        unsigned long hashval;
        u_int16_t sport, dport;
        u_int16_t *ports;
@@ -310,15 +307,16 @@ target(struct sk_buff **pskb,
        const void *targinfo)
 {
        const struct ipt_clusterip_tgt_info *cipinfo = targinfo;
+       struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
-       u_int32_t *mark, hash;
+       u_int32_t hash;
 
        /* don't need to clusterip_config_get() here, since refcount
         * is only decremented by destroy() - and ip_tables guarantees
         * that the ->target() function isn't called after ->destroy() */
 
-       mark = nf_ct_get_mark((*pskb), &ctinfo);
-       if (mark == NULL) {
+       ct = nf_ct_get(*pskb, &ctinfo);
+       if (ct == NULL) {
                printk(KERN_ERR "CLUSTERIP: no conntrack!\n");
                        /* FIXME: need to drop invalid ones, since replies
                         * to outgoing connections of other nodes will be
@@ -328,7 +326,7 @@ target(struct sk_buff **pskb,
 
        /* special case: ICMP error handling. conntrack distinguishes between
         * error messages (RELATED) and information requests (see below) */
-       if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
+       if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP
            && (ctinfo == IP_CT_RELATED
                || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY))
                return XT_CONTINUE;
@@ -341,7 +339,7 @@ target(struct sk_buff **pskb,
 
        switch (ctinfo) {
                case IP_CT_NEW:
-                       *mark = hash;
+                       ct->mark = hash;
                        break;
                case IP_CT_RELATED:
                case IP_CT_RELATED+IP_CT_IS_REPLY:
@@ -358,7 +356,7 @@ target(struct sk_buff **pskb,
 #ifdef DEBUG_CLUSTERP
        DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 #endif
-       DEBUGP("hash=%u ct_hash=%u ", hash, *mark);
+       DEBUGP("hash=%u ct_hash=%u ", hash, ct->mark);
        if (!clusterip_responsible(cipinfo->config, hash)) {
                DEBUGP("not responsible\n");
                return NF_DROP;
@@ -521,7 +519,7 @@ arp_mangle(unsigned int hook,
           const struct net_device *out,
           int (*okfn)(struct sk_buff *))
 {
-       struct arphdr *arp = (*pskb)->nh.arph;
+       struct arphdr *arp = arp_hdr(*pskb);
        struct arp_payload *payload;
        struct clusterip_config *c;
 
index 4f56563..918ca92 100644 (file)
@@ -5,14 +5,13 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * ipt_ECN.c,v 1.5 2002/08/18 19:36:51 laforge Exp
 */
 
 #include <linux/in.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/ip.h>
+#include <net/ip.h>
 #include <linux/tcp.h>
 #include <net/checksum.h>
 
@@ -29,13 +28,13 @@ MODULE_DESCRIPTION("iptables ECN modification module");
 static inline int
 set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
 {
-       struct iphdr *iph = (*pskb)->nh.iph;
+       struct iphdr *iph = ip_hdr(*pskb);
 
        if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) {
                __u8 oldtos;
                if (!skb_make_writable(pskb, sizeof(struct iphdr)))
                        return 0;
-               iph = (*pskb)->nh.iph;
+               iph = ip_hdr(*pskb);
                oldtos = iph->tos;
                iph->tos &= ~IPT_ECN_IP_MASK;
                iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK);
@@ -52,7 +51,7 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
        __be16 oldval;
 
        /* Not enough header? */
-       tcph = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl*4,
+       tcph = skb_header_pointer(*pskb, ip_hdrlen(*pskb),
                                  sizeof(_tcph), &_tcph);
        if (!tcph)
                return 0;
@@ -63,9 +62,9 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
             tcph->cwr == einfo->proto.tcp.cwr)))
                return 1;
 
-       if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
+       if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph)))
                return 0;
-       tcph = (void *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl*4;
+       tcph = (void *)ip_hdr(*pskb) + ip_hdrlen(*pskb);
 
        oldval = ((__be16 *)tcph)[6];
        if (einfo->operation & IPT_ECN_OP_SET_ECE)
@@ -93,7 +92,7 @@ target(struct sk_buff **pskb,
                        return NF_DROP;
 
        if (einfo->operation & (IPT_ECN_OP_SET_ECE | IPT_ECN_OP_SET_CWR)
-           && (*pskb)->nh.iph->protocol == IPPROTO_TCP)
+           && ip_hdr(*pskb)->protocol == IPPROTO_TCP)
                if (!set_ect_tcp(pskb, einfo))
                        return NF_DROP;
 
index d9c37fd..a42c5cd 100644 (file)
@@ -399,9 +399,9 @@ ipt_log_packet(unsigned int pf,
                /* MAC logging for input chain only. */
                printk("MAC=");
                if (skb->dev && skb->dev->hard_header_len
-                   && skb->mac.raw != (void*)skb->nh.iph) {
+                   && skb->mac_header != skb->network_header) {
                        int i;
-                       unsigned char *p = skb->mac.raw;
+                       const unsigned char *p = skb_mac_header(skb);
                        for (i = 0; i < skb->dev->hard_header_len; i++,p++)
                                printk("%02x%c", *p,
                                       i==skb->dev->hard_header_len - 1
@@ -477,14 +477,10 @@ static int __init ipt_log_init(void)
        ret = xt_register_target(&ipt_log_reg);
        if (ret < 0)
                return ret;
-       if (nf_log_register(PF_INET, &ipt_log_logger) < 0) {
-               printk(KERN_WARNING "ipt_LOG: not logging via system console "
-                      "since somebody else already registered for PF_INET\n");
-               /* we cannot make module load fail here, since otherwise
-                * iptables userspace would abort */
-       }
-
-       return 0;
+       ret = nf_log_register(PF_INET, &ipt_log_logger);
+       if (ret < 0 && ret != -EEXIST)
+               xt_unregister_target(&ipt_log_reg);
+       return ret;
 }
 
 static void __exit ipt_log_fini(void)
index b5955f3..d4f2d77 100644 (file)
 #include <net/ip.h>
 #include <net/checksum.h>
 #include <net/route.h>
-#include <linux/netfilter_ipv4.h>
-#ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_rule.h>
-#else
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#endif
+#include <linux/netfilter_ipv4.h>
 #include <linux/netfilter/x_tables.h>
 
 MODULE_LICENSE("GPL");
@@ -48,7 +44,7 @@ masquerade_check(const char *tablename,
                 void *targinfo,
                 unsigned int hook_mask)
 {
-       const struct ip_nat_multi_range_compat *mr = targinfo;
+       const struct nf_nat_multi_range_compat *mr = targinfo;
 
        if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
                DEBUGP("masquerade_check: bad MAP_IPS.\n");
@@ -69,33 +65,26 @@ masquerade_target(struct sk_buff **pskb,
                  const struct xt_target *target,
                  const void *targinfo)
 {
-#ifdef CONFIG_NF_NAT_NEEDED
+       struct nf_conn *ct;
        struct nf_conn_nat *nat;
-#endif
-       struct ip_conntrack *ct;
        enum ip_conntrack_info ctinfo;
-       struct ip_nat_range newrange;
-       const struct ip_nat_multi_range_compat *mr;
+       struct nf_nat_range newrange;
+       const struct nf_nat_multi_range_compat *mr;
        struct rtable *rt;
        __be32 newsrc;
 
-       IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING);
+       NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING);
 
-       ct = ip_conntrack_get(*pskb, &ctinfo);
-#ifdef CONFIG_NF_NAT_NEEDED
+       ct = nf_ct_get(*pskb, &ctinfo);
        nat = nfct_nat(ct);
-#endif
-       IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
+
+       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
                            || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
 
        /* Source address is 0.0.0.0 - locally generated packet that is
         * probably not supposed to be masqueraded.
         */
-#ifdef CONFIG_NF_NAT_NEEDED
        if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
-#else
-       if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip == 0)
-#endif
                return NF_ACCEPT;
 
        mr = targinfo;
@@ -107,40 +96,30 @@ masquerade_target(struct sk_buff **pskb,
        }
 
        write_lock_bh(&masq_lock);
-#ifdef CONFIG_NF_NAT_NEEDED
        nat->masq_index = out->ifindex;
-#else
-       ct->nat.masq_index = out->ifindex;
-#endif
        write_unlock_bh(&masq_lock);
 
        /* Transfer from original range. */
-       newrange = ((struct ip_nat_range)
+       newrange = ((struct nf_nat_range)
                { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
                  newsrc, newsrc,
                  mr->range[0].min, mr->range[0].max });
 
        /* Hand modified range to generic setup. */
-       return ip_nat_setup_info(ct, &newrange, hooknum);
+       return nf_nat_setup_info(ct, &newrange, hooknum);
 }
 
 static inline int
-device_cmp(struct ip_conntrack *i, void *ifindex)
+device_cmp(struct nf_conn *i, void *ifindex)
 {
-       int ret;
-#ifdef CONFIG_NF_NAT_NEEDED
        struct nf_conn_nat *nat = nfct_nat(i);
+       int ret;
 
        if (!nat)
                return 0;
-#endif
 
        read_lock_bh(&masq_lock);
-#ifdef CONFIG_NF_NAT_NEEDED
        ret = (nat->masq_index == (int)(long)ifindex);
-#else
-       ret = (i->nat.masq_index == (int)(long)ifindex);
-#endif
        read_unlock_bh(&masq_lock);
 
        return ret;
@@ -156,9 +135,9 @@ static int masq_device_event(struct notifier_block *this,
                /* Device was downed.  Search entire table for
                   conntracks which were associated with that device,
                   and forget them. */
-               IP_NF_ASSERT(dev->ifindex != 0);
+               NF_CT_ASSERT(dev->ifindex != 0);
 
-               ip_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex);
+               nf_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex);
        }
 
        return NOTIFY_DONE;
@@ -174,9 +153,9 @@ static int masq_inet_event(struct notifier_block *this,
                /* IP address was deleted.  Search entire table for
                   conntracks which were associated with that device,
                   and forget them. */
-               IP_NF_ASSERT(dev->ifindex != 0);
+               NF_CT_ASSERT(dev->ifindex != 0);
 
-               ip_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex);
+               nf_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex);
        }
 
        return NOTIFY_DONE;
@@ -194,7 +173,7 @@ static struct xt_target masquerade = {
        .name           = "MASQUERADE",
        .family         = AF_INET,
        .target         = masquerade_target,
-       .targetsize     = sizeof(struct ip_nat_multi_range_compat),
+       .targetsize     = sizeof(struct nf_nat_multi_range_compat),
        .table          = "nat",
        .hooks          = 1 << NF_IP_POST_ROUTING,
        .checkentry     = masquerade_check,
index fd7aaa3..068c69b 100644 (file)
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter/x_tables.h>
-#ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_rule.h>
-#else
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#endif
 
 #define MODULENAME "NETMAP"
 MODULE_LICENSE("GPL");
@@ -40,7 +36,7 @@ check(const char *tablename,
       void *targinfo,
       unsigned int hook_mask)
 {
-       const struct ip_nat_multi_range_compat *mr = targinfo;
+       const struct nf_nat_multi_range_compat *mr = targinfo;
 
        if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) {
                DEBUGP(MODULENAME":check: bad MAP_IPS.\n");
@@ -61,39 +57,39 @@ target(struct sk_buff **pskb,
        const struct xt_target *target,
        const void *targinfo)
 {
-       struct ip_conntrack *ct;
+       struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        __be32 new_ip, netmask;
-       const struct ip_nat_multi_range_compat *mr = targinfo;
-       struct ip_nat_range newrange;
+       const struct nf_nat_multi_range_compat *mr = targinfo;
+       struct nf_nat_range newrange;
 
-       IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING
+       NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING
                     || hooknum == NF_IP_POST_ROUTING
                     || hooknum == NF_IP_LOCAL_OUT);
-       ct = ip_conntrack_get(*pskb, &ctinfo);
+       ct = nf_ct_get(*pskb, &ctinfo);
 
        netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
 
        if (hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_LOCAL_OUT)
-               new_ip = (*pskb)->nh.iph->daddr & ~netmask;
+               new_ip = ip_hdr(*pskb)->daddr & ~netmask;
        else
-               new_ip = (*pskb)->nh.iph->saddr & ~netmask;
+               new_ip = ip_hdr(*pskb)->saddr & ~netmask;
        new_ip |= mr->range[0].min_ip & netmask;
 
-       newrange = ((struct ip_nat_range)
+       newrange = ((struct nf_nat_range)
                { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
                  new_ip, new_ip,
                  mr->range[0].min, mr->range[0].max });
 
        /* Hand modified range to generic setup. */
-       return ip_nat_setup_info(ct, &newrange, hooknum);
+       return nf_nat_setup_info(ct, &newrange, hooknum);
 }
 
 static struct xt_target target_module = {
        .name           = MODULENAME,
        .family         = AF_INET,
        .target         = target,
-       .targetsize     = sizeof(struct ip_nat_multi_range_compat),
+       .targetsize     = sizeof(struct nf_nat_multi_range_compat),
        .table          = "nat",
        .hooks          = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_POST_ROUTING) |
                          (1 << NF_IP_LOCAL_OUT),
index c2b6b80..68cc76a 100644 (file)
 #include <net/checksum.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter/x_tables.h>
-#ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_rule.h>
-#else
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#endif
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -43,7 +39,7 @@ redirect_check(const char *tablename,
               void *targinfo,
               unsigned int hook_mask)
 {
-       const struct ip_nat_multi_range_compat *mr = targinfo;
+       const struct nf_nat_multi_range_compat *mr = targinfo;
 
        if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
                DEBUGP("redirect_check: bad MAP_IPS.\n");
@@ -64,17 +60,17 @@ redirect_target(struct sk_buff **pskb,
                const struct xt_target *target,
                const void *targinfo)
 {
-       struct ip_conntrack *ct;
+       struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        __be32 newdst;
-       const struct ip_nat_multi_range_compat *mr = targinfo;
-       struct ip_nat_range newrange;
+       const struct nf_nat_multi_range_compat *mr = targinfo;
+       struct nf_nat_range newrange;
 
-       IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING
+       NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING
                     || hooknum == NF_IP_LOCAL_OUT);
 
-       ct = ip_conntrack_get(*pskb, &ctinfo);
-       IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
+       ct = nf_ct_get(*pskb, &ctinfo);
+       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
 
        /* Local packets: make them go to loopback */
        if (hooknum == NF_IP_LOCAL_OUT)
@@ -96,20 +92,20 @@ redirect_target(struct sk_buff **pskb,
        }
 
        /* Transfer from original range. */
-       newrange = ((struct ip_nat_range)
+       newrange = ((struct nf_nat_range)
                { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
                  newdst, newdst,
                  mr->range[0].min, mr->range[0].max });
 
        /* Hand modified range to generic setup. */
-       return ip_nat_setup_info(ct, &newrange, hooknum);
+       return nf_nat_setup_info(ct, &newrange, hooknum);
 }
 
 static struct xt_target redirect_reg = {
        .name           = "REDIRECT",
        .family         = AF_INET,
        .target         = redirect_target,
-       .targetsize     = sizeof(struct ip_nat_multi_range_compat),
+       .targetsize     = sizeof(struct nf_nat_multi_range_compat),
        .table          = "nat",
        .hooks          = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT),
        .checkentry     = redirect_check,
index 80f739e..9041e07 100644 (file)
@@ -1,7 +1,5 @@
 /*
  * This is a module which is used for rejecting packets.
- * Added support for customized reject packets (Jozsef Kadlecsik).
- * Added support for ICMP type-3-code-13 (Maciej Soltysiak). [RFC 1812]
  */
 
 /* (C) 1999-2001 Paul `Rusty' Russell
@@ -43,7 +41,7 @@ MODULE_DESCRIPTION("iptables REJECT target module");
 static void send_reset(struct sk_buff *oldskb, int hook)
 {
        struct sk_buff *nskb;
-       struct iphdr *iph = oldskb->nh.iph;
+       struct iphdr *niph;
        struct tcphdr _otcph, *oth, *tcph;
        __be16 tmp_port;
        __be32 tmp_addr;
@@ -51,10 +49,10 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        unsigned int addr_type;
 
        /* IP header checks: fragment. */
-       if (oldskb->nh.iph->frag_off & htons(IP_OFFSET))
+       if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
                return;
 
-       oth = skb_header_pointer(oldskb, oldskb->nh.iph->ihl * 4,
+       oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
                                 sizeof(_otcph), &_otcph);
        if (oth == NULL)
                return;
@@ -64,7 +62,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
                return;
 
        /* Check checksum */
-       if (nf_ip_checksum(oldskb, hook, iph->ihl * 4, IPPROTO_TCP))
+       if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
                return;
 
        /* We need a linear, writeable skb.  We also need to expand
@@ -84,20 +82,21 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        skb_shinfo(nskb)->gso_segs = 0;
        skb_shinfo(nskb)->gso_type = 0;
 
-       tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);
+       tcph = (struct tcphdr *)(skb_network_header(nskb) + ip_hdrlen(nskb));
 
        /* Swap source and dest */
-       tmp_addr = nskb->nh.iph->saddr;
-       nskb->nh.iph->saddr = nskb->nh.iph->daddr;
-       nskb->nh.iph->daddr = tmp_addr;
+       niph = ip_hdr(nskb);
+       tmp_addr = niph->saddr;
+       niph->saddr = niph->daddr;
+       niph->daddr = tmp_addr;
        tmp_port = tcph->source;
        tcph->source = tcph->dest;
        tcph->dest = tmp_port;
 
        /* Truncate to length (no data) */
        tcph->doff = sizeof(struct tcphdr)/4;
-       skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr));
-       nskb->nh.iph->tot_len = htons(nskb->len);
+       skb_trim(nskb, ip_hdrlen(nskb) + sizeof(struct tcphdr));
+       niph->tot_len = htons(nskb->len);
 
        if (tcph->ack) {
                needs_ack = 0;
@@ -105,9 +104,9 @@ static void send_reset(struct sk_buff *oldskb, int hook)
                tcph->ack_seq = 0;
        } else {
                needs_ack = 1;
-               tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin
-                                     + oldskb->len - oldskb->nh.iph->ihl*4
-                                     - (oth->doff<<2));
+               tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
+                                     oldskb->len - ip_hdrlen(oldskb) -
+                                     (oth->doff << 2));
                tcph->seq = 0;
        }
 
@@ -122,14 +121,13 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        /* Adjust TCP checksum */
        tcph->check = 0;
        tcph->check = tcp_v4_check(sizeof(struct tcphdr),
-                                  nskb->nh.iph->saddr,
-                                  nskb->nh.iph->daddr,
+                                  niph->saddr, niph->daddr,
                                   csum_partial((char *)tcph,
                                                sizeof(struct tcphdr), 0));
 
        /* Set DF, id = 0 */
-       nskb->nh.iph->frag_off = htons(IP_DF);
-       nskb->nh.iph->id = 0;
+       niph->frag_off = htons(IP_DF);
+       niph->id = 0;
 
        addr_type = RTN_UNSPEC;
        if (hook != NF_IP_FORWARD
@@ -145,12 +143,11 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        nskb->ip_summed = CHECKSUM_NONE;
 
        /* Adjust IP TTL */
-       nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);
+       niph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);
 
        /* Adjust IP checksum */
-       nskb->nh.iph->check = 0;
-       nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph,
-                                          nskb->nh.iph->ihl);
+       niph->check = 0;
+       niph->check = ip_fast_csum(skb_network_header(nskb), niph->ihl);
 
        /* "Never happens" */
        if (nskb->len > dst_mtu(nskb->dst))
@@ -182,7 +179,7 @@ static unsigned int reject(struct sk_buff **pskb,
 
        /* Our naive response construction doesn't deal with IP
           options, and probably shouldn't try. */
-       if ((*pskb)->nh.iph->ihl<<2 != sizeof(struct iphdr))
+       if (ip_hdrlen(*pskb) != sizeof(struct iphdr))
                return NF_DROP;
 
        /* WARNING: This code causes reentry within iptables.
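The hunks above, like most of the ones that follow, mechanically replace the old skb->nh/skb->h union pointers with the header accessor helpers. A minimal sketch of the equivalences relied on here (illustrative only, not code from the patch; example_l4_offset is a hypothetical name):

	#include <linux/ip.h>
	#include <linux/skbuff.h>
	#include <net/ip.h>

	/* ip_hdr(skb) points at the IPv4 header via skb_network_header(),
	 * and ip_hdrlen(skb) is ip_hdr(skb)->ihl * 4 -- the expression
	 * "skb->nh.iph->ihl * 4" that these conversions remove.
	 */
	static unsigned int example_l4_offset(const struct sk_buff *skb)
	{
		/* offset of the transport header from skb->data */
		return skb_network_offset(skb) + ip_hdrlen(skb);
	}
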
index bd4404e..511e5ff 100644 (file)
@@ -7,21 +7,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 010320 Martin Josefsson <gandalf@wlug.westbo.se>
- *     * copied ipt_BALANCE.c to ipt_SAME.c and changed a few things.
- * 010728 Martin Josefsson <gandalf@wlug.westbo.se>
- *     * added --nodst to not include destination-ip in new source
- *       calculations.
- *     * added some more sanity-checks.
- * 010729 Martin Josefsson <gandalf@wlug.westbo.se>
- *     * fixed a buggy if-statement in same_check(), should have
- *       used ntohl() but didn't.
- *     * added support for multiple ranges. IPT_SAME_MAX_RANGE is
- *       defined in linux/include/linux/netfilter_ipv4/ipt_SAME.h
- *       and is currently set to 10.
- *     * added support for 1-address range, nice to have now that
- *       we have multiple ranges.
  */
 #include <linux/types.h>
 #include <linux/ip.h>
 #include <net/checksum.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter/x_tables.h>
-#ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_rule.h>
-#else
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#endif
 #include <linux/netfilter_ipv4/ipt_SAME.h>
 
 MODULE_LICENSE("GPL");
@@ -138,17 +119,17 @@ same_target(struct sk_buff **pskb,
                const struct xt_target *target,
                const void *targinfo)
 {
-       struct ip_conntrack *ct;
+       struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        u_int32_t tmpip, aindex;
        __be32 new_ip;
        const struct ipt_same_info *same = targinfo;
-       struct ip_nat_range newrange;
-       const struct ip_conntrack_tuple *t;
+       struct nf_nat_range newrange;
+       const struct nf_conntrack_tuple *t;
 
-       IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
+       NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
                        hooknum == NF_IP_POST_ROUTING);
-       ct = ip_conntrack_get(*pskb, &ctinfo);
+       ct = nf_ct_get(*pskb, &ctinfo);
 
        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
 
@@ -157,17 +138,10 @@ same_target(struct sk_buff **pskb,
           Here we calculate the index in same->iparray which
           holds the ipaddress we should use */
 
-#ifdef CONFIG_NF_NAT_NEEDED
        tmpip = ntohl(t->src.u3.ip);
 
        if (!(same->info & IPT_SAME_NODST))
                tmpip += ntohl(t->dst.u3.ip);
-#else
-       tmpip = ntohl(t->src.ip);
-
-       if (!(same->info & IPT_SAME_NODST))
-               tmpip += ntohl(t->dst.ip);
-#endif
        aindex = tmpip % same->ipnum;
 
        new_ip = htonl(same->iparray[aindex]);
@@ -178,13 +152,13 @@ same_target(struct sk_buff **pskb,
                        NIPQUAD(new_ip));
 
        /* Transfer from original range. */
-       newrange = ((struct ip_nat_range)
+       newrange = ((struct nf_nat_range)
                { same->range[0].flags, new_ip, new_ip,
                  /* FIXME: Use ports from correct range! */
                  same->range[0].min, same->range[0].max });
 
        /* Hand modified range to generic setup. */
-       return ip_nat_setup_info(ct, &newrange, hooknum);
+       return nf_nat_setup_info(ct, &newrange, hooknum);
 }
 
 static struct xt_target same_reg = {
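The ipt_SAME hunks above also move from the old ip_conntrack/ip_nat types to their nf_conntrack/nf_nat counterparts (nf_conn, nf_nat_range, nf_ct_get, NF_CT_ASSERT, nf_nat_setup_info). A small sketch of the lookup as used there, with a hypothetical caller name:

	#include <net/netfilter/nf_conntrack.h>

	/* nf_ct_get() replaces ip_conntrack_get(): it returns the connection
	 * the skb belongs to and fills in the conntrack state in ctinfo.
	 */
	static struct nf_conn *example_lookup(const struct sk_buff *skb)
	{
		enum ip_conntrack_info ctinfo;

		return nf_ct_get(skb, &ctinfo);
	}
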
index cedf9f7..0ad02f2 100644 (file)
@@ -29,13 +29,13 @@ target(struct sk_buff **pskb,
        const void *targinfo)
 {
        const struct ipt_tos_target_info *tosinfo = targinfo;
-       struct iphdr *iph = (*pskb)->nh.iph;
+       struct iphdr *iph = ip_hdr(*pskb);
 
        if ((iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) {
                __u8 oldtos;
                if (!skb_make_writable(pskb, sizeof(struct iphdr)))
                        return NF_DROP;
-               iph = (*pskb)->nh.iph;
+               iph = ip_hdr(*pskb);
                oldtos = iph->tos;
                iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos;
                nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
index 64be31c..a991ec7 100644 (file)
@@ -32,7 +32,7 @@ ipt_ttl_target(struct sk_buff **pskb,
        if (!skb_make_writable(pskb, (*pskb)->len))
                return NF_DROP;
 
-       iph = (*pskb)->nh.iph;
+       iph = ip_hdr(*pskb);
 
        switch (info->mode) {
                case IPT_TTL_SET:
index 9acc018..23b607b 100644 (file)
@@ -2,20 +2,6 @@
  * netfilter module for userspace packet logging daemons
  *
  * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
- *
- * 2000/09/22 ulog-cprange feature added
- * 2001/01/04 in-kernel queue as proposed by Sebastian Zander
- *                                             <zander@fokus.gmd.de>
- * 2001/01/30 per-rule nlgroup conflicts with global queue.
- *            nlgroup now global (sysctl)
- * 2001/04/19 ulog-queue reworked, now fixed buffer size specified at
- *           module loadtime -HW
- * 2002/07/07 remove broken nflog_rcv() function -HW
- * 2002/08/29 fix shifted/unshifted nlgroup bug -HW
- * 2002/10/30 fix uninitialized mac_len field - <Anders K. Pedersen>
- * 2004/10/25 fix erroneous calculation of 'len' parameter to NLMSG_PUT
- *           resulting in bogus 'error during NLMSG_PUT' messages.
- *
  * (C) 1999-2001 Paul `Rusty' Russell
  * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
  *
@@ -42,8 +28,6 @@
  * flushtimeout:
  *   Specify, after how many hundredths of a second the queue should be
  *   flushed even if it is not full yet.
- *
- * ipt_ULOG.c,v 1.22 2002/10/30 09:07:31 laforge Exp
  */
 
 #include <linux/module.h>
@@ -187,6 +171,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
        ulog_packet_msg_t *pm;
        size_t size, copy_len;
        struct nlmsghdr *nlh;
+       struct timeval tv;
 
        /* ffs == find first bit set, necessary because userspace
         * is already shifting groupnumber, but we need unshifted.
@@ -232,13 +217,14 @@ static void ipt_ulog_packet(unsigned int hooknum,
        pm = NLMSG_DATA(nlh);
 
        /* We might not have a timestamp, get one */
-       if (skb->tstamp.off_sec == 0)
+       if (skb->tstamp.tv64 == 0)
                __net_timestamp((struct sk_buff *)skb);
 
        /* copy hook, prefix, timestamp, payload, etc. */
        pm->data_len = copy_len;
-       put_unaligned(skb->tstamp.off_sec, &pm->timestamp_sec);
-       put_unaligned(skb->tstamp.off_usec, &pm->timestamp_usec);
+       tv = ktime_to_timeval(skb->tstamp);
+       put_unaligned(tv.tv_sec, &pm->timestamp_sec);
+       put_unaligned(tv.tv_usec, &pm->timestamp_usec);
        put_unaligned(skb->mark, &pm->mark);
        pm->hook = hooknum;
        if (prefix != NULL)
@@ -249,9 +235,9 @@ static void ipt_ulog_packet(unsigned int hooknum,
                *(pm->prefix) = '\0';
 
        if (in && in->hard_header_len > 0
-           && skb->mac.raw != (void *) skb->nh.iph
+           && skb->mac_header != skb->network_header
            && in->hard_header_len <= ULOG_MAC_LEN) {
-               memcpy(pm->mac, skb->mac.raw, in->hard_header_len);
+               memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len);
                pm->mac_len = in->hard_header_len;
        } else
                pm->mac_len = 0;
@@ -363,12 +349,52 @@ static int ipt_ulog_checkentry(const char *tablename,
        return 1;
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_ipt_ulog_info {
+       compat_uint_t   nl_group;
+       compat_size_t   copy_range;
+       compat_size_t   qthreshold;
+       char            prefix[ULOG_PREFIX_LEN];
+};
+
+static void compat_from_user(void *dst, void *src)
+{
+       struct compat_ipt_ulog_info *cl = src;
+       struct ipt_ulog_info l = {
+               .nl_group       = cl->nl_group,
+               .copy_range     = cl->copy_range,
+               .qthreshold     = cl->qthreshold,
+       };
+
+       memcpy(l.prefix, cl->prefix, sizeof(l.prefix));
+       memcpy(dst, &l, sizeof(l));
+}
+
+static int compat_to_user(void __user *dst, void *src)
+{
+       struct ipt_ulog_info *l = src;
+       struct compat_ipt_ulog_info cl = {
+               .nl_group       = l->nl_group,
+               .copy_range     = l->copy_range,
+               .qthreshold     = l->qthreshold,
+       };
+
+       memcpy(cl.prefix, l->prefix, sizeof(cl.prefix));
+       return copy_to_user(dst, &cl, sizeof(cl)) ? -EFAULT : 0;
+}
+#endif /* CONFIG_COMPAT */
+
 static struct xt_target ipt_ulog_reg = {
        .name           = "ULOG",
        .family         = AF_INET,
        .target         = ipt_ulog_target,
        .targetsize     = sizeof(struct ipt_ulog_info),
        .checkentry     = ipt_ulog_checkentry,
+#ifdef CONFIG_COMPAT
+       .compatsize     = sizeof(struct compat_ipt_ulog_info),
+       .compat_from_user = compat_from_user,
+       .compat_to_user = compat_to_user,
+#endif
        .me             = THIS_MODULE,
 };
 
@@ -390,14 +416,11 @@ static int __init ipt_ulog_init(void)
        }
 
        /* initialize ulog_buffers */
-       for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
-               init_timer(&ulog_buffers[i].timer);
-               ulog_buffers[i].timer.function = ulog_timer;
-               ulog_buffers[i].timer.data = i;
-       }
+       for (i = 0; i < ULOG_MAXNLGROUPS; i++)
+               setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
 
        nflognl = netlink_kernel_create(NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL,
-                                       THIS_MODULE);
+                                       NULL, THIS_MODULE);
        if (!nflognl)
                return -ENOMEM;
 
index cfa0472..a652a14 100644 (file)
@@ -33,7 +33,7 @@ static int match(const struct sk_buff *skb,
                 int offset, unsigned int protoff, int *hotdrop)
 {
        const struct ipt_addrtype_info *info = matchinfo;
-       const struct iphdr *iph = skb->nh.iph;
+       const struct iphdr *iph = ip_hdr(skb);
        int ret = 1;
 
        if (info->source)
index 37508b2..2621812 100644 (file)
@@ -1,6 +1,4 @@
 /* IP tables module for matching the value of the IPv4 and TCP ECN bits
- *
- * ipt_ecn.c,v 1.3 2002/05/29 15:09:00 laforge Exp
  *
  * (C) 2002 by Harald Welte <laforge@gnumonks.org>
  *
@@ -11,6 +9,7 @@
 
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <net/ip.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/tcp.h>
@@ -26,7 +25,7 @@ MODULE_LICENSE("GPL");
 static inline int match_ip(const struct sk_buff *skb,
                           const struct ipt_ecn_info *einfo)
 {
-       return ((skb->nh.iph->tos&IPT_ECN_IP_MASK) == einfo->ip_ect);
+       return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect;
 }
 
 static inline int match_tcp(const struct sk_buff *skb,
@@ -38,8 +37,7 @@ static inline int match_tcp(const struct sk_buff *skb,
        /* In practice, TCP match does this, so can't fail.  But let's
         * be good citizens.
         */
-       th = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
-                               sizeof(_tcph), &_tcph);
+       th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
        if (th == NULL) {
                *hotdrop = 0;
                return 0;
@@ -80,7 +78,7 @@ static int match(const struct sk_buff *skb,
                        return 0;
 
        if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
-               if (skb->nh.iph->protocol != IPPROTO_TCP)
+               if (ip_hdr(skb)->protocol != IPPROTO_TCP)
                        return 0;
                if (!match_tcp(skb, info, hotdrop))
                        return 0;
index bc5d5e6..33af9e9 100644 (file)
@@ -32,7 +32,7 @@ match(const struct sk_buff *skb,
       int offset, unsigned int protoff, int *hotdrop)
 {
        const struct ipt_iprange_info *info = matchinfo;
-       const struct iphdr *iph = skb->nh.iph;
+       const struct iphdr *iph = ip_hdr(skb);
 
        if (info->flags & IPRANGE_SRC) {
                if (((ntohl(iph->saddr) < ntohl(info->src.min_ip))
index aecb9c4..15a9e8b 100644 (file)
@@ -183,11 +183,11 @@ ipt_recent_match(const struct sk_buff *skb,
        int ret = info->invert;
 
        if (info->side == IPT_RECENT_DEST)
-               addr = skb->nh.iph->daddr;
+               addr = ip_hdr(skb)->daddr;
        else
-               addr = skb->nh.iph->saddr;
+               addr = ip_hdr(skb)->saddr;
 
-       ttl = skb->nh.iph->ttl;
+       ttl = ip_hdr(skb)->ttl;
        /* use TTL as seen before forwarding */
        if (out && !skb->sk)
                ttl++;
index 5d33b51..d314844 100644 (file)
@@ -30,7 +30,7 @@ match(const struct sk_buff *skb,
 {
        const struct ipt_tos_info *info = matchinfo;
 
-       return (skb->nh.iph->tos == info->tos) ^ info->invert;
+       return (ip_hdr(skb)->tos == info->tos) ^ info->invert;
 }
 
 static struct xt_match tos_match = {
index 1eca9f4..ab02d9e 100644 (file)
@@ -1,6 +1,4 @@
 /* IP tables module for matching the value of the TTL
- *
- * ipt_ttl.c,v 1.5 2000/11/13 11:16:08 laforge Exp
  *
  * (C) 2000,2001 by Harald Welte <laforge@netfilter.org>
  *
@@ -26,19 +24,20 @@ static int match(const struct sk_buff *skb,
                 int offset, unsigned int protoff, int *hotdrop)
 {
        const struct ipt_ttl_info *info = matchinfo;
+       const u8 ttl = ip_hdr(skb)->ttl;
 
        switch (info->mode) {
                case IPT_TTL_EQ:
-                       return (skb->nh.iph->ttl == info->ttl);
+                       return (ttl == info->ttl);
                        break;
                case IPT_TTL_NE:
-                       return (!(skb->nh.iph->ttl == info->ttl));
+                       return (!(ttl == info->ttl));
                        break;
                case IPT_TTL_LT:
-                       return (skb->nh.iph->ttl < info->ttl);
+                       return (ttl < info->ttl);
                        break;
                case IPT_TTL_GT:
-                       return (skb->nh.iph->ttl > info->ttl);
+                       return (ttl > info->ttl);
                        break;
                default:
                        printk(KERN_WARNING "ipt_ttl: unknown mode %d\n",
index d1d61e9..4272890 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
+#include <net/ip.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -102,7 +103,7 @@ ipt_local_out_hook(unsigned int hook,
 {
        /* root is playing with raw sockets. */
        if ((*pskb)->len < sizeof(struct iphdr)
-           || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
+           || ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
                if (net_ratelimit())
                        printk("ipt_hook: happy cracking.\n");
                return NF_ACCEPT;
index 98b66ef..9278802 100644 (file)
@@ -7,8 +7,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * Extended to all five netfilter hooks by Brad Chapman & Harald Welte
  */
 #include <linux/module.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
@@ -17,6 +15,7 @@
 #include <net/sock.h>
 #include <net/route.h>
 #include <linux/ip.h>
+#include <net/ip.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -130,13 +129,14 @@ ipt_local_hook(unsigned int hook,
                   int (*okfn)(struct sk_buff *))
 {
        unsigned int ret;
+       const struct iphdr *iph;
        u_int8_t tos;
        __be32 saddr, daddr;
        u_int32_t mark;
 
        /* root is playing with raw sockets. */
        if ((*pskb)->len < sizeof(struct iphdr)
-           || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
+           || ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
                if (net_ratelimit())
                        printk("ipt_hook: happy cracking.\n");
                return NF_ACCEPT;
@@ -144,19 +144,23 @@ ipt_local_hook(unsigned int hook,
 
        /* Save things which could affect route */
        mark = (*pskb)->mark;
-       saddr = (*pskb)->nh.iph->saddr;
-       daddr = (*pskb)->nh.iph->daddr;
-       tos = (*pskb)->nh.iph->tos;
+       iph = ip_hdr(*pskb);
+       saddr = iph->saddr;
+       daddr = iph->daddr;
+       tos = iph->tos;
 
        ret = ipt_do_table(pskb, hook, in, out, &packet_mangler);
        /* Reroute for ANY change. */
-       if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE
-           && ((*pskb)->nh.iph->saddr != saddr
-               || (*pskb)->nh.iph->daddr != daddr
-               || (*pskb)->mark != mark
-               || (*pskb)->nh.iph->tos != tos))
-               if (ip_route_me_harder(pskb, RTN_UNSPEC))
-                       ret = NF_DROP;
+       if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
+               iph = ip_hdr(*pskb);
+
+               if (iph->saddr != saddr ||
+                   iph->daddr != daddr ||
+                   (*pskb)->mark != mark ||
+                   iph->tos != tos)
+                       if (ip_route_me_harder(pskb, RTN_UNSPEC))
+                               ret = NF_DROP;
+       }
 
        return ret;
 }
index 8f3e92d..0654eaa 100644 (file)
@@ -4,14 +4,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - move L3 protocol dependent part to this file.
- * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - add get_features() to support various size of conntrack
- *       structures.
- *
- * Derived from net/ipv4/netfilter/ip_conntrack_standalone.c
  */
 
 #include <linux/types.h>
@@ -87,7 +79,7 @@ nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
        local_bh_enable();
 
        if (skb)
-               ip_send_check(skb->nh.iph);
+               ip_send_check(ip_hdr(skb));
 
        return skb;
 }
@@ -97,16 +89,16 @@ ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
             u_int8_t *protonum)
 {
        /* Never happen */
-       if ((*pskb)->nh.iph->frag_off & htons(IP_OFFSET)) {
+       if (ip_hdr(*pskb)->frag_off & htons(IP_OFFSET)) {
                if (net_ratelimit()) {
                        printk(KERN_ERR "ipv4_prepare: Frag of proto %u (hook=%u)\n",
-                       (*pskb)->nh.iph->protocol, hooknum);
+                       ip_hdr(*pskb)->protocol, hooknum);
                }
                return -NF_DROP;
        }
 
-       *dataoff = (*pskb)->nh.raw - (*pskb)->data + (*pskb)->nh.iph->ihl*4;
-       *protonum = (*pskb)->nh.iph->protocol;
+       *dataoff = skb_network_offset(*pskb) + ip_hdrlen(*pskb);
+       *protonum = ip_hdr(*pskb)->protocol;
 
        return NF_ACCEPT;
 }
@@ -152,9 +144,8 @@ static unsigned int ipv4_conntrack_help(unsigned int hooknum,
                return NF_ACCEPT;
 
        return help->helper->help(pskb,
-                              (*pskb)->nh.raw - (*pskb)->data
-                                              + (*pskb)->nh.iph->ihl*4,
-                              ct, ctinfo);
+                                 skb_network_offset(*pskb) + ip_hdrlen(*pskb),
+                                 ct, ctinfo);
 }
 
 static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
@@ -171,7 +162,7 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
 #endif
 
        /* Gather fragments. */
-       if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+       if (ip_hdr(*pskb)->frag_off & htons(IP_MF | IP_OFFSET)) {
                *pskb = nf_ct_ipv4_gather_frags(*pskb,
                                                hooknum == NF_IP_PRE_ROUTING ?
                                                IP_DEFRAG_CONNTRACK_IN :
@@ -199,7 +190,7 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum,
 {
        /* root is playing with raw sockets. */
        if ((*pskb)->len < sizeof(struct iphdr)
-           || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
+           || ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
                if (net_ratelimit())
                        printk("ipt_hook: happy cracking.\n");
                return NF_ACCEPT;
index 5fd1e53..f4fc657 100644 (file)
@@ -4,11 +4,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - enable working with Layer 3 protocol independent connection tracking.
- *
- * Derived from net/ipv4/netfilter/ip_conntrack_proto_icmp.c
  */
 
 #include <linux/types.h>
@@ -158,7 +153,7 @@ icmp_error_message(struct sk_buff *skb,
        NF_CT_ASSERT(skb->nfct == NULL);
 
        /* Not enough header? */
-       inside = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_in), &_in);
+       inside = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_in), &_in);
        if (inside == NULL)
                return -NF_ACCEPT;
 
@@ -172,7 +167,7 @@ icmp_error_message(struct sk_buff *skb,
        /* rcu_read_lock()ed by nf_hook_slow */
        innerproto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
 
-       dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp);
+       dataoff = ip_hdrlen(skb) + sizeof(inside->icmp);
        /* Are they talking about one of our connections? */
        if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET,
                             inside->ip.protocol, &origtuple,
@@ -227,7 +222,7 @@ icmp_error(struct sk_buff *skb, unsigned int dataoff,
        struct icmphdr _ih, *icmph;
 
        /* Not enough header? */
-       icmph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_ih), &_ih);
+       icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih);
        if (icmph == NULL) {
                if (LOG_INVALID(IPPROTO_ICMP))
                        nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
index 452e9d3..ea02f00 100644 (file)
@@ -431,7 +431,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
        } *inside;
        struct nf_conntrack_l4proto *l4proto;
        struct nf_conntrack_tuple inner, target;
-       int hdrlen = (*pskb)->nh.iph->ihl * 4;
+       int hdrlen = ip_hdrlen(*pskb);
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
        enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
@@ -439,7 +439,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
        if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
                return 0;
 
-       inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
+       inside = (void *)(*pskb)->data + ip_hdrlen(*pskb);
 
        /* We're actually going to mangle it beyond trivial checksum
           adjustment, so make sure the current checksum is correct. */
@@ -469,9 +469,9 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
        l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
 
        if (!nf_ct_get_tuple(*pskb,
-                            (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr),
-                            (*pskb)->nh.iph->ihl*4 +
-                            sizeof(struct icmphdr) + inside->ip.ihl*4,
+                            ip_hdrlen(*pskb) + sizeof(struct icmphdr),
+                            (ip_hdrlen(*pskb) +
+                             sizeof(struct icmphdr) + inside->ip.ihl * 4),
                             (u_int16_t)AF_INET,
                             inside->ip.protocol,
                             &inner, l3proto, l4proto))
@@ -483,14 +483,14 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
           packet: PREROUTING (DST manip), routing produces ICMP, goes
           through POSTROUTING (which must correct the DST manip). */
        if (!manip_pkt(inside->ip.protocol, pskb,
-                      (*pskb)->nh.iph->ihl*4 + sizeof(inside->icmp),
+                      ip_hdrlen(*pskb) + sizeof(inside->icmp),
                       &ct->tuplehash[!dir].tuple,
                       !manip))
                return 0;
 
        if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
                /* Reloading "inside" here since manip_pkt inner. */
-               inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
+               inside = (void *)(*pskb)->data + ip_hdrlen(*pskb);
                inside->icmp.checksum = 0;
                inside->icmp.checksum =
                        csum_fold(skb_checksum(*pskb, hdrlen,
index 9cbf3f9..fcebc96 100644 (file)
@@ -33,7 +33,7 @@ static int set_addr(struct sk_buff **pskb,
                    unsigned int addroff, __be32 ip, __be16 port)
 {
        enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = ip_conntrack_get(*pskb, &ctinfo);
+       struct nf_conn *ct = nf_ct_get(*pskb, &ctinfo);
        struct {
                __be32 ip;
                __be16 port;
@@ -44,7 +44,7 @@ static int set_addr(struct sk_buff **pskb,
        buf.port = port;
        addroff += dataoff;
 
-       if ((*pskb)->nh.iph->protocol == IPPROTO_TCP) {
+       if (ip_hdr(*pskb)->protocol == IPPROTO_TCP) {
                if (!nf_nat_mangle_tcp_packet(pskb, ct, ctinfo,
                                              addroff, sizeof(buf),
                                              (char *) &buf, sizeof(buf))) {
@@ -55,11 +55,11 @@ static int set_addr(struct sk_buff **pskb,
                }
 
                /* Relocate data pointer */
-               th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4,
+               th = skb_header_pointer(*pskb, ip_hdrlen(*pskb),
                                        sizeof(_tcph), &_tcph);
                if (th == NULL)
                        return -1;
-               *data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 +
+               *data = (*pskb)->data + ip_hdrlen(*pskb) +
                    th->doff * 4 + dataoff;
        } else {
                if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo,
@@ -73,8 +73,8 @@ static int set_addr(struct sk_buff **pskb,
                /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy
                 * or pull everything in a linear buffer, so we can safely
                 * use the skb pointers now */
-               *data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 +
-                   sizeof(struct udphdr);
+               *data = ((*pskb)->data + ip_hdrlen(*pskb) +
+                        sizeof(struct udphdr));
        }
 
        return 0;
@@ -383,7 +383,7 @@ static int nat_h245(struct sk_buff **pskb, struct nf_conn *ct,
 static void ip_nat_q931_expect(struct nf_conn *new,
                               struct nf_conntrack_expect *this)
 {
-       struct ip_nat_range range;
+       struct nf_nat_range range;
 
        if (this->tuple.src.u3.ip != 0) {       /* Only accept calls from GK */
                nf_nat_follow_master(new, this);
index 49a90c3..15b6e5c 100644 (file)
@@ -87,12 +87,13 @@ static void mangle_contents(struct sk_buff *skb,
        unsigned char *data;
 
        BUG_ON(skb_is_nonlinear(skb));
-       data = (unsigned char *)skb->nh.iph + dataoff;
+       data = skb_network_header(skb) + dataoff;
 
        /* move post-replacement */
        memmove(data + match_offset + rep_len,
                data + match_offset + match_len,
-               skb->tail - (data + match_offset + match_len));
+               skb->tail - (skb->network_header + dataoff +
+                            match_offset + match_len));
 
        /* insert data from buffer */
        memcpy(data + match_offset, rep_buffer, rep_len);
@@ -111,8 +112,8 @@ static void mangle_contents(struct sk_buff *skb,
        }
 
        /* fix IP hdr checksum information */
-       skb->nh.iph->tot_len = htons(skb->len);
-       ip_send_check(skb->nh.iph);
+       ip_hdr(skb)->tot_len = htons(skb->len);
+       ip_send_check(ip_hdr(skb));
 }
 
 /* Unusual, but possible case. */
@@ -152,6 +153,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
                         const char *rep_buffer,
                         unsigned int rep_len)
 {
+       struct rtable *rt = (struct rtable *)(*pskb)->dst;
        struct iphdr *iph;
        struct tcphdr *tcph;
        int oldlen, datalen;
@@ -166,7 +168,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
 
        SKB_LINEAR_ASSERT(*pskb);
 
-       iph = (*pskb)->nh.iph;
+       iph = ip_hdr(*pskb);
        tcph = (void *)iph + iph->ihl*4;
 
        oldlen = (*pskb)->len - iph->ihl*4;
@@ -175,11 +177,22 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
 
        datalen = (*pskb)->len - iph->ihl*4;
        if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
-               tcph->check = 0;
-               tcph->check = tcp_v4_check(datalen,
-                                          iph->saddr, iph->daddr,
-                                          csum_partial((char *)tcph,
-                                                       datalen, 0));
+               if (!(rt->rt_flags & RTCF_LOCAL) &&
+                   (*pskb)->dev->features & NETIF_F_ALL_CSUM) {
+                       (*pskb)->ip_summed = CHECKSUM_PARTIAL;
+                       (*pskb)->csum_start = skb_headroom(*pskb) +
+                                             skb_network_offset(*pskb) +
+                                             iph->ihl * 4;
+                       (*pskb)->csum_offset = offsetof(struct tcphdr, check);
+                       tcph->check = ~tcp_v4_check(datalen,
+                                                   iph->saddr, iph->daddr, 0);
+               } else {
+                       tcph->check = 0;
+                       tcph->check = tcp_v4_check(datalen,
+                                                  iph->saddr, iph->daddr,
+                                                  csum_partial((char *)tcph,
+                                                               datalen, 0));
+               }
        } else
                nf_proto_csum_replace2(&tcph->check, *pskb,
                                       htons(oldlen), htons(datalen), 1);
@@ -190,7 +203,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
                                    (int)rep_len - (int)match_len,
                                    ct, ctinfo);
                /* Tell TCP window tracking about seq change */
-               nf_conntrack_tcp_update(*pskb, (*pskb)->nh.iph->ihl*4,
+               nf_conntrack_tcp_update(*pskb, ip_hdrlen(*pskb),
                                        ct, CTINFO2DIR(ctinfo));
        }
        return 1;
@@ -216,12 +229,13 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb,
                         const char *rep_buffer,
                         unsigned int rep_len)
 {
+       struct rtable *rt = (struct rtable *)(*pskb)->dst;
        struct iphdr *iph;
        struct udphdr *udph;
        int datalen, oldlen;
 
        /* UDP helpers might accidentally mangle the wrong packet */
-       iph = (*pskb)->nh.iph;
+       iph = ip_hdr(*pskb);
        if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
                               match_offset + match_len)
                return 0;
@@ -234,7 +248,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb,
            !enlarge_skb(pskb, rep_len - match_len))
                return 0;
 
-       iph = (*pskb)->nh.iph;
+       iph = ip_hdr(*pskb);
        udph = (void *)iph + iph->ihl*4;
 
        oldlen = (*pskb)->len - iph->ihl*4;
@@ -250,13 +264,25 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb,
                return 1;
 
        if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
-               udph->check = 0;
-               udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                               datalen, IPPROTO_UDP,
-                                               csum_partial((char *)udph,
-                                                            datalen, 0));
-               if (!udph->check)
-                       udph->check = CSUM_MANGLED_0;
+               if (!(rt->rt_flags & RTCF_LOCAL) &&
+                   (*pskb)->dev->features & NETIF_F_ALL_CSUM) {
+                       (*pskb)->ip_summed = CHECKSUM_PARTIAL;
+                       (*pskb)->csum_start = skb_headroom(*pskb) +
+                                             skb_network_offset(*pskb) +
+                                             iph->ihl * 4;
+                       (*pskb)->csum_offset = offsetof(struct udphdr, check);
+                       udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                        datalen, IPPROTO_UDP,
+                                                        0);
+               } else {
+                       udph->check = 0;
+                       udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                       datalen, IPPROTO_UDP,
+                                                       csum_partial((char *)udph,
+                                                                    datalen, 0));
+                       if (!udph->check)
+                               udph->check = CSUM_MANGLED_0;
+               }
        } else
                nf_proto_csum_replace2(&udph->check, *pskb,
                                       htons(oldlen), htons(datalen), 1);
@@ -318,8 +344,8 @@ nf_nat_sack_adjust(struct sk_buff **pskb,
        unsigned int dir, optoff, optend;
        struct nf_conn_nat *nat = nfct_nat(ct);
 
-       optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr);
-       optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4;
+       optoff = ip_hdrlen(*pskb) + sizeof(struct tcphdr);
+       optend = ip_hdrlen(*pskb) + tcph->doff * 4;
 
        if (!skb_make_writable(pskb, optend))
                return 0;
@@ -371,10 +397,10 @@ nf_nat_seq_adjust(struct sk_buff **pskb,
        this_way = &nat->info.seq[dir];
        other_way = &nat->info.seq[!dir];
 
-       if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
+       if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph)))
                return 0;
 
-       tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
+       tcph = (void *)(*pskb)->data + ip_hdrlen(*pskb);
        if (after(ntohl(tcph->seq), this_way->correction_pos))
                newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
        else
@@ -399,7 +425,7 @@ nf_nat_seq_adjust(struct sk_buff **pskb,
        if (!nf_nat_sack_adjust(pskb, tcph, ct, ctinfo))
                return 0;
 
-       nf_conntrack_tcp_update(*pskb, (*pskb)->nh.iph->ihl*4, ct, dir);
+       nf_conntrack_tcp_update(*pskb, ip_hdrlen(*pskb), ct, dir);
 
        return 1;
 }
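The new branches above skip the software checksum when the route is not local and the device advertises NETIF_F_ALL_CSUM: only the pseudo-header sum is left in the header, and csum_start/csum_offset tell the driver where to finish the job. A condensed sketch of that state (hypothetical function name, otherwise the same fields the hunks set):

	#include <linux/ip.h>
	#include <linux/tcp.h>
	#include <linux/skbuff.h>
	#include <net/tcp.h>

	/* Prime an skb for hardware TCP checksumming: the device starts
	 * summing at csum_start and writes the folded result at csum_offset
	 * within the transport header.
	 */
	static void example_offload_tcp_csum(struct sk_buff *skb, struct iphdr *iph,
					     struct tcphdr *tcph, int datalen)
	{
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
				  iph->ihl * 4;
		skb->csum_offset = offsetof(struct tcphdr, check);
		tcph->check = ~tcp_v4_check(datalen, iph->saddr, iph->daddr, 0);
	}
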
index 7ba341c..a668887 100644 (file)
@@ -53,7 +53,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
        struct nf_conntrack_tuple t;
        struct nf_ct_pptp_master *ct_pptp_info;
        struct nf_nat_pptp *nat_pptp_info;
-       struct ip_nat_range range;
+       struct nf_nat_range range;
 
        ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
        nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
index 147a437..2a28339 100644 (file)
@@ -191,7 +191,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
 
        if (hooknum == NF_IP_LOCAL_OUT &&
            mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)
-               warn_if_extra_mangle((*pskb)->nh.iph->daddr,
+               warn_if_extra_mangle(ip_hdr(*pskb)->daddr,
                                     mr->range[0].min_ip);
 
        return nf_nat_setup_info(ct, &mr->range[0], hooknum);
index b12cd7c..bfd88e4 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/ip.h>
+#include <net/ip.h>
 #include <linux/udp.h>
 
 #include <net/netfilter/nf_nat.h>
@@ -92,7 +93,7 @@ static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo,
        if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo,
                                      matchoff, matchlen, addr, addrlen))
                return 0;
-       *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+       *dptr = (*pskb)->data + ip_hdrlen(*pskb) + sizeof(struct udphdr);
        return 1;
 
 }
@@ -106,7 +107,7 @@ static unsigned int ip_nat_sip(struct sk_buff **pskb,
        struct addr_map map;
        int dataoff, datalen;
 
-       dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+       dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr);
        datalen = (*pskb)->len - dataoff;
        if (datalen < sizeof("SIP/2.0") - 1)
                return NF_DROP;
@@ -155,7 +156,7 @@ static unsigned int mangle_sip_packet(struct sk_buff **pskb,
                return 0;
 
        /* We need to reload this. Thanks Patrick. */
-       *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+       *dptr = (*pskb)->data + ip_hdrlen(*pskb) + sizeof(struct udphdr);
        return 1;
 }
 
@@ -168,7 +169,7 @@ static int mangle_content_len(struct sk_buff **pskb,
        char buffer[sizeof("65536")];
        int bufflen;
 
-       dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+       dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr);
 
        /* Get actual SDP length */
        if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff,
@@ -200,7 +201,7 @@ static unsigned int mangle_sdp(struct sk_buff **pskb,
        char buffer[sizeof("nnn.nnn.nnn.nnn")];
        unsigned int dataoff, bufflen;
 
-       dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+       dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr);
 
        /* Mangle owner and contact info. */
        bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip));
index ce5c493..6e88505 100644 (file)
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  *
  * Author: James Morris <jmorris@intercode.com.au>
- *
- * Updates:
- * 2000-08-06: Convert to new helper API (Harald Welte).
- *
  */
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -1194,7 +1190,7 @@ static int snmp_translate(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          struct sk_buff **pskb)
 {
-       struct iphdr *iph = (*pskb)->nh.iph;
+       struct iphdr *iph = ip_hdr(*pskb);
        struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
        u_int16_t udplen = ntohs(udph->len);
        u_int16_t paylen = udplen - sizeof(struct udphdr);
@@ -1235,7 +1231,7 @@ static int help(struct sk_buff **pskb, unsigned int protoff,
 {
        int dir = CTINFO2DIR(ctinfo);
        unsigned int ret;
-       struct iphdr *iph = (*pskb)->nh.iph;
+       struct iphdr *iph = ip_hdr(*pskb);
        struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
 
        /* SNMP replies and originating SNMP traps get mangled */
index 15aa3db..64bbed2 100644 (file)
@@ -86,8 +86,7 @@ nf_nat_fn(unsigned int hooknum,
 
        /* We never see fragments: conntrack defrags on pre-routing
           and local-out, and nf_nat_out protects post-routing. */
-       NF_CT_ASSERT(!((*pskb)->nh.iph->frag_off
-                      & htons(IP_MF|IP_OFFSET)));
+       NF_CT_ASSERT(!(ip_hdr(*pskb)->frag_off & htons(IP_MF | IP_OFFSET)));
 
        ct = nf_ct_get(*pskb, &ctinfo);
        /* Can't track?  It's not due to stress, or conntrack would
@@ -98,11 +97,10 @@ nf_nat_fn(unsigned int hooknum,
                /* Exception: ICMP redirect to new connection (not in
                   hash table yet).  We must not let this through, in
                   case we're doing NAT to the same network. */
-               if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
+               if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP) {
                        struct icmphdr _hdr, *hp;
 
-                       hp = skb_header_pointer(*pskb,
-                                               (*pskb)->nh.iph->ihl*4,
+                       hp = skb_header_pointer(*pskb, ip_hdrlen(*pskb),
                                                sizeof(_hdr), &_hdr);
                        if (hp != NULL &&
                            hp->type == ICMP_REDIRECT)
@@ -122,7 +120,7 @@ nf_nat_fn(unsigned int hooknum,
        switch (ctinfo) {
        case IP_CT_RELATED:
        case IP_CT_RELATED+IP_CT_IS_REPLY:
-               if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
+               if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(ct, ctinfo,
                                                           hooknum, pskb))
                                return NF_DROP;
@@ -177,11 +175,11 @@ nf_nat_in(unsigned int hooknum,
          int (*okfn)(struct sk_buff *))
 {
        unsigned int ret;
-       __be32 daddr = (*pskb)->nh.iph->daddr;
+       __be32 daddr = ip_hdr(*pskb)->daddr;
 
        ret = nf_nat_fn(hooknum, pskb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
-           daddr != (*pskb)->nh.iph->daddr) {
+           daddr != ip_hdr(*pskb)->daddr) {
                dst_release((*pskb)->dst);
                (*pskb)->dst = NULL;
        }
@@ -203,7 +201,7 @@ nf_nat_out(unsigned int hooknum,
 
        /* root is playing with raw sockets. */
        if ((*pskb)->len < sizeof(struct iphdr) ||
-           (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr))
+           ip_hdrlen(*pskb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
        ret = nf_nat_fn(hooknum, pskb, in, out, okfn);
@@ -236,7 +234,7 @@ nf_nat_local_fn(unsigned int hooknum,
 
        /* root is playing with raw sockets. */
        if ((*pskb)->len < sizeof(struct iphdr) ||
-           (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr))
+           ip_hdrlen(*pskb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
        ret = nf_nat_fn(hooknum, pskb, in, out, okfn);
index ae68a69..37ab580 100644 (file)
@@ -87,19 +87,6 @@ static const struct file_operations sockstat_seq_fops = {
        .release = single_release,
 };
 
-static unsigned long
-fold_field(void *mib[], int offt)
-{
-       unsigned long res = 0;
-       int i;
-
-       for_each_possible_cpu(i) {
-               res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
-               res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
-       }
-       return res;
-}
-
 /* snmp items */
 static const struct snmp_mib snmp4_ipstats_list[] = {
        SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INRECEIVES),
@@ -266,8 +253,8 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
 
        for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          fold_field((void **) ip_statistics,
-                                     snmp4_ipstats_list[i].entry));
+                          snmp_fold_field((void **)ip_statistics,
+                                          snmp4_ipstats_list[i].entry));
 
        seq_puts(seq, "\nIcmp:");
        for (i = 0; snmp4_icmp_list[i].name != NULL; i++)
@@ -276,8 +263,8 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nIcmp:");
        for (i = 0; snmp4_icmp_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          fold_field((void **) icmp_statistics,
-                                     snmp4_icmp_list[i].entry));
+                          snmp_fold_field((void **)icmp_statistics,
+                                          snmp4_icmp_list[i].entry));
 
        seq_puts(seq, "\nTcp:");
        for (i = 0; snmp4_tcp_list[i].name != NULL; i++)
@@ -288,12 +275,12 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
                /* MaxConn field is signed, RFC 2012 */
                if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
                        seq_printf(seq, " %ld",
-                                  fold_field((void **) tcp_statistics,
-                                             snmp4_tcp_list[i].entry));
+                                  snmp_fold_field((void **)tcp_statistics,
+                                                  snmp4_tcp_list[i].entry));
                else
                        seq_printf(seq, " %lu",
-                                  fold_field((void **) tcp_statistics,
-                                             snmp4_tcp_list[i].entry));
+                                  snmp_fold_field((void **)tcp_statistics,
+                                                  snmp4_tcp_list[i].entry));
        }
 
        seq_puts(seq, "\nUdp:");
@@ -303,8 +290,8 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nUdp:");
        for (i = 0; snmp4_udp_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          fold_field((void **) udp_statistics,
-                                     snmp4_udp_list[i].entry));
+                          snmp_fold_field((void **)udp_statistics,
+                                          snmp4_udp_list[i].entry));
 
        /* the UDP and UDP-Lite MIBs are the same */
        seq_puts(seq, "\nUdpLite:");
@@ -314,8 +301,8 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nUdpLite:");
        for (i = 0; snmp4_udp_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          fold_field((void **) udplite_statistics,
-                                     snmp4_udp_list[i].entry)     );
+                          snmp_fold_field((void **)udplite_statistics,
+                                          snmp4_udp_list[i].entry));
 
        seq_putc(seq, '\n');
        return 0;
@@ -348,8 +335,8 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nTcpExt:");
        for (i = 0; snmp4_net_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          fold_field((void **) net_statistics,
-                                     snmp4_net_list[i].entry));
+                          snmp_fold_field((void **)net_statistics,
+                                          snmp4_net_list[i].entry));
 
        seq_putc(seq, '\n');
        return 0;
index da70fef..971ab93 100644 (file)
@@ -45,7 +45,7 @@
 #include <net/ipip.h>
 #include <linux/igmp.h>
 
-struct net_protocol *inet_protos[MAX_INET_PROTOS];
+struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp;
 static DEFINE_SPINLOCK(inet_proto_lock);
 
 /*
index 87e9c16..24d7c9f 100644 (file)
@@ -132,7 +132,7 @@ static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
                return 1;
 
-       type = skb->h.icmph->type;
+       type = icmp_hdr(skb)->type;
        if (type < 32) {
                __u32 data = raw_sk(sk)->filter.data;
 
@@ -184,8 +184,8 @@ out:
 void raw_err (struct sock *sk, struct sk_buff *skb, u32 info)
 {
        struct inet_sock *inet = inet_sk(sk);
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        int err = 0;
        int harderr = 0;
 
@@ -256,7 +256,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
        }
        nf_reset(skb);
 
-       skb_push(skb, skb->data - skb->nh.raw);
+       skb_push(skb, skb->data - skb_network_header(skb));
 
        raw_rcv_skb(sk, skb);
        return 0;
@@ -291,11 +291,13 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
        skb->priority = sk->sk_priority;
        skb->dst = dst_clone(&rt->u.dst);
 
-       skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
+       skb_reset_network_header(skb);
+       iph = ip_hdr(skb);
+       skb_put(skb, length);
 
        skb->ip_summed = CHECKSUM_NONE;
 
-       skb->h.raw = skb->nh.raw;
+       skb->transport_header = skb->network_header;
        err = memcpy_fromiovecend((void *)iph, from, 0, length);
        if (err)
                goto error_fault;
@@ -613,7 +615,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        /* Copy the address. */
        if (sin) {
                sin->sin_family = AF_INET;
-               sin->sin_addr.s_addr = skb->nh.iph->saddr;
+               sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                sin->sin_port = 0;
                memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
        }
@@ -887,7 +889,7 @@ static int raw_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations raw_seq_ops = {
+static const struct seq_operations raw_seq_ops = {
        .start = raw_seq_start,
        .next  = raw_seq_next,
        .stop  = raw_seq_stop,
index 37e0d4d..cb76e3c 100644 (file)
@@ -82,7 +82,6 @@
 #include <linux/proc_fs.h>
 #include <linux/init.h>
 #include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/pkt_sched.h>
 #include <net/xfrm.h>
 #include <net/ip_mp_alg.h>
 #include <net/netevent.h>
+#include <net/rtnetlink.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
@@ -364,7 +364,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations rt_cache_seq_ops = {
+static const struct seq_operations rt_cache_seq_ops = {
        .start  = rt_cache_seq_start,
        .next   = rt_cache_seq_next,
        .stop   = rt_cache_seq_stop,
@@ -470,7 +470,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations rt_cpu_seq_ops = {
+static const struct seq_operations rt_cpu_seq_ops = {
        .start  = rt_cpu_seq_start,
        .next   = rt_cpu_seq_next,
        .stop   = rt_cpu_seq_stop,
@@ -1519,7 +1519,7 @@ static void ipv4_link_failure(struct sk_buff *skb)
 static int ip_rt_bug(struct sk_buff *skb)
 {
        printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
-               NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr),
+               NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
                skb->dev ? skb->dev->name : "?");
        kfree_skb(skb);
        return 0;
@@ -1698,9 +1698,9 @@ static void ip_handle_martian_source(struct net_device *dev,
                printk(KERN_WARNING "martian source %u.%u.%u.%u from "
                        "%u.%u.%u.%u, on dev %s\n",
                        NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
-               if (dev->hard_header_len && skb->mac.raw) {
+               if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
                        int i;
-                       unsigned char *p = skb->mac.raw;
+                       const unsigned char *p = skb_mac_header(skb);
                        printk(KERN_WARNING "ll header: ");
                        for (i = 0; i < dev->hard_header_len; i++, p++) {
                                printk("%02x", *p);
@@ -2134,7 +2134,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                rcu_read_lock();
                if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
                        int our = ip_check_mc(in_dev, daddr, saddr,
-                               skb->nh.iph->protocol);
+                               ip_hdr(skb)->protocol);
                        if (our
 #ifdef CONFIG_IP_MROUTE
                            || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
@@ -2396,7 +2396,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
 
                /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
                dev_out = ip_dev_find(oldflp->fl4_src);
-               if (dev_out == NULL)
+               if ((dev_out == NULL) && !(sysctl_ip_nonlocal_bind))
                        goto out;
 
                /* I removed check for oif == dev_out->oif here.
@@ -2407,7 +2407,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
                      of another iface. --ANK
                 */
 
-               if (oldflp->oif == 0
+               if (dev_out && oldflp->oif == 0
                    && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
                        /* Special hack: user can direct multicasts
                           and limited broadcast via necessary interface
@@ -2683,7 +2683,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
                id = rt->peer->ip_id_count;
                if (rt->peer->tcp_ts_stamp) {
                        ts = rt->peer->tcp_ts;
-                       tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
+                       tsage = get_seconds() - rt->peer->tcp_ts_stamp;
                }
        }
 
@@ -2721,7 +2721,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
+static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
 {
        struct rtmsg *rtm;
        struct nlattr *tb[RTA_MAX+1];
@@ -2747,10 +2747,11 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
        /* Reserve room for dummy headers, this skb can pass
           through good chunk of routing engine.
         */
-       skb->mac.raw = skb->nh.raw = skb->data;
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
 
        /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
-       skb->nh.iph->protocol = IPPROTO_ICMP;
+       ip_hdr(skb)->protocol = IPPROTO_ICMP;
        skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
 
        src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
@@ -3193,6 +3194,8 @@ int __init ip_rt_init(void)
        xfrm_init();
        xfrm4_init();
 #endif
+       rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
+
        return rc;
 }
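inet_rtm_getroute can become static because the hunk above now registers it through the rtnetlink dispatch API rather than exporting it. A minimal sketch of that registration pattern with hypothetical handler names (the rtnl_register() arguments are assumed from the call added above):

	#include <net/rtnetlink.h>

	/* "doit" handler invoked for RTM_GETROUTE requests on PF_INET */
	static int example_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
				    void *arg)
	{
		return 0;	/* build and send the reply here */
	}

	static void example_register(void)
	{
		/* family, message type, request handler, optional dump handler */
		rtnl_register(PF_INET, RTM_GETROUTE, example_getroute, NULL);
	}
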
 
index 33016cc..2da1be0 100644 (file)
@@ -125,10 +125,11 @@ static __u16 const msstab[] = {
 __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       const struct iphdr *iph = ip_hdr(skb);
+       const struct tcphdr *th = tcp_hdr(skb);
        int mssind;
        const __u16 mss = *mssp;
 
-
        tp->last_synq_overflow = jiffies;
 
        /* XXX sort msstab[] by probability?  Binary search? */
@@ -138,9 +139,8 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 
        NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
 
-       return secure_tcp_syn_cookie(skb->nh.iph->saddr, skb->nh.iph->daddr,
-                                    skb->h.th->source, skb->h.th->dest,
-                                    ntohl(skb->h.th->seq),
+       return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
+                                    th->source, th->dest, ntohl(th->seq),
                                     jiffies / (HZ * 60), mssind);
 }
 
@@ -157,14 +157,13 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
  */
 static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
 {
-       __u32 seq;
-       __u32 mssind;
-
-       seq = ntohl(skb->h.th->seq)-1;
-       mssind = check_tcp_syn_cookie(cookie,
-                                     skb->nh.iph->saddr, skb->nh.iph->daddr,
-                                     skb->h.th->source, skb->h.th->dest,
-                                     seq, jiffies / (HZ * 60), COUNTER_TRIES);
+       const struct iphdr *iph = ip_hdr(skb);
+       const struct tcphdr *th = tcp_hdr(skb);
+       __u32 seq = ntohl(th->seq) - 1;
+       __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
+                                           th->source, th->dest, seq,
+                                           jiffies / (HZ * 60),
+                                           COUNTER_TRIES);
 
        return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
 }
@@ -191,14 +190,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        struct inet_request_sock *ireq;
        struct tcp_request_sock *treq;
        struct tcp_sock *tp = tcp_sk(sk);
-       __u32 cookie = ntohl(skb->h.th->ack_seq) - 1;
+       const struct tcphdr *th = tcp_hdr(skb);
+       __u32 cookie = ntohl(th->ack_seq) - 1;
        struct sock *ret = sk;
        struct request_sock *req;
        int mss;
        struct rtable *rt;
        __u8 rcv_wscale;
 
-       if (!sysctl_tcp_syncookies || !skb->h.th->ack)
+       if (!sysctl_tcp_syncookies || !th->ack)
                goto out;
 
        if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
@@ -220,12 +220,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        }
        ireq = inet_rsk(req);
        treq = tcp_rsk(req);
-       treq->rcv_isn           = ntohl(skb->h.th->seq) - 1;
+       treq->rcv_isn           = ntohl(th->seq) - 1;
        treq->snt_isn           = cookie;
        req->mss                = mss;
-       ireq->rmt_port          = skb->h.th->source;
-       ireq->loc_addr          = skb->nh.iph->daddr;
-       ireq->rmt_addr          = skb->nh.iph->saddr;
+       ireq->rmt_port          = th->source;
+       ireq->loc_addr          = ip_hdr(skb)->daddr;
+       ireq->rmt_addr          = ip_hdr(skb)->saddr;
        ireq->opt               = NULL;
 
        /* We threw the options of the initial SYN away, so we hope
@@ -261,8 +261,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                                                .tos = RT_CONN_FLAGS(sk) } },
                                    .proto = IPPROTO_TCP,
                                    .uli_u = { .ports =
-                                              { .sport = skb->h.th->dest,
-                                                .dport = skb->h.th->source } } };
+                                              { .sport = th->dest,
+                                                .dport = th->source } } };
                security_req_classify_flow(req, &fl);
                if (ip_route_output_key(&rt, &fl)) {
                        reqsk_free(req);
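
The syncookie hunks above (this looks like net/ipv4/syncookies.c) only swap the raw skb->nh.iph / skb->h.th accesses for the ip_hdr()/tcp_hdr() helpers; the cookie scheme itself is untouched: the server folds an index into msstab[] and a coarse time counter into the ISN, and cookie_check() later recovers that index from the ACK. The user-space sketch below shows that round trip with a made-up mixing function standing in for secure_tcp_syn_cookie()/check_tcp_syn_cookie(); it illustrates the idea only and is not the kernel algorithm.

#include <stdint.h>
#include <stdio.h>

#define NUM_MSS       4
#define COUNTER_TRIES 4
static const uint16_t msstab[NUM_MSS] = { 512, 536, 1024, 1460 };

/* Toy keyed mix; the real code uses a cryptographic hash over the 4-tuple. */
static uint32_t toy_mix(uint32_t saddr, uint32_t daddr,
                        uint16_t sport, uint16_t dport, uint32_t count)
{
        uint32_t h = saddr * 2654435761u ^ daddr * 2246822519u ^
                     (((uint32_t)sport << 16) | dport) ^ count * 2654435769u;
        h ^= h >> 15;
        return h;
}

/* Low 8 bits carry the MSS index; the rest binds the cookie to the
 * connection tuple and the current coarse time counter. */
static uint32_t make_cookie(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp,
                            uint32_t count, uint32_t mssind)
{
        return (toy_mix(s, d, sp, dp, count) & ~0xffu) | (mssind & 0xffu);
}

/* Returns an MSS value if the cookie matches one of the last COUNTER_TRIES
 * counter values, 0 otherwise (mirrors the cookie_check() contract). */
static uint16_t check_cookie(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp,
                             uint32_t now, uint32_t cookie)
{
        for (uint32_t i = 0; i < COUNTER_TRIES; i++) {
                uint32_t c = now - i;

                if ((cookie & ~0xffu) == (toy_mix(s, d, sp, dp, c) & ~0xffu)) {
                        uint32_t mssind = cookie & 0xffu;
                        return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
                }
        }
        return 0;
}

int main(void)
{
        uint32_t cookie = make_cookie(0x0a000001, 0x0a000002, 40000, 80, 100, 2);

        printf("recovered mss: %u\n", check_cookie(0x0a000001, 0x0a000002,
                                                   40000, 80, 101, cookie));
        return 0;
}
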
index 0aa3047..6817d64 100644 (file)
@@ -646,6 +646,14 @@ ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
+       {
+               .ctl_name       = NET_TCP_FRTO_RESPONSE,
+               .procname       = "tcp_frto_response",
+               .data           = &sysctl_tcp_frto_response,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec
+       },
        {
                .ctl_name       = NET_TCP_LOW_LATENCY,
                .procname       = "tcp_low_latency",
@@ -803,6 +811,14 @@ ctl_table ipv4_table[] = {
                .proc_handler   = &proc_allowed_congestion_control,
                .strategy       = &strategy_allowed_congestion_control,
        },
+       {
+               .ctl_name       = NET_TCP_MAX_SSTHRESH,
+               .procname       = "tcp_max_ssthresh",
+               .data           = &sysctl_tcp_max_ssthresh,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
        { .ctl_name = 0 }
 };
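
Both new entries above are plain integers handled by proc_dointvec, so once this patch is applied they should surface as /proc/sys/net/ipv4/tcp_frto_response and /proc/sys/net/ipv4/tcp_max_ssthresh. A minimal user-space sketch of reading and setting such a knob (the value 100 is only an example, and writing requires root):

#include <stdio.h>

int main(void)
{
        const char *path = "/proc/sys/net/ipv4/tcp_max_ssthresh";
        int val = -1;
        FILE *f = fopen(path, "r");

        if (f) {
                if (fscanf(f, "%d", &val) == 1)
                        printf("%s = %d\n", path, val);
                fclose(f);
        }

        f = fopen(path, "w");            /* requires root */
        if (f) {
                fprintf(f, "%d\n", 100); /* cap slow-start growth above cwnd 100 */
                fclose(f);
        } else {
                perror(path);
        }
        return 0;
}
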
 
index 3834b10..2cf9a89 100644 (file)
@@ -297,7 +297,7 @@ EXPORT_SYMBOL(tcp_sockets_allocated);
  * All the sk_stream_mem_schedule() is of this nature: accounting
  * is strict, actions are advisory and have some latency.
  */
-int tcp_memory_pressure;
+int tcp_memory_pressure __read_mostly;
 
 EXPORT_SYMBOL(tcp_memory_pressure);
 
@@ -425,7 +425,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                        /* Subtract 1, if FIN is in queue. */
                        if (answ && !skb_queue_empty(&sk->sk_receive_queue))
                                answ -=
-                      ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
+                      tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
                } else
                        answ = tp->urg_seq - tp->copied_seq;
                release_sock(sk);
@@ -444,7 +444,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                break;
        default:
                return -ENOIOCTLCMD;
-       };
+       }
 
        return put_user(answ, (int __user *)arg);
 }
@@ -460,9 +460,9 @@ static inline int forced_push(struct tcp_sock *tp)
        return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
 
-static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
-                             struct sk_buff *skb)
+static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
        skb->csum    = 0;
@@ -470,10 +470,8 @@ static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
        tcb->flags   = TCPCB_FLAG_ACK;
        tcb->sacked  = 0;
        skb_header_release(skb);
-       __skb_queue_tail(&sk->sk_write_queue, skb);
+       tcp_add_write_queue_tail(sk, skb);
        sk_charge_skb(sk, skb);
-       if (!sk->sk_send_head)
-               sk->sk_send_head = skb;
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
@@ -488,15 +486,17 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
        }
 }
 
-static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
-                           int mss_now, int nonagle)
+static inline void tcp_push(struct sock *sk, int flags, int mss_now,
+                           int nonagle)
 {
-       if (sk->sk_send_head) {
-               struct sk_buff *skb = sk->sk_write_queue.prev;
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (tcp_send_head(sk)) {
+               struct sk_buff *skb = tcp_write_queue_tail(sk);
                if (!(flags & MSG_MORE) || forced_push(tp))
                        tcp_mark_push(tp, skb);
                tcp_mark_urg(tp, flags, skb);
-               __tcp_push_pending_frames(sk, tp, mss_now,
+               __tcp_push_pending_frames(sk, mss_now,
                                          (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
        }
 }
@@ -526,13 +526,13 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
                goto do_error;
 
        while (psize > 0) {
-               struct sk_buff *skb = sk->sk_write_queue.prev;
+               struct sk_buff *skb = tcp_write_queue_tail(sk);
                struct page *page = pages[poffset / PAGE_SIZE];
                int copy, i, can_coalesce;
                int offset = poffset % PAGE_SIZE;
                int size = min_t(size_t, psize, PAGE_SIZE - offset);
 
-               if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
+               if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
 new_segment:
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;
@@ -542,7 +542,7 @@ new_segment:
                        if (!skb)
                                goto wait_for_memory;
 
-                       skb_entail(sk, tp, skb);
+                       skb_entail(sk, skb);
                        copy = size_goal;
                }
 
@@ -588,8 +588,8 @@ new_segment:
 
                if (forced_push(tp)) {
                        tcp_mark_push(tp, skb);
-                       __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-               } else if (skb == sk->sk_send_head)
+                       __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
+               } else if (skb == tcp_send_head(sk))
                        tcp_push_one(sk, mss_now);
                continue;
 
@@ -597,7 +597,7 @@ wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
                if (copied)
-                       tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+                       tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
                if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                        goto do_error;
@@ -608,7 +608,7 @@ wait_for_memory:
 
 out:
        if (copied)
-               tcp_push(sk, tp, flags, mss_now, tp->nonagle);
+               tcp_push(sk, flags, mss_now, tp->nonagle);
        return copied;
 
 do_error:
@@ -639,8 +639,9 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
 #define TCP_PAGE(sk)   (sk->sk_sndmsg_page)
 #define TCP_OFF(sk)    (sk->sk_sndmsg_off)
 
-static inline int select_size(struct sock *sk, struct tcp_sock *tp)
+static inline int select_size(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;
 
        if (sk->sk_route_caps & NETIF_F_SG) {
@@ -704,9 +705,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                while (seglen > 0) {
                        int copy;
 
-                       skb = sk->sk_write_queue.prev;
+                       skb = tcp_write_queue_tail(sk);
 
-                       if (!sk->sk_send_head ||
+                       if (!tcp_send_head(sk) ||
                            (copy = size_goal - skb->len) <= 0) {
 
 new_segment:
@@ -716,7 +717,7 @@ new_segment:
                                if (!sk_stream_memory_free(sk))
                                        goto wait_for_sndbuf;
 
-                               skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
+                               skb = sk_stream_alloc_pskb(sk, select_size(sk),
                                                           0, sk->sk_allocation);
                                if (!skb)
                                        goto wait_for_memory;
@@ -727,7 +728,7 @@ new_segment:
                                if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
                                        skb->ip_summed = CHECKSUM_PARTIAL;
 
-                               skb_entail(sk, tp, skb);
+                               skb_entail(sk, skb);
                                copy = size_goal;
                        }
 
@@ -832,8 +833,8 @@ new_segment:
 
                        if (forced_push(tp)) {
                                tcp_mark_push(tp, skb);
-                               __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-                       } else if (skb == sk->sk_send_head)
+                               __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
+                       } else if (skb == tcp_send_head(sk))
                                tcp_push_one(sk, mss_now);
                        continue;
 
@@ -841,7 +842,7 @@ wait_for_sndbuf:
                        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
                        if (copied)
-                               tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+                               tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
                        if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                                goto do_error;
@@ -853,16 +854,18 @@ wait_for_memory:
 
 out:
        if (copied)
-               tcp_push(sk, tp, flags, mss_now, tp->nonagle);
+               tcp_push(sk, flags, mss_now, tp->nonagle);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;
 
 do_fault:
        if (!skb->len) {
-               if (sk->sk_send_head == skb)
-                       sk->sk_send_head = NULL;
-               __skb_unlink(skb, &sk->sk_write_queue);
+               tcp_unlink_write_queue(skb, sk);
+               /* It is the one place in all of TCP, except connection
+                * reset, where we can be unlinking the send_head.
+                */
+               tcp_check_send_head(sk, skb);
                sk_stream_free_skb(sk, skb);
        }
 
@@ -1016,9 +1019,9 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 
        skb_queue_walk(&sk->sk_receive_queue, skb) {
                offset = seq - TCP_SKB_CB(skb)->seq;
-               if (skb->h.th->syn)
+               if (tcp_hdr(skb)->syn)
                        offset--;
-               if (offset < skb->len || skb->h.th->fin) {
+               if (offset < skb->len || tcp_hdr(skb)->fin) {
                        *off = offset;
                        return skb;
                }
@@ -1070,7 +1073,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                        if (offset != skb->len)
                                break;
                }
-               if (skb->h.th->fin) {
+               if (tcp_hdr(skb)->fin) {
                        sk_eat_skb(sk, skb, 0);
                        ++seq;
                        break;
@@ -1174,11 +1177,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                break;
                        }
                        offset = *seq - TCP_SKB_CB(skb)->seq;
-                       if (skb->h.th->syn)
+                       if (tcp_hdr(skb)->syn)
                                offset--;
                        if (offset < skb->len)
                                goto found_ok_skb;
-                       if (skb->h.th->fin)
+                       if (tcp_hdr(skb)->fin)
                                goto found_fin_ok;
                        BUG_TRAP(flags & MSG_PEEK);
                        skb = skb->next;
@@ -1389,12 +1392,12 @@ do_prequeue:
 skip_copy:
                if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
                        tp->urg_data = 0;
-                       tcp_fast_path_check(sk, tp);
+                       tcp_fast_path_check(sk);
                }
                if (used + offset < skb->len)
                        continue;
 
-               if (skb->h.th->fin)
+               if (tcp_hdr(skb)->fin)
                        goto found_fin_ok;
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, copied_early);
@@ -1563,7 +1566,7 @@ void tcp_close(struct sock *sk, long timeout)
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
-                         skb->h.th->fin;
+                         tcp_hdr(skb)->fin;
                data_was_unread += len;
                __kfree_skb(skb);
        }
@@ -1732,7 +1735,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 
        tcp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
-       sk_stream_writequeue_purge(sk);
+       tcp_write_queue_purge(sk);
        __skb_queue_purge(&tp->out_of_order_queue);
 #ifdef CONFIG_NET_DMA
        __skb_queue_purge(&sk->sk_async_wait_queue);
@@ -1758,7 +1761,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tcp_set_ca_state(sk, TCP_CA_Open);
        tcp_clear_retrans(tp);
        inet_csk_delack_init(sk);
-       sk->sk_send_head = NULL;
+       tcp_init_send_head(sk);
        tp->rx_opt.saw_tstamp = 0;
        tcp_sack_reset(&tp->rx_opt);
        __sk_dst_reset(sk);
@@ -1830,7 +1833,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                         * for currently queued segments.
                         */
                        tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
-                       tcp_push_pending_frames(sk, tp);
+                       tcp_push_pending_frames(sk);
                } else {
                        tp->nonagle &= ~TCP_NAGLE_OFF;
                }
@@ -1854,7 +1857,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                        tp->nonagle &= ~TCP_NAGLE_CORK;
                        if (tp->nonagle&TCP_NAGLE_OFF)
                                tp->nonagle |= TCP_NAGLE_PUSH;
-                       tcp_push_pending_frames(sk, tp);
+                       tcp_push_pending_frames(sk);
                }
                break;
 
@@ -1954,7 +1957,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        default:
                err = -ENOPROTOOPT;
                break;
-       };
+       }
+
        release_sock(sk);
        return err;
 }
@@ -2124,7 +2128,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                return 0;
        default:
                return -ENOPROTOOPT;
-       };
+       }
 
        if (put_user(len, optlen))
                return -EFAULT;
@@ -2170,7 +2174,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
        if (!pskb_may_pull(skb, sizeof(*th)))
                goto out;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;
@@ -2210,7 +2214,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
        delta = htonl(oldlen + (thlen + len));
 
        skb = segs;
-       th = skb->h.th;
+       th = tcp_hdr(skb);
        seq = ntohl(th->seq);
 
        do {
@@ -2219,23 +2223,25 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
                th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                       (__force u32)delta));
                if (skb->ip_summed != CHECKSUM_PARTIAL)
-                       th->check = csum_fold(csum_partial(skb->h.raw, thlen,
-                                                          skb->csum));
+                       th->check =
+                            csum_fold(csum_partial(skb_transport_header(skb),
+                                                   thlen, skb->csum));
 
                seq += len;
                skb = skb->next;
-               th = skb->h.th;
+               th = tcp_hdr(skb);
 
                th->seq = htonl(seq);
                th->cwr = 0;
        } while (skb->next);
 
-       delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
+       delta = htonl(oldlen + (skb->tail - skb->transport_header) +
+                     skb->data_len);
        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                (__force u32)delta));
        if (skb->ip_summed != CHECKSUM_PARTIAL)
-               th->check = csum_fold(csum_partial(skb->h.raw, thlen,
-                                                  skb->csum));
+               th->check = csum_fold(csum_partial(skb_transport_header(skb),
+                                                  thlen, skb->csum));
 
 out:
        return segs;
@@ -2372,6 +2378,23 @@ void __tcp_put_md5sig_pool(void)
 EXPORT_SYMBOL(__tcp_put_md5sig_pool);
 #endif
 
+void tcp_done(struct sock *sk)
+{
+       if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+               TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+
+       tcp_set_state(sk, TCP_CLOSE);
+       tcp_clear_xmit_timers(sk);
+
+       sk->sk_shutdown = SHUTDOWN_MASK;
+
+       if (!sock_flag(sk, SOCK_DEAD))
+               sk->sk_state_change(sk);
+       else
+               inet_csk_destroy_sock(sk);
+}
+EXPORT_SYMBOL_GPL(tcp_done);
+
 extern void __skb_cb_too_small_for_tcp(int, int);
 extern struct tcp_congestion_ops tcp_reno;
 
index 5730333..281c9f9 100644 (file)
@@ -206,7 +206,7 @@ static void bictcp_state(struct sock *sk, u8 new_state)
 /* Track delayed acknowledgment ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct sock *sk, u32 cnt)
+static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
index 5c8caf4..86b2653 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/list.h>
 #include <net/tcp.h>
 
+int sysctl_tcp_max_ssthresh = 0;
+
 static DEFINE_SPINLOCK(tcp_cong_list_lock);
 static LIST_HEAD(tcp_cong_list);
 
@@ -77,18 +79,19 @@ void tcp_init_congestion_control(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_congestion_ops *ca;
 
-       if (icsk->icsk_ca_ops != &tcp_init_congestion_ops)
-               return;
+       /* if no choice has been made yet, assign the current default */
+       if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
+               rcu_read_lock();
+               list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
+                       if (try_module_get(ca->owner)) {
+                               icsk->icsk_ca_ops = ca;
+                               break;
+                       }
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
-               if (try_module_get(ca->owner)) {
-                       icsk->icsk_ca_ops = ca;
-                       break;
+                       /* fallback to next available */
                }
-
+               rcu_read_unlock();
        }
-       rcu_read_unlock();
 
        if (icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
@@ -123,7 +126,7 @@ int tcp_set_default_congestion_control(const char *name)
 #endif
 
        if (ca) {
-               ca->non_restricted = 1; /* default is always allowed */
+               ca->flags |= TCP_CONG_NON_RESTRICTED;   /* default is always allowed */
                list_move(&ca->list, &tcp_cong_list);
                ret = 0;
        }
@@ -178,7 +181,7 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
        *buf = '\0';
        rcu_read_lock();
        list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
-               if (!ca->non_restricted)
+               if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
                        continue;
                offs += snprintf(buf + offs, maxlen - offs,
                                 "%s%s",
@@ -209,16 +212,16 @@ int tcp_set_allowed_congestion_control(char *val)
                }
        }
 
-       /* pass 2 clear */
+       /* pass 2 clear old values */
        list_for_each_entry_rcu(ca, &tcp_cong_list, list)
-               ca->non_restricted = 0;
+               ca->flags &= ~TCP_CONG_NON_RESTRICTED;
 
        /* pass 3 mark as allowed */
        while ((name = strsep(&val, " ")) && *name) {
                ca = tcp_ca_find(name);
                WARN_ON(!ca);
                if (ca)
-                       ca->non_restricted = 1;
+                       ca->flags |= TCP_CONG_NON_RESTRICTED;
        }
 out:
        spin_unlock(&tcp_cong_list_lock);
@@ -236,6 +239,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 
        rcu_read_lock();
        ca = tcp_ca_find(name);
+
        /* no change asking for existing value */
        if (ca == icsk->icsk_ca_ops)
                goto out;
@@ -252,7 +256,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
        if (!ca)
                err = -ENOENT;
 
-       else if (!(ca->non_restricted || capable(CAP_NET_ADMIN)))
+       else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
                err = -EPERM;
 
        else if (!try_module_get(ca->owner))
@@ -261,7 +265,8 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
        else {
                tcp_cleanup_congestion_control(sk);
                icsk->icsk_ca_ops = ca;
-               if (icsk->icsk_ca_ops->init)
+
+               if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
                        icsk->icsk_ca_ops->init(sk);
        }
  out:
@@ -271,10 +276,13 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 
 
 /*
- * Linear increase during slow start
+ * Slow start (exponential increase) with
+ * RFC3742 Limited Slow Start (fast linear increase) support.
  */
 void tcp_slow_start(struct tcp_sock *tp)
 {
+       int cnt = 0;
+
        if (sysctl_tcp_abc) {
                /* RFC3465: Slow Start
                 * TCP sender SHOULD increase cwnd by the number of
@@ -283,17 +291,25 @@ void tcp_slow_start(struct tcp_sock *tp)
                 */
                if (tp->bytes_acked < tp->mss_cache)
                        return;
-
-               /* We MAY increase by 2 if discovered delayed ack */
-               if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) {
-                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                               tp->snd_cwnd++;
-               }
        }
+
+       if (sysctl_tcp_max_ssthresh > 0 &&
+           tp->snd_cwnd > sysctl_tcp_max_ssthresh)
+               cnt += sysctl_tcp_max_ssthresh>>1;
+       else
+               cnt += tp->snd_cwnd;
+
+       /* RFC3465: We MAY increase by 2 if discovered delayed ack */
+       if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
+               cnt <<= 1;
        tp->bytes_acked = 0;
 
-       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-               tp->snd_cwnd++;
+       tp->snd_cwnd_cnt += cnt;
+       while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
+               tp->snd_cwnd_cnt -= tp->snd_cwnd;
+               if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+                       tp->snd_cwnd++;
+       }
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
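
The rewritten tcp_slow_start() above keeps the classic behaviour (roughly one extra segment of cwnd per ACK) until cwnd exceeds sysctl_tcp_max_ssthresh, after which the per-ACK credit is capped at max_ssthresh/2, giving the linear growth of RFC 3742 Limited Slow Start; the knob defaults to 0, i.e. off. Below is a standalone per-ACK simulation of that cnt/snd_cwnd_cnt bookkeeping; the knob value and loop bounds are arbitrary test inputs and the ABC doubling is left out:

#include <stdio.h>

int main(void)
{
        unsigned cwnd = 1, cwnd_cnt = 0;
        const unsigned max_ssthresh = 100;  /* as if tcp_max_ssthresh=100 */

        for (unsigned ack = 0; ack < 2000 && cwnd < 1000; ack++) {
                unsigned cnt = (max_ssthresh > 0 && cwnd > max_ssthresh)
                               ? max_ssthresh / 2  /* capped growth above the knee */
                               : cwnd;             /* classic exponential slow start */

                cwnd_cnt += cnt;
                while (cwnd_cnt >= cwnd) {
                        cwnd_cnt -= cwnd;
                        cwnd++;
                }
                if (ack % 100 == 0)
                        printf("ack %4u: cwnd %u\n", ack, cwnd);
        }
        return 0;
}
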
 
@@ -355,8 +371,8 @@ u32 tcp_reno_min_cwnd(const struct sock *sk)
 EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);
 
 struct tcp_congestion_ops tcp_reno = {
+       .flags          = TCP_CONG_NON_RESTRICTED,
        .name           = "reno",
-       .non_restricted = 1,
        .owner          = THIS_MODULE,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
index 9a582fb..1422448 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * TCP CUBIC: Binary Increase Congestion control for TCP v2.0
+ * TCP CUBIC: Binary Increase Congestion control for TCP v2.1
  *
  * This is from the implementation of CUBIC TCP in
  * Injong Rhee, Lisong Xu.
@@ -51,8 +51,6 @@ MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_
 module_param(tcp_friendliness, int, 0644);
 MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness");
 
-#include <asm/div64.h>
-
 /* BIC TCP Parameters */
 struct bictcp {
        u32     cnt;            /* increase cwnd by 1 after ACKs */
@@ -93,50 +91,51 @@ static void bictcp_init(struct sock *sk)
                tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
 }
 
-/* 64bit divisor, dividend and result. dynamic precision */
-static inline u_int64_t div64_64(u_int64_t dividend, u_int64_t divisor)
-{
-       u_int32_t d = divisor;
-
-       if (divisor > 0xffffffffULL) {
-               unsigned int shift = fls(divisor >> 32);
-
-               d = divisor >> shift;
-               dividend >>= shift;
-       }
-
-       /* avoid 64 bit division if possible */
-       if (dividend >> 32)
-               do_div(dividend, d);
-       else
-               dividend = (uint32_t) dividend / d;
-
-       return dividend;
-}
-
-/*
- * calculate the cubic root of x using Newton-Raphson
+/* calculate the cubic root of x using a table lookup followed by one
+ * Newton-Raphson iteration.
+ * Avg err ~= 0.195%
  */
 static u32 cubic_root(u64 a)
 {
-       u32 x, x1;
-
-       /* Initial estimate is based on:
-        * cbrt(x) = exp(log(x) / 3)
+       u32 x, b, shift;
+       /*
+        * cbrt(x) MSB values for x MSB values in [0..63].
+        * Precomputed then refined by hand - Willy Tarreau
+        *
+        * For x in [0..63],
+        *   v = cbrt(x << 18) - 1
+        *   cbrt(x) = (v[x] + 10) >> 6
         */
-       x = 1u << (fls64(a)/3);
+       static const u8 v[] = {
+               /* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
+               /* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
+               /* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
+               /* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
+               /* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
+               /* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
+               /* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
+               /* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
+       };
+
+       b = fls64(a);
+       if (b < 7) {
+               /* a in [0..63] */
+               return ((u32)v[(u32)a] + 35) >> 6;
+       }
+
+       b = ((b * 84) >> 8) - 1;
+       shift = (a >> (b * 3));
+
+       x = ((u32)(((u32)v[shift] + 10) << b)) >> 6;
 
        /*
-        * Iteration based on:
+        * Newton-Raphson iteration
         *                         2
         * x    = ( 2 * x  +  a / x  ) / 3
         *  k+1          k         k
         */
-       do {
-               x1 = x;
-               x = (2 * x + (uint32_t) div64_64(a, x*x)) / 3;
-       } while (abs(x1 - x) > 1);
-
+       x = (2 * x + (u32)div64_64(a, (u64)x * (u64)(x - 1)));
+       x = ((x * 341) >> 10);
        return x;
 }
 
@@ -215,7 +214,9 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
        if (ca->delay_min > 0) {
                /* max increment = Smax * rtt / 0.1  */
                min_cnt = (cwnd * HZ * 8)/(10 * max_increment * ca->delay_min);
-               if (ca->cnt < min_cnt)
+
+               /* use concave growth when the target is above the origin */
+               if (ca->cnt < min_cnt && t >= ca->bic_K)
                        ca->cnt = min_cnt;
        }
 
@@ -333,7 +334,7 @@ static void bictcp_state(struct sock *sk, u8 new_state)
 /* Track delayed acknowledgment ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct sock *sk, u32 cnt)
+static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -401,4 +402,4 @@ module_exit(cubictcp_unregister);
 MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CUBIC TCP");
-MODULE_VERSION("2.0");
+MODULE_VERSION("2.1");
index 1020eb4..4ba4a7a 100644 (file)
@@ -98,7 +98,7 @@ static inline void measure_rtt(struct sock *sk)
        }
 }
 
-static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked)
+static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, ktime_t last)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
index 59e691d..e5be351 100644 (file)
@@ -144,7 +144,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
        ca->snd_cwnd_cents += odd;
 
        /* check when fractions goes >=128 and increase cwnd by 1. */
-       while(ca->snd_cwnd_cents >= 128) {
+       while (ca->snd_cwnd_cents >= 128) {
                tp->snd_cwnd++;
                ca->snd_cwnd_cents -= 128;
                tp->snd_cwnd_cnt = 0;
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
new file mode 100644 (file)
index 0000000..4adc47c
--- /dev/null
@@ -0,0 +1,356 @@
+/*
+ * TCP Illinois congestion control.
+ * Home page:
+ *     http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
+ *
+ * The algorithm is described in:
+ * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
+ *  for High-Speed Networks"
+ * http://www.ews.uiuc.edu/~shaoliu/papersandslides/liubassri06perf.pdf
+ *
+ * Implemented from description in paper and ns-2 simulation.
+ * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/inet_diag.h>
+#include <asm/div64.h>
+#include <net/tcp.h>
+
+#define ALPHA_SHIFT    7
+#define ALPHA_SCALE    (1u<<ALPHA_SHIFT)
+#define ALPHA_MIN      ((3*ALPHA_SCALE)/10)    /* ~0.3 */
+#define ALPHA_MAX      (10*ALPHA_SCALE)        /* 10.0 */
+#define ALPHA_BASE     ALPHA_SCALE             /* 1.0 */
+#define U32_MAX                ((u32)~0U)
+#define RTT_MAX                (U32_MAX / ALPHA_MAX)   /* 3.3 secs */
+
+#define BETA_SHIFT     6
+#define BETA_SCALE     (1u<<BETA_SHIFT)
+#define BETA_MIN       (BETA_SCALE/8)          /* 0.125 */
+#define BETA_MAX       (BETA_SCALE/2)          /* 0.5 */
+#define BETA_BASE      BETA_MAX
+
+static int win_thresh __read_mostly = 15;
+module_param(win_thresh, int, 0);
+MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");
+
+static int theta __read_mostly = 5;
+module_param(theta, int, 0);
+MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");
+
+/* TCP Illinois Parameters */
+struct illinois {
+       u64     sum_rtt;        /* sum of rtt's measured within last rtt */
+       u16     cnt_rtt;        /* # of rtts measured within last rtt */
+       u32     base_rtt;       /* min of all rtt in usec */
+       u32     max_rtt;        /* max of all rtt in usec */
+       u32     end_seq;        /* right edge of current RTT */
+       u32     alpha;          /* Additive increase */
+       u32     beta;           /* Multiplicative decrease */
+       u16     acked;          /* # packets acked by current ACK */
+       u8      rtt_above;      /* average rtt has gone above threshold */
+       u8      rtt_low;        /* # of rtts measurements below threshold */
+};
+
+static void rtt_reset(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct illinois *ca = inet_csk_ca(sk);
+
+       ca->end_seq = tp->snd_nxt;
+       ca->cnt_rtt = 0;
+       ca->sum_rtt = 0;
+
+       /* TODO: age max_rtt? */
+}
+
+static void tcp_illinois_init(struct sock *sk)
+{
+       struct illinois *ca = inet_csk_ca(sk);
+
+       ca->alpha = ALPHA_MAX;
+       ca->beta = BETA_BASE;
+       ca->base_rtt = 0x7fffffff;
+       ca->max_rtt = 0;
+
+       ca->acked = 0;
+       ca->rtt_low = 0;
+       ca->rtt_above = 0;
+
+       rtt_reset(sk);
+}
+
+/* Measure RTT for each ack. */
+static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+{
+       struct illinois *ca = inet_csk_ca(sk);
+       u32 rtt;
+
+       ca->acked = pkts_acked;
+
+       rtt = ktime_to_us(net_timedelta(last));
+
+       /* ignore bogus values, this prevents wraparound in alpha math */
+       if (rtt > RTT_MAX)
+               rtt = RTT_MAX;
+
+       /* keep track of minimum RTT seen so far */
+       if (ca->base_rtt > rtt)
+               ca->base_rtt = rtt;
+
+       /* and max */
+       if (ca->max_rtt < rtt)
+               ca->max_rtt = rtt;
+
+       ++ca->cnt_rtt;
+       ca->sum_rtt += rtt;
+}
+
+/* Maximum queuing delay */
+static inline u32 max_delay(const struct illinois *ca)
+{
+       return ca->max_rtt - ca->base_rtt;
+}
+
+/* Average queuing delay */
+static inline u32 avg_delay(const struct illinois *ca)
+{
+       u64 t = ca->sum_rtt;
+
+       do_div(t, ca->cnt_rtt);
+       return t - ca->base_rtt;
+}
+
+/*
+ * Compute value of alpha used for additive increase.
+ * If small window then use 1.0, equivalent to Reno.
+ *
+ * For larger windows, adjust based on average delay.
+ * A. If average delay is at minimum (we are uncongested),
+ *    then use large alpha (10.0) to increase faster.
+ * B. If average delay is at maximum (getting congested)
+ *    then use small alpha (0.3)
+ *
+ * The result is a convex window growth curve.
+ */
+static u32 alpha(struct illinois *ca, u32 da, u32 dm)
+{
+       u32 d1 = dm / 100;      /* Low threshold */
+
+       if (da <= d1) {
+               /* If never got out of low delay zone, then use max */
+               if (!ca->rtt_above)
+                       return ALPHA_MAX;
+
+                * Wait for 5 good RTTs before allowing alpha to go to alpha max.
+                * This prevents one good RTT from causing a sudden window increase.
+                */
+               if (++ca->rtt_low < theta)
+                       return ca->alpha;
+
+               ca->rtt_low = 0;
+               ca->rtt_above = 0;
+               return ALPHA_MAX;
+       }
+
+       ca->rtt_above = 1;
+
+       /*
+        * Based on:
+        *
+        *      (dm - d1) amin amax
+        * k1 = -------------------
+        *         amax - amin
+        *
+        *       (dm - d1) amin
+        * k2 = ----------------  - d1
+        *        amax - amin
+        *
+        *             k1
+        * alpha = ----------
+        *          k2 + da
+        */
+
+       dm -= d1;
+       da -= d1;
+       return (dm * ALPHA_MAX) /
+               (dm + (da  * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
+}
+
+/*
+ * Beta used for multiplicative decrease.
+ * For small window sizes returns same value as Reno (0.5)
+ *
+ * If delay is small (10% of max) then beta = 1/8
+ * If delay is up to 80% of max then beta = 1/2
+ * In between is a linear function
+ */
+static u32 beta(u32 da, u32 dm)
+{
+       u32 d2, d3;
+
+       d2 = dm / 10;
+       if (da <= d2)
+               return BETA_MIN;
+
+       d3 = (8 * dm) / 10;
+       if (da >= d3 || d3 <= d2)
+               return BETA_MAX;
+
+       /*
+        * Based on:
+        *
+        *       bmin d3 - bmax d2
+        * k3 = -------------------
+        *         d3 - d2
+        *
+        *       bmax - bmin
+        * k4 = -------------
+        *         d3 - d2
+        *
+        * b = k3 + k4 da
+        */
+       return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
+               / (d3 - d2);
+}
+
+/* Update alpha and beta values once per RTT */
+static void update_params(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct illinois *ca = inet_csk_ca(sk);
+
+       if (tp->snd_cwnd < win_thresh) {
+               ca->alpha = ALPHA_BASE;
+               ca->beta = BETA_BASE;
+       } else if (ca->cnt_rtt > 0) {
+               u32 dm = max_delay(ca);
+               u32 da = avg_delay(ca);
+
+               ca->alpha = alpha(ca, da, dm);
+               ca->beta = beta(da, dm);
+       }
+
+       rtt_reset(sk);
+}
+
+/*
+ * In case of loss, reset to default values
+ */
+static void tcp_illinois_state(struct sock *sk, u8 new_state)
+{
+       struct illinois *ca = inet_csk_ca(sk);
+
+       if (new_state == TCP_CA_Loss) {
+               ca->alpha = ALPHA_BASE;
+               ca->beta = BETA_BASE;
+               ca->rtt_low = 0;
+               ca->rtt_above = 0;
+               rtt_reset(sk);
+       }
+}
+
+/*
+ * Increase window in response to successful acknowledgment.
+ */
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+                                   u32 in_flight, int flag)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct illinois *ca = inet_csk_ca(sk);
+
+       if (after(ack, ca->end_seq))
+               update_params(sk);
+
+       /* RFC2861 only increase cwnd if fully utilized */
+       if (!tcp_is_cwnd_limited(sk, in_flight))
+               return;
+
+       /* In slow start */
+       if (tp->snd_cwnd <= tp->snd_ssthresh)
+               tcp_slow_start(tp);
+
+       else {
+               u32 delta;
+
+               /* snd_cwnd_cnt is # of packets since last cwnd increment */
+               tp->snd_cwnd_cnt += ca->acked;
+               ca->acked = 1;
+
+               /* This is a close approximation of:
+                * tp->snd_cwnd += alpha/tp->snd_cwnd
+               */
+               delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
+               if (delta >= tp->snd_cwnd) {
+                       tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
+                                          (u32) tp->snd_cwnd_clamp);
+                       tp->snd_cwnd_cnt = 0;
+               }
+       }
+}
+
+static u32 tcp_illinois_ssthresh(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct illinois *ca = inet_csk_ca(sk);
+
+       /* Multiplicative decrease */
+       return max((tp->snd_cwnd * ca->beta) >> BETA_SHIFT, 2U);
+}
+
+
+/* Extract info for Tcp socket info provided via netlink. */
+static void tcp_illinois_info(struct sock *sk, u32 ext,
+                             struct sk_buff *skb)
+{
+       const struct illinois *ca = inet_csk_ca(sk);
+
+       if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
+               struct tcpvegas_info info = {
+                       .tcpv_enabled = 1,
+                       .tcpv_rttcnt = ca->cnt_rtt,
+                       .tcpv_minrtt = ca->base_rtt,
+               };
+               u64 t = ca->sum_rtt;
+
+               do_div(t, ca->cnt_rtt);
+               info.tcpv_rtt = t;
+
+               nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+       }
+}
+
+static struct tcp_congestion_ops tcp_illinois = {
+       .flags          = TCP_CONG_RTT_STAMP,
+       .init           = tcp_illinois_init,
+       .ssthresh       = tcp_illinois_ssthresh,
+       .min_cwnd       = tcp_reno_min_cwnd,
+       .cong_avoid     = tcp_illinois_cong_avoid,
+       .set_state      = tcp_illinois_state,
+       .get_info       = tcp_illinois_info,
+       .pkts_acked     = tcp_illinois_acked,
+
+       .owner          = THIS_MODULE,
+       .name           = "illinois",
+};
+
+static int __init tcp_illinois_register(void)
+{
+       BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
+       return tcp_register_congestion_control(&tcp_illinois);
+}
+
+static void __exit tcp_illinois_unregister(void)
+{
+       tcp_unregister_congestion_control(&tcp_illinois);
+}
+
+module_init(tcp_illinois_register);
+module_exit(tcp_illinois_unregister);
+
+MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TCP Illinois");
+MODULE_VERSION("1.0");
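
The alpha()/beta() helpers in the new tcp_illinois.c above map the ratio of average queueing delay (da) to maximum queueing delay (dm) onto an additive-increase factor between roughly 0.3 and 10 and a multiplicative-decrease factor between 1/8 and 1/2, in the fixed-point scales set by ALPHA_SHIFT/BETA_SHIFT. The user-space sketch below recomputes those two curves with the same constants so they can be inspected or plotted; it deliberately ignores the rtt_above/rtt_low hysteresis and is not kernel code:

#include <stdint.h>
#include <stdio.h>

#define ALPHA_SHIFT 7
#define ALPHA_SCALE (1u << ALPHA_SHIFT)
#define ALPHA_MIN   ((3 * ALPHA_SCALE) / 10)   /* ~0.3 */
#define ALPHA_MAX   (10 * ALPHA_SCALE)         /* 10.0 */

#define BETA_SHIFT  6
#define BETA_SCALE  (1u << BETA_SHIFT)
#define BETA_MIN    (BETA_SCALE / 8)           /* 0.125 */
#define BETA_MAX    (BETA_SCALE / 2)           /* 0.5 */

/* Same curve as the kernel's alpha(), minus the rtt_low/rtt_above state. */
static uint32_t illinois_alpha(uint32_t da, uint32_t dm)
{
        uint32_t d1 = dm / 100;

        if (da <= d1)
                return ALPHA_MAX;
        dm -= d1;
        da -= d1;
        return (dm * ALPHA_MAX) /
               (dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
}

/* Same curve as the kernel's beta(). */
static uint32_t illinois_beta(uint32_t da, uint32_t dm)
{
        uint32_t d2 = dm / 10, d3 = (8 * dm) / 10;

        if (da <= d2)
                return BETA_MIN;
        if (da >= d3 || d3 <= d2)
                return BETA_MAX;
        return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da) /
               (d3 - d2);
}

int main(void)
{
        uint32_t dm = 100000;   /* e.g. 100 ms of maximum queueing delay, in us */

        for (uint32_t da = 0; da <= dm; da += dm / 10)
                printf("da=%6u us  alpha=%2u.%02u  beta=0.%03u\n", da,
                       illinois_alpha(da, dm) / ALPHA_SCALE,
                       (illinois_alpha(da, dm) % ALPHA_SCALE) * 100 / ALPHA_SCALE,
                       illinois_beta(da, dm) * 1000 / BETA_SCALE);
        return 0;
}
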
index 1a14191..051f0f8 100644 (file)
@@ -86,6 +86,7 @@ int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly;
+int sysctl_tcp_frto_response __read_mostly;
 int sysctl_tcp_nometrics_save __read_mostly;
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
@@ -100,6 +101,7 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_ECE               0x40 /* ECE in this ACK                         */
 #define FLAG_DATA_LOST         0x80 /* SACK detected data lossage.             */
 #define FLAG_SLOWPATH          0x100 /* Do not skip RFC checks for window update.*/
+#define FLAG_ONLY_ORIG_SACKED  0x200 /* SACKs only non-rexmit sent before RTO */
 
 #define FLAG_ACKED             (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP           (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -110,6 +112,8 @@ int sysctl_tcp_abc __read_mostly;
 #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
 #define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4)
 
+#define IsSackFrto() (sysctl_tcp_frto == 0x2)
+
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 
 /* Adapt the MSS value used to make delayed ack decision to the
@@ -136,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
                 *
                 * "len" is invariant segment length, including TCP header.
                 */
-               len += skb->data - skb->h.raw;
+               len += skb->data - skb_transport_header(skb);
                if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
                    /* If PSH is not set, packet should be
                     * full sized, provided peer TCP is not badly broken.
@@ -144,7 +148,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
                     * to handle super-low mtu links fairly.
                     */
                    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
-                    !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
+                    !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
                        /* Subtract also invariant (if peer is RFC compliant),
                         * tcp header plus fixed timestamp option length.
                         * Resulting "len" is MSS free of SACK jitter.
@@ -231,9 +235,9 @@ static void tcp_fixup_sndbuf(struct sock *sk)
  */
 
 /* Slow part of check#2. */
-static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
-                            const struct sk_buff *skb)
+static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        /* Optimize this! */
        int truesize = tcp_win_from_space(skb->truesize)/2;
        int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;
@@ -248,9 +252,11 @@ static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
        return 0;
 }
 
-static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
+static void tcp_grow_window(struct sock *sk,
                            struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
@@ -263,7 +269,7 @@ static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
                if (tcp_win_from_space(skb->truesize) <= skb->len)
                        incr = 2*tp->advmss;
                else
-                       incr = __tcp_grow_window(sk, tp, skb);
+                       incr = __tcp_grow_window(sk, skb);
 
                if (incr) {
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
@@ -326,8 +332,9 @@ static void tcp_init_buffer_space(struct sock *sk)
 }
 
 /* 5. Recalculate window clamp after socket hit its memory bounds. */
-static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
+static void tcp_clamp_window(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
 
        icsk->icsk_ack.quick = 0;
@@ -499,8 +506,9 @@ new_measure:
  * each ACK we send, he increments snd_cwnd and transmits more of his
  * queue.  -DaveM
  */
-static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now;
 
@@ -541,7 +549,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
        TCP_ECN_check_ce(tp, skb);
 
        if (skb->len >= 128)
-               tcp_grow_window(sk, tp, skb);
+               tcp_grow_window(sk, skb);
 }
 
 /* Called to compute a smoothed rtt estimate. The data fed to this
@@ -574,7 +582,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
         * does not matter how to _calculate_ it. Seems, it was trap
         * that VJ failed to avoid. 8)
         */
-       if(m == 0)
+       if (m == 0)
                m = 1;
        if (tp->srtt != 0) {
                m -= (tp->srtt >> 3);   /* m is now error in rtt est */
@@ -759,15 +767,17 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
 }
 
 /* Set slow start threshold and cwnd not falling to slow start */
-void tcp_enter_cwr(struct sock *sk)
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
 
        tp->prior_ssthresh = 0;
        tp->bytes_acked = 0;
-       if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+       if (icsk->icsk_ca_state < TCP_CA_CWR) {
                tp->undo_marker = 0;
-               tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+               if (set_ssthresh)
+                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                tp->snd_cwnd = min(tp->snd_cwnd,
                                   tcp_packets_in_flight(tp) + 1U);
                tp->snd_cwnd_cnt = 0;
@@ -934,7 +944,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
+       unsigned char *ptr = (skb_transport_header(ack_skb) +
+                             TCP_SKB_CB(ack_skb)->sacked);
        struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
        struct sk_buff *cached_skb;
        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
@@ -1038,7 +1049,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        cached_skb = tp->fastpath_skb_hint;
        cached_fack_count = tp->fastpath_cnt_hint;
        if (!cached_skb) {
-               cached_skb = sk->sk_write_queue.next;
+               cached_skb = tcp_write_queue_head(sk);
                cached_fack_count = 0;
        }
 
@@ -1055,10 +1066,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                if (after(end_seq, tp->high_seq))
                        flag |= FLAG_DATA_LOST;
 
-               sk_stream_for_retrans_queue_from(skb, sk) {
+               tcp_for_write_queue_from(skb, sk) {
                        int in_sack, pcount;
                        u8 sacked;
 
+                       if (skb == tcp_send_head(sk))
+                               break;
+
                        cached_skb = skb;
                        cached_fack_count = fack_count;
                        if (i == first_sack_index) {
@@ -1159,6 +1173,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                                                /* clear lost hint */
                                                tp->retransmit_skb_hint = NULL;
                                        }
+                                       /* SACK enhanced F-RTO detection.
+                                        * Set flag if and only if non-rexmitted
+                                        * segments below frto_highmark are
+                                        * SACKed (RFC4138; Appendix B).
+                                        * Clearing correct due to in-order walk
+                                        */
+                                       if (after(end_seq, tp->frto_highmark)) {
+                                               flag &= ~FLAG_ONLY_ORIG_SACKED;
+                                       } else {
+                                               if (!(sacked & TCPCB_RETRANS))
+                                                       flag |= FLAG_ONLY_ORIG_SACKED;
+                                       }
                                }
 
                                TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
@@ -1195,7 +1221,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
                struct sk_buff *skb;
 
-               sk_stream_for_retrans_queue(skb, sk) {
+               tcp_for_write_queue(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
                                break;
                        if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
@@ -1224,7 +1252,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 
        tp->left_out = tp->sacked_out + tp->lost_out;
 
-       if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss)
+       if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss &&
+           (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
                tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
 
 #if FASTRETRANS_DEBUG > 0
@@ -1236,9 +1265,54 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        return flag;
 }
 
-/* RTO occurred, but do not yet enter loss state. Instead, transmit two new
- * segments to see from the next ACKs whether any data was really missing.
- * If the RTO was spurious, new ACKs should arrive.
+/* F-RTO can only be used if these conditions are satisfied:
+ *  - there must be some unsent new data
+ *  - the advertised window should allow sending it
+ *  - TCP has never retransmitted anything other than head (SACK enhanced
+ *    variant from Appendix B of RFC4138 is more robust here)
+ */
+int tcp_use_frto(struct sock *sk)
+{
+       const struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+
+       if (!sysctl_tcp_frto || !tcp_send_head(sk) ||
+               after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
+                     tp->snd_una + tp->snd_wnd))
+               return 0;
+
+       if (IsSackFrto())
+               return 1;
+
+       /* Avoid expensive walking of rexmit queue if possible */
+       if (tp->retrans_out > 1)
+               return 0;
+
+       skb = tcp_write_queue_head(sk);
+       skb = tcp_write_queue_next(sk, skb);    /* Skips head */
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
+               if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
+                       return 0;
+               /* Short-circuit when first non-SACKed skb has been checked */
+               if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED))
+                       break;
+       }
+       return 1;
+}
+
+/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
+ * recovery a bit and use heuristics in tcp_process_frto() to detect if
+ * the RTO was spurious. Only clear SACKED_RETRANS of the head here to
+ * keep retrans_out counting accurate (with SACK F-RTO, other than head
+ * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS
+ * bits are handled if the Loss state is really to be entered (in
+ * tcp_enter_frto_loss).
+ *
+ * Do like tcp_enter_loss() would; when RTO expires the second time it
+ * does:
+ *  "Reduce ssthresh if it has not yet been made inside this window."
  */
 void tcp_enter_frto(struct sock *sk)
 {
@@ -1246,39 +1320,69 @@ void tcp_enter_frto(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
 
-       tp->frto_counter = 1;
-
-       if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
+       if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
            tp->snd_una == tp->high_seq ||
-           (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
+           ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
+            !icsk->icsk_retransmits)) {
                tp->prior_ssthresh = tcp_current_ssthresh(sk);
-               tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+               /* Our state is too optimistic in ssthresh() call because cwnd
+                * is not reduced until tcp_enter_frto_loss() when previous FRTO
+                * recovery has not yet completed. Pattern would be this: RTO,
+                * Cumulative ACK, RTO (2xRTO for the same segment does not end
+                * up here twice).
+                * RFC4138 should be more specific on what to do, even though
+                * RTO is quite unlikely to occur after the first Cumulative ACK
+                * due to back-off and complexity of triggering events ...
+                */
+               if (tp->frto_counter) {
+                       u32 stored_cwnd;
+                       stored_cwnd = tp->snd_cwnd;
+                       tp->snd_cwnd = 2;
+                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+                       tp->snd_cwnd = stored_cwnd;
+               } else {
+                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+               }
+               /* ... in theory, cong.control module could do "any tricks" in
+                * ssthresh(), which means that ca_state, lost bits and lost_out
+                * counter would have to be faked before the call occurs. We
+                * consider that too expensive, unlikely and hacky, so modules
+                * using these in ssthresh() must deal with these incompatibility
+                * issues if they receive CA_EVENT_FRTO and frto_counter != 0
+                */
                tcp_ca_event(sk, CA_EVENT_FRTO);
        }
 
-       /* Have to clear retransmission markers here to keep the bookkeeping
-        * in shape, even though we are not yet in Loss state.
-        * If something was really lost, it is eventually caught up
-        * in tcp_enter_frto_loss.
-        */
-       tp->retrans_out = 0;
        tp->undo_marker = tp->snd_una;
        tp->undo_retrans = 0;
 
-       sk_stream_for_retrans_queue(skb, sk) {
-               TCP_SKB_CB(skb)->sacked &= ~TCPCB_RETRANS;
+       skb = tcp_write_queue_head(sk);
+       if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
+               TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
+               tp->retrans_out -= tcp_skb_pcount(skb);
        }
        tcp_sync_left_out(tp);
 
-       tcp_set_ca_state(sk, TCP_CA_Open);
-       tp->frto_highmark = tp->snd_nxt;
+       /* Earlier loss recovery is underway (see RFC4138, Appendix B).
+        * The last condition is necessary at least in the tp->frto_counter case.
+        */
+       if (IsSackFrto() && (tp->frto_counter ||
+           ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
+           after(tp->high_seq, tp->snd_una)) {
+               tp->frto_highmark = tp->high_seq;
+       } else {
+               tp->frto_highmark = tp->snd_nxt;
+       }
+       tcp_set_ca_state(sk, TCP_CA_Disorder);
+       tp->high_seq = tp->snd_nxt;
+       tp->frto_counter = 1;
 }
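/*
 * Editorial sketch (not part of the patch): a minimal user-space model of
 * the "stored_cwnd" trick used in tcp_enter_frto() above.  When a second RTO
 * interrupts an unfinished F-RTO round, snd_cwnd has not been reduced yet,
 * so calling the congestion module's ->ssthresh() directly would be too
 * optimistic; the trick is to call it with cwnd temporarily forced to 2.
 * ssthresh_halving() stands in for a module hook that derives ssthresh from
 * cwnd; struct frto_sock and both function names are hypothetical.
 */
#include <stdio.h>

struct frto_sock {
        unsigned int snd_cwnd;
        unsigned int snd_ssthresh;
        int frto_counter;
};

static unsigned int ssthresh_halving(const struct frto_sock *fs)
{
        return fs->snd_cwnd > 4 ? fs->snd_cwnd / 2 : 2;  /* Reno-style */
}

static void frto_set_ssthresh(struct frto_sock *fs)
{
        if (fs->frto_counter) {                 /* previous F-RTO unfinished */
                unsigned int stored_cwnd = fs->snd_cwnd;

                fs->snd_cwnd = 2;               /* pretend cwnd was already cut */
                fs->snd_ssthresh = ssthresh_halving(fs);
                fs->snd_cwnd = stored_cwnd;     /* restore the real cwnd */
        } else {
                fs->snd_ssthresh = ssthresh_halving(fs);
        }
}

int main(void)
{
        struct frto_sock fs = { .snd_cwnd = 20, .frto_counter = 1 };

        frto_set_ssthresh(&fs);
        printf("cwnd=%u ssthresh=%u\n", fs.snd_cwnd, fs.snd_ssthresh);
        return 0;
}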
 
 /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
  * which indicates that we should follow the traditional RTO recovery,
  * i.e. mark everything lost and do go-back-N retransmission.
  */
-static void tcp_enter_frto_loss(struct sock *sk)
+static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -1287,10 +1391,23 @@ static void tcp_enter_frto_loss(struct sock *sk)
        tp->sacked_out = 0;
        tp->lost_out = 0;
        tp->fackets_out = 0;
+       tp->retrans_out = 0;
 
-       sk_stream_for_retrans_queue(skb, sk) {
+       tcp_for_write_queue(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                cnt += tcp_skb_pcount(skb);
-               TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
+               /*
+                * Count the retransmission made on RTO correctly (only when
+                * we are still waiting for the first ACK and have not received it)...
+                */
+               if ((tp->frto_counter == 1) && !(flag&FLAG_DATA_ACKED)) {
+                       tp->retrans_out += tcp_skb_pcount(skb);
+                       /* ...enter this if branch just for the first segment */
+                       flag |= FLAG_DATA_ACKED;
+               } else {
+                       TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+               }
                if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
 
                        /* Do not mark those segments lost that were
@@ -1308,7 +1425,7 @@ static void tcp_enter_frto_loss(struct sock *sk)
        }
        tcp_sync_left_out(tp);
 
-       tp->snd_cwnd = tp->frto_counter + tcp_packets_in_flight(tp)+1;
+       tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
        tp->snd_cwnd_cnt = 0;
        tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->undo_marker = 0;
@@ -1366,7 +1483,9 @@ void tcp_enter_loss(struct sock *sk, int how)
        if (!how)
                tp->undo_marker = tp->snd_una;
 
-       sk_stream_for_retrans_queue(skb, sk) {
+       tcp_for_write_queue(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                cnt += tcp_skb_pcount(skb);
                if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
                        tp->undo_marker = 0;
@@ -1401,14 +1520,14 @@ static int tcp_check_sack_reneging(struct sock *sk)
         * receiver _host_ is heavily congested (or buggy).
         * Do processing similar to RTO timeout.
         */
-       if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
+       if ((skb = tcp_write_queue_head(sk)) != NULL &&
            (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
                struct inet_connection_sock *icsk = inet_csk(sk);
                NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
 
                tcp_enter_loss(sk, 1);
                icsk->icsk_retransmits++;
-               tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
+               tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          icsk->icsk_rto, TCP_RTO_MAX);
                return 1;
@@ -1426,10 +1545,12 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
        return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
 }
 
-static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
+static inline int tcp_head_timedout(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        return tp->packets_out &&
-              tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
+              tcp_skb_timedout(sk, tcp_write_queue_head(sk));
 }
 
 /* Linux NewReno/SACK/FACK/ECN state machine.
@@ -1525,10 +1646,15 @@ static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
  * Main question: may we further continue forward transmission
  * with the same cwnd?
  */
-static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
+static int tcp_time_to_recover(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out;
 
+       /* Do not perform any recovery while the F-RTO algorithm is in progress */
+       if (tp->frto_counter)
+               return 0;
+
        /* Trick#1: The loss is proven. */
        if (tp->lost_out)
                return 1;
@@ -1540,7 +1666,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
        /* Trick#3 : when we use RFC2988 timer restart, fast
         * retransmit can be triggered by timeout of queue head.
         */
-       if (tcp_head_timedout(sk, tp))
+       if (tcp_head_timedout(sk))
                return 1;
 
        /* Trick#4: It is still not OK... But will it be useful to delay
@@ -1549,7 +1675,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
        packets_out = tp->packets_out;
        if (packets_out <= tp->reordering &&
            tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
-           !tcp_may_send_now(sk, tp)) {
+           !tcp_may_send_now(sk)) {
                /* We have nothing to send. This connection is limited
                 * either by receiver window or by application.
                 */
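/*
 * Editorial sketch (not part of the patch): a boolean model of the checks
 * visible in this hunk of tcp_time_to_recover() -- the new "no recovery
 * while F-RTO is in progress" rule, Trick#1 (proven loss), Trick#3 (queue
 * head timed out) and Trick#4 (enough SACKed segments and nothing left to
 * send).  Struct, field and function names here are hypothetical.
 */
#include <stdio.h>

struct recover_state {
        unsigned int lost_out, sacked_out, packets_out, reordering;
        int head_timed_out, may_send_now, frto_counter;
};

static int time_to_recover(const struct recover_state *s,
                           unsigned int sysctl_reordering)
{
        unsigned int thresh = s->packets_out / 2 > sysctl_reordering ?
                              s->packets_out / 2 : sysctl_reordering;

        if (s->frto_counter)                    /* F-RTO algorithm in progress */
                return 0;
        if (s->lost_out)                        /* Trick#1: the loss is proven */
                return 1;
        if (s->head_timed_out)                  /* Trick#3: queue head timed out */
                return 1;
        if (s->packets_out <= s->reordering &&  /* Trick#4: stalled by rwnd/app */
            s->sacked_out >= thresh && !s->may_send_now)
                return 1;
        return 0;
}

int main(void)
{
        struct recover_state s = { .lost_out = 1 };

        printf("%d\n", time_to_recover(&s, 3)); /* 1 */
        return 0;
}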
@@ -1589,8 +1715,10 @@ static void tcp_add_reno_sack(struct sock *sk)
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
 
-static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (acked > 0) {
                /* One ACK acked hole. The rest eat duplicate ACKs. */
                if (acked-1 >= tp->sacked_out)
@@ -1609,9 +1737,10 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
 }
 
 /* Mark head of queue up as lost. */
-static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
+static void tcp_mark_head_lost(struct sock *sk,
                               int packets, u32 high_seq)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        int cnt;
 
@@ -1620,11 +1749,13 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
                skb = tp->lost_skb_hint;
                cnt = tp->lost_cnt_hint;
        } else {
-               skb = sk->sk_write_queue.next;
+               skb = tcp_write_queue_head(sk);
                cnt = 0;
        }
 
-       sk_stream_for_retrans_queue_from(skb, sk) {
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                /* TODO: do this better */
                /* this is not the most efficient way to do this... */
                tp->lost_skb_hint = skb;
@@ -1638,12 +1769,11 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
 
                        /* clear xmit_retransmit_queue hints
                         *  if this is beyond hint */
-                       if(tp->retransmit_skb_hint != NULL &&
-                          before(TCP_SKB_CB(skb)->seq,
-                                 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) {
-
+                       if (tp->retransmit_skb_hint != NULL &&
+                           before(TCP_SKB_CB(skb)->seq,
+                                  TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
                                tp->retransmit_skb_hint = NULL;
-                       }
+
                }
        }
        tcp_sync_left_out(tp);
@@ -1651,15 +1781,17 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
 
 /* Account newly detected lost packet(s) */
 
-static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
+static void tcp_update_scoreboard(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (IsFack(tp)) {
                int lost = tp->fackets_out - tp->reordering;
                if (lost <= 0)
                        lost = 1;
-               tcp_mark_head_lost(sk, tp, lost, tp->high_seq);
+               tcp_mark_head_lost(sk, lost, tp->high_seq);
        } else {
-               tcp_mark_head_lost(sk, tp, 1, tp->high_seq);
+               tcp_mark_head_lost(sk, 1, tp->high_seq);
        }
 
        /* New heuristics: it is possible only after we switched
@@ -1667,13 +1799,15 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
         * Hence, we can detect timed out packets during fast
         * retransmit without falling to slow start.
         */
-       if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
+       if (!IsReno(tp) && tcp_head_timedout(sk)) {
                struct sk_buff *skb;
 
                skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
-                       : sk->sk_write_queue.next;
+                       : tcp_write_queue_head(sk);
 
-               sk_stream_for_retrans_queue_from(skb, sk) {
+               tcp_for_write_queue_from(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        if (!tcp_skb_timedout(sk, skb))
                                break;
 
@@ -1745,9 +1879,11 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp)
 /* Undo procedures. */
 
 #if FASTRETRANS_DEBUG > 1
-static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
+static void DBGUNDO(struct sock *sk, const char *msg)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
+
        printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
               msg,
               NIPQUAD(inet->daddr), ntohs(inet->dport),
@@ -1793,13 +1929,15 @@ static inline int tcp_may_undo(struct tcp_sock *tp)
 }
 
 /* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_recovery(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tcp_may_undo(tp)) {
                /* Happy end! We did not retransmit anything
                 * or our original transmission succeeded.
                 */
-               DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
+               DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
                tcp_undo_cwr(sk, 1);
                if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
                        NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
@@ -1819,10 +1957,12 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
 }
 
 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
-static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
+static void tcp_try_undo_dsack(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tp->undo_marker && !tp->undo_retrans) {
-               DBGUNDO(sk, tp, "D-SACK");
+               DBGUNDO(sk, "D-SACK");
                tcp_undo_cwr(sk, 1);
                tp->undo_marker = 0;
                NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
@@ -1831,9 +1971,9 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
 
 /* Undo during fast recovery after partial ACK. */
 
-static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
-                               int acked)
+static int tcp_try_undo_partial(struct sock *sk, int acked)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        /* Partial ACK arrived. Force Hoe's retransmit. */
        int failed = IsReno(tp) || tp->fackets_out>tp->reordering;
 
@@ -1846,7 +1986,7 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
 
                tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
-               DBGUNDO(sk, tp, "Hoe");
+               DBGUNDO(sk, "Hoe");
                tcp_undo_cwr(sk, 0);
                NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
 
@@ -1860,17 +2000,21 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
 }
 
 /* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_loss(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tcp_may_undo(tp)) {
                struct sk_buff *skb;
-               sk_stream_for_retrans_queue(skb, sk) {
+               tcp_for_write_queue(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
                }
 
                clear_all_retrans_hints(tp);
 
-               DBGUNDO(sk, tp, "partial loss");
+               DBGUNDO(sk, "partial loss");
                tp->lost_out = 0;
                tp->left_out = tp->sacked_out;
                tcp_undo_cwr(sk, 1);
@@ -1892,15 +2036,17 @@ static inline void tcp_complete_cwr(struct sock *sk)
        tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
-static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
+static void tcp_try_to_open(struct sock *sk, int flag)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        tp->left_out = tp->sacked_out;
 
        if (tp->retrans_out == 0)
                tp->retrans_stamp = 0;
 
        if (flag&FLAG_ECE)
-               tcp_enter_cwr(sk);
+               tcp_enter_cwr(sk, 1);
 
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                int state = TCP_CA_Open;
@@ -1987,7 +2133,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
            before(tp->snd_una, tp->high_seq) &&
            icsk->icsk_ca_state != TCP_CA_Open &&
            tp->fackets_out > tp->reordering) {
-               tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
+               tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, tp->high_seq);
                NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
        }
 
@@ -1997,14 +2143,13 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
        /* E. Check state exit conditions. State can be terminated
         *    when high_seq is ACKed. */
        if (icsk->icsk_ca_state == TCP_CA_Open) {
-               if (!sysctl_tcp_frto)
-                       BUG_TRAP(tp->retrans_out == 0);
+               BUG_TRAP(tp->retrans_out == 0);
                tp->retrans_stamp = 0;
        } else if (!before(tp->snd_una, tp->high_seq)) {
                switch (icsk->icsk_ca_state) {
                case TCP_CA_Loss:
                        icsk->icsk_retransmits = 0;
-                       if (tcp_try_undo_recovery(sk, tp))
+                       if (tcp_try_undo_recovery(sk))
                                return;
                        break;
 
@@ -2018,7 +2163,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                        break;
 
                case TCP_CA_Disorder:
-                       tcp_try_undo_dsack(sk, tp);
+                       tcp_try_undo_dsack(sk);
                        if (!tp->undo_marker ||
                            /* For SACK case do not Open to allow to undo
                             * catching for all duplicate ACKs. */
@@ -2031,7 +2176,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                case TCP_CA_Recovery:
                        if (IsReno(tp))
                                tcp_reset_reno_sack(tp);
-                       if (tcp_try_undo_recovery(sk, tp))
+                       if (tcp_try_undo_recovery(sk))
                                return;
                        tcp_complete_cwr(sk);
                        break;
@@ -2047,14 +2192,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                } else {
                        int acked = prior_packets - tp->packets_out;
                        if (IsReno(tp))
-                               tcp_remove_reno_sacks(sk, tp, acked);
-                       is_dupack = tcp_try_undo_partial(sk, tp, acked);
+                               tcp_remove_reno_sacks(sk, acked);
+                       is_dupack = tcp_try_undo_partial(sk, acked);
                }
                break;
        case TCP_CA_Loss:
                if (flag&FLAG_DATA_ACKED)
                        icsk->icsk_retransmits = 0;
-               if (!tcp_try_undo_loss(sk, tp)) {
+               if (!tcp_try_undo_loss(sk)) {
                        tcp_moderate_cwnd(tp);
                        tcp_xmit_retransmit_queue(sk);
                        return;
@@ -2071,10 +2216,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                }
 
                if (icsk->icsk_ca_state == TCP_CA_Disorder)
-                       tcp_try_undo_dsack(sk, tp);
+                       tcp_try_undo_dsack(sk);
 
-               if (!tcp_time_to_recover(sk, tp)) {
-                       tcp_try_to_open(sk, tp, flag);
+               if (!tcp_time_to_recover(sk)) {
+                       tcp_try_to_open(sk, flag);
                        return;
                }
 
@@ -2113,8 +2258,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                tcp_set_ca_state(sk, TCP_CA_Recovery);
        }
 
-       if (is_dupack || tcp_head_timedout(sk, tp))
-               tcp_update_scoreboard(sk, tp);
+       if (is_dupack || tcp_head_timedout(sk))
+               tcp_update_scoreboard(sk);
        tcp_cwnd_down(sk);
        tcp_xmit_retransmit_queue(sk);
 }
@@ -2190,8 +2335,10 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
  * RFC2988 recommends to restart timer to now+rto.
  */
 
-static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
+static void tcp_ack_packets_out(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (!tp->packets_out) {
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
        } else {
@@ -2255,14 +2402,6 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
        return acked;
 }
 
-static u32 tcp_usrtt(struct timeval *tv)
-{
-       struct timeval now;
-
-       do_gettimeofday(&now);
-       return (now.tv_sec - tv->tv_sec) * 1000000 + (now.tv_usec - tv->tv_usec);
-}
-
 /* Remove acknowledged frames from the retransmission queue. */
 static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 {
@@ -2273,12 +2412,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
        int acked = 0;
        __s32 seq_rtt = -1;
        u32 pkts_acked = 0;
-       void (*rtt_sample)(struct sock *sk, u32 usrtt)
-               = icsk->icsk_ca_ops->rtt_sample;
-       struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
+       ktime_t last_ackt = ktime_set(0,0);
 
-       while ((skb = skb_peek(&sk->sk_write_queue)) &&
-              skb != sk->sk_send_head) {
+       while ((skb = tcp_write_queue_head(sk)) &&
+              skb != tcp_send_head(sk)) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
                __u8 sacked = scb->sacked;
 
@@ -2318,13 +2455,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 
                if (sacked) {
                        if (sacked & TCPCB_RETRANS) {
-                               if(sacked & TCPCB_SACKED_RETRANS)
+                               if (sacked & TCPCB_SACKED_RETRANS)
                                        tp->retrans_out -= tcp_skb_pcount(skb);
                                acked |= FLAG_RETRANS_DATA_ACKED;
                                seq_rtt = -1;
                        } else if (seq_rtt < 0) {
                                seq_rtt = now - scb->when;
-                               skb_get_timestamp(skb, &tv);
+                               last_ackt = skb->tstamp;
                        }
                        if (sacked & TCPCB_SACKED_ACKED)
                                tp->sacked_out -= tcp_skb_pcount(skb);
@@ -2337,23 +2474,24 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                        }
                } else if (seq_rtt < 0) {
                        seq_rtt = now - scb->when;
-                       skb_get_timestamp(skb, &tv);
+                       last_ackt = skb->tstamp;
                }
                tcp_dec_pcount_approx(&tp->fackets_out, skb);
                tcp_packets_out_dec(tp, skb);
-               __skb_unlink(skb, &sk->sk_write_queue);
+               tcp_unlink_write_queue(skb, sk);
                sk_stream_free_skb(sk, skb);
                clear_all_retrans_hints(tp);
        }
 
        if (acked&FLAG_ACKED) {
+               const struct tcp_congestion_ops *ca_ops
+                       = inet_csk(sk)->icsk_ca_ops;
+
                tcp_ack_update_rtt(sk, acked, seq_rtt);
-               tcp_ack_packets_out(sk, tp);
-               if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED))
-                       (*rtt_sample)(sk, tcp_usrtt(&tv));
+               tcp_ack_packets_out(sk);
 
-               if (icsk->icsk_ca_ops->pkts_acked)
-                       icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked);
+               if (ca_ops->pkts_acked)
+                       ca_ops->pkts_acked(sk, pkts_acked, last_ackt);
        }
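/*
 * Editorial sketch (not part of the patch): with the rtt_sample hook and
 * tcp_usrtt() removed above, a congestion module that wants a wall-clock RTT
 * is presumably expected to derive it in its pkts_acked() callback from the
 * ktime_t timestamp now passed as last_ackt.  This is a stand-alone model of
 * that computation using plain nanosecond counters; rtt_usec_from_last_ackt()
 * is a hypothetical name and no particular kernel ktime helper is assumed.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t rtt_usec_from_last_ackt(int64_t now_ns, int64_t last_ackt_ns)
{
        if (last_ackt_ns == 0)          /* ktime_set(0,0): no timestamp taken */
                return -1;
        return (now_ns - last_ackt_ns) / 1000;  /* nanoseconds -> microseconds */
}

int main(void)
{
        /* pretend the skb was timestamped 1234567 ns before this ACK */
        printf("%lld\n", (long long)rtt_usec_from_last_ackt(2000000, 765433));
        return 0;
}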
 
 #if FASTRETRANS_DEBUG > 0
@@ -2390,7 +2528,7 @@ static void tcp_ack_probe(struct sock *sk)
 
        /* Was it a usable window open? */
 
-       if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
+       if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
                   tp->snd_una + tp->snd_wnd)) {
                icsk->icsk_backoff = 0;
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
@@ -2433,13 +2571,14 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack
  * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
  * and in FreeBSD. NetBSD's one is even worse.) is wrong.
  */
-static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
-                                struct sk_buff *skb, u32 ack, u32 ack_seq)
+static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
+                                u32 ack_seq)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        int flag = 0;
-       u32 nwin = ntohs(skb->h.th->window);
+       u32 nwin = ntohs(tcp_hdr(skb)->window);
 
-       if (likely(!skb->h.th->syn))
+       if (likely(!tcp_hdr(skb)->syn))
                nwin <<= tp->rx_opt.snd_wscale;
 
        if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
@@ -2453,7 +2592,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
                         * fast path is recovered for sending TCP.
                         */
                        tp->pred_flags = 0;
-                       tcp_fast_path_check(sk, tp);
+                       tcp_fast_path_check(sk);
 
                        if (nwin > tp->max_window) {
                                tp->max_window = nwin;
@@ -2467,39 +2606,128 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
        return flag;
 }
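/*
 * Editorial sketch (not part of the patch): how the advertised window is
 * scaled in tcp_ack_update_window() above -- the 16-bit window field is
 * left-shifted by the peer's announced shift count, except on SYN segments
 * where the raw value is used.  scaled_window() is a hypothetical helper.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t scaled_window(uint16_t raw_window, int syn, uint8_t snd_wscale)
{
        uint32_t nwin = raw_window;

        if (!syn)
                nwin <<= snd_wscale;    /* scale factor agreed at the handshake */
        return nwin;
}

int main(void)
{
        printf("%u\n", scaled_window(0xffff, 0, 7));    /* 8388480 bytes */
        printf("%u\n", scaled_window(0xffff, 1, 7));    /* 65535 on a SYN */
        return 0;
}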
 
-static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
+/* A very conservative spurious RTO response algorithm: reduce cwnd and
+ * continue in congestion avoidance.
+ */
+static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
+{
+       tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+       tp->snd_cwnd_cnt = 0;
+       tcp_moderate_cwnd(tp);
+}
+
+/* A conservative spurious RTO response algorithm: reduce cwnd using
+ * rate halving and continue in congestion avoidance.
+ */
+static void tcp_ratehalving_spur_to_response(struct sock *sk)
+{
+       tcp_enter_cwr(sk, 0);
+}
+
+static void tcp_undo_spur_to_response(struct sock *sk, int flag)
+{
+       if (flag&FLAG_ECE)
+               tcp_ratehalving_spur_to_response(sk);
+       else
+               tcp_undo_cwr(sk, 1);
+}
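/*
 * Editorial sketch (not part of the patch): the three spurious-RTO responses
 * above are selected by sysctl_tcp_frto_response in tcp_process_frto() below
 * (2 = undo, falling back to rate halving on ECE; 1 = conservative cwnd
 * clamp; default = rate halving).  This is a stand-alone model of that
 * dispatch; enum frto_response and pick_response() are hypothetical names.
 */
#include <stdio.h>

enum frto_response { RESP_RATEHALVING, RESP_CONSERVATIVE, RESP_UNDO };

static enum frto_response pick_response(int sysctl_frto_response)
{
        switch (sysctl_frto_response) {
        case 2:
                return RESP_UNDO;               /* undo the cwnd reduction */
        case 1:
                return RESP_CONSERVATIVE;       /* clamp cwnd to ssthresh */
        default:
                return RESP_RATEHALVING;        /* enter CWR, halve gradually */
        }
}

int main(void)
{
        printf("%d\n", pick_response(0));       /* RESP_RATEHALVING */
        return 0;
}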
+
+/* F-RTO spurious RTO detection algorithm (RFC4138)
+ *
+ * F-RTO is in effect during the two new ACKs following an RTO (well, almost,
+ * see inline comments). State (the ACK number) is kept in frto_counter. When
+ * an ACK advances the window (but not to or beyond the highest sequence sent
+ * before the RTO):
+ *   On First ACK,  send two new segments out.
+ *   On Second ACK, RTO was likely spurious. Do spurious response (response
+ *                  algorithm is not part of the F-RTO detection algorithm
+ *                  given in RFC4138 but can be selected separately).
+ * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss
+ * and TCP falls back to conventional RTO recovery.
+ *
+ * Rationale: if the RTO was spurious, new ACKs should arrive from the
+ * original window even after we transmit two new data segments.
+ *
+ * SACK version:
+ *   in the first step, wait until the first cumulative ACK arrives, then move
+ *   to the second step. In the second step, the next ACK decides.
+ *
+ * F-RTO is implemented (mainly) in four functions:
+ *   - tcp_use_frto() is used to determine if TCP can use F-RTO
+ *   - tcp_enter_frto() prepares the TCP state on RTO if F-RTO is used; it is
+ *     called when tcp_use_frto() has given the green light
+ *   - tcp_process_frto() handles incoming ACKs during the F-RTO algorithm
+ *   - tcp_enter_frto_loss() is called if there is not enough evidence
+ *     to prove that the RTO is indeed spurious. It transfers control
+ *     from F-RTO to the conventional RTO recovery.
+ */
+static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
        tcp_sync_left_out(tp);
 
-       if (tp->snd_una == prior_snd_una ||
-           !before(tp->snd_una, tp->frto_highmark)) {
-               /* RTO was caused by loss, start retransmitting in
-                * go-back-N slow start
-                */
-               tcp_enter_frto_loss(sk);
-               return;
+       /* Duplicate the behavior from Loss state (fastretrans_alert) */
+       if (flag&FLAG_DATA_ACKED)
+               inet_csk(sk)->icsk_retransmits = 0;
+
+       if (!before(tp->snd_una, tp->frto_highmark)) {
+               tcp_enter_frto_loss(sk, tp->frto_counter + 1, flag);
+               return 1;
        }
 
-       if (tp->frto_counter == 1) {
-               /* First ACK after RTO advances the window: allow two new
-                * segments out.
+       if (!IsSackFrto() || IsReno(tp)) {
+               /* RFC4138 shortcoming in step 2; it should also have case c):
+                * the ACK is neither a duplicate nor advances the window, e.g.,
+                * opposite-direction data or a window update
                 */
-               tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
+               if ((tp->snd_una == prior_snd_una) && (flag&FLAG_NOT_DUP) &&
+                   !(flag&FLAG_FORWARD_PROGRESS))
+                       return 1;
+
+               if (!(flag&FLAG_DATA_ACKED)) {
+                       tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
+                                           flag);
+                       return 1;
+               }
        } else {
-               /* Also the second ACK after RTO advances the window.
-                * The RTO was likely spurious. Reduce cwnd and continue
-                * in congestion avoidance
-                */
-               tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-               tcp_moderate_cwnd(tp);
+               if (!(flag&FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+                       /* Prevent sending of new data. */
+                       tp->snd_cwnd = min(tp->snd_cwnd,
+                                          tcp_packets_in_flight(tp));
+                       return 1;
+               }
+
+               if ((tp->frto_counter == 2) &&
+                   (!(flag&FLAG_FORWARD_PROGRESS) ||
+                    ((flag&FLAG_DATA_SACKED) && !(flag&FLAG_ONLY_ORIG_SACKED)))) {
+                       /* RFC4138 shortcoming (see comment above) */
+                       if (!(flag&FLAG_FORWARD_PROGRESS) && (flag&FLAG_NOT_DUP))
+                               return 1;
+
+                       tcp_enter_frto_loss(sk, 3, flag);
+                       return 1;
+               }
        }
 
-       /* F-RTO affects on two new ACKs following RTO.
-        * At latest on third ACK the TCP behavior is back to normal.
-        */
-       tp->frto_counter = (tp->frto_counter + 1) % 3;
+       if (tp->frto_counter == 1) {
+               tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
+               tp->frto_counter = 2;
+               return 1;
+       } else /* frto_counter == 2 */ {
+               switch (sysctl_tcp_frto_response) {
+               case 2:
+                       tcp_undo_spur_to_response(sk, flag);
+                       break;
+               case 1:
+                       tcp_conservative_spur_to_response(tp);
+                       break;
+               default:
+                       tcp_ratehalving_spur_to_response(sk);
+                       break;
+               }
+               tp->frto_counter = 0;
+       }
+       return 0;
 }
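/*
 * Editorial sketch (not part of the patch): a user-space model of the basic
 * (non-SACK) F-RTO decision described in the comment block above, keyed on
 * frto_counter.  It mirrors the high-level description rather than the exact
 * flag handling in tcp_process_frto(); enum frto_verdict and frto_step() are
 * hypothetical names.
 */
#include <stdio.h>

enum frto_verdict { FRTO_CONTINUE, FRTO_SPURIOUS, FRTO_FALL_TO_LOSS };

static enum frto_verdict frto_step(int *frto_counter, int ack_advanced_window)
{
        if (!ack_advanced_window)       /* duplicate ACK: RTO was due to loss */
                return FRTO_FALL_TO_LOSS;

        if (*frto_counter == 1) {       /* first new ACK after the RTO */
                *frto_counter = 2;      /* allow two new segments out */
                return FRTO_CONTINUE;
        }
        *frto_counter = 0;              /* second new ACK: RTO was spurious */
        return FRTO_SPURIOUS;
}

int main(void)
{
        int counter = 1;

        printf("%d\n", frto_step(&counter, 1)); /* FRTO_CONTINUE */
        printf("%d\n", frto_step(&counter, 1)); /* FRTO_SPURIOUS */
        return 0;
}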
 
 /* This routine deals with incoming acks, but not outgoing ones. */
@@ -2513,6 +2741,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        u32 prior_in_flight;
        s32 seq_rtt;
        int prior_packets;
+       int frto_cwnd = 0;
 
        /* If the ack is newer than sent or older than previous acks
         * then we can probably ignore it.
@@ -2549,12 +2778,12 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                else
                        NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
 
-               flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
+               flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
                if (TCP_SKB_CB(skb)->sacked)
                        flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
 
-               if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
+               if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
                        flag |= FLAG_ECE;
 
                tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
@@ -2575,15 +2804,16 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
 
        if (tp->frto_counter)
-               tcp_process_frto(sk, prior_snd_una);
+               frto_cwnd = tcp_process_frto(sk, prior_snd_una, flag);
 
        if (tcp_ack_is_dubious(sk, flag)) {
                /* Advance CWND, if state allows this. */
-               if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
+               if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
+                   tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack,  seq_rtt, prior_in_flight, 0);
                tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
        } else {
-               if ((flag & FLAG_DATA_ACKED))
+               if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
                        tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
        }
 
@@ -2599,7 +2829,7 @@ no_queue:
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
         */
-       if (sk->sk_send_head)
+       if (tcp_send_head(sk))
                tcp_ack_probe(sk);
        return 1;
 
@@ -2620,13 +2850,13 @@ uninteresting_ack:
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
 {
        unsigned char *ptr;
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        int length=(th->doff*4)-sizeof(struct tcphdr);
 
        ptr = (unsigned char *)(th + 1);
        opt_rx->saw_tstamp = 0;
 
-       while(length>0) {
+       while (length > 0) {
                int opcode=*ptr++;
                int opsize;
 
@@ -2642,9 +2872,9 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        return;
                                if (opsize > length)
                                        return; /* don't parse partial options */
-                               switch(opcode) {
+                               switch (opcode) {
                                case TCPOPT_MSS:
-                                       if(opsize==TCPOLEN_MSS && th->syn && !estab) {
+                                       if (opsize==TCPOLEN_MSS && th->syn && !estab) {
                                                u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
                                                if (in_mss) {
                                                        if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
@@ -2654,12 +2884,12 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        }
                                        break;
                                case TCPOPT_WINDOW:
-                                       if(opsize==TCPOLEN_WINDOW && th->syn && !estab)
+                                       if (opsize==TCPOLEN_WINDOW && th->syn && !estab)
                                                if (sysctl_tcp_window_scaling) {
                                                        __u8 snd_wscale = *(__u8 *) ptr;
                                                        opt_rx->wscale_ok = 1;
                                                        if (snd_wscale > 14) {
-                                                               if(net_ratelimit())
+                                                               if (net_ratelimit())
                                                                        printk(KERN_INFO "tcp_parse_options: Illegal window "
                                                                               "scaling value %d >14 received.\n",
                                                                               snd_wscale);
@@ -2669,7 +2899,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                                }
                                        break;
                                case TCPOPT_TIMESTAMP:
-                                       if(opsize==TCPOLEN_TIMESTAMP) {
+                                       if (opsize==TCPOLEN_TIMESTAMP) {
                                                if ((estab && opt_rx->tstamp_ok) ||
                                                    (!estab && sysctl_tcp_timestamps)) {
                                                        opt_rx->saw_tstamp = 1;
@@ -2679,7 +2909,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        }
                                        break;
                                case TCPOPT_SACK_PERM:
-                                       if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
+                                       if (opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
                                                if (sysctl_tcp_sack) {
                                                        opt_rx->sack_ok = 1;
                                                        tcp_sack_reset(opt_rx);
@@ -2688,7 +2918,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        break;
 
                                case TCPOPT_SACK:
-                                       if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
+                                       if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
                                           !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
                                           opt_rx->sack_ok) {
                                                TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
@@ -2701,10 +2931,11 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                         */
                                        break;
 #endif
-                               };
+                               }
+
                                ptr+=opsize-2;
                                length-=opsize;
-               };
+               }
        }
 }
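/*
 * Editorial sketch (not part of the patch): a stripped-down, user-space
 * version of the kind/length option walk performed by tcp_parse_options()
 * above, handling only EOL, NOP and MSS.  The constants mirror the TCPOPT_*
 * values; parse_tcp_mss() is a hypothetical name.
 */
#include <stdio.h>

#define OPT_EOL 0
#define OPT_NOP 1
#define OPT_MSS 2

static int parse_tcp_mss(const unsigned char *opt, int length)
{
        while (length > 0) {
                int opcode = *opt++;
                int opsize;

                if (opcode == OPT_EOL)
                        return -1;
                if (opcode == OPT_NOP) {        /* one-byte padding option */
                        length--;
                        continue;
                }
                if (length < 2)
                        return -1;              /* truncated option */
                opsize = opt[0];
                if (opsize < 2 || opsize > length)
                        return -1;              /* don't parse partial options */
                if (opcode == OPT_MSS && opsize == 4)
                        return (opt[1] << 8) | opt[2];
                opt += opsize - 1;              /* skip to the next option */
                length -= opsize;
        }
        return -1;
}

int main(void)
{
        const unsigned char opts[] = { OPT_NOP, OPT_MSS, 4, 0x05, 0xb4, OPT_EOL };

        printf("mss=%d\n", parse_tcp_mss(opts, sizeof(opts)));  /* 1460 */
        return 0;
}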
 
@@ -2737,7 +2968,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
 static inline void tcp_store_ts_recent(struct tcp_sock *tp)
 {
        tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
-       tp->rx_opt.ts_recent_stamp = xtime.tv_sec;
+       tp->rx_opt.ts_recent_stamp = get_seconds();
 }
 
 static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
@@ -2750,8 +2981,8 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
                 * Not only, also it occurs for expired timestamps.
                 */
 
-               if((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
-                  xtime.tv_sec >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
+               if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
+                  get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
                        tcp_store_ts_recent(tp);
        }
 }
@@ -2782,7 +3013,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        u32 seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
 
@@ -2803,7 +3034,7 @@ static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
-               xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
+               get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
                !tcp_disordered_ack(sk, skb));
 }
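/*
 * Editorial sketch (not part of the patch): the PAWS test above relies on
 * serial-number arithmetic -- the difference of two 32-bit counters is cast
 * to a signed 32-bit value, so "newer than" comparisons keep working across
 * wraparound.  ts_newer_or_equal() is a hypothetical helper modelling the
 * (s32)(rcv_tsval - ts_recent) >= 0 check used in tcp_replace_ts_recent().
 */
#include <stdint.h>
#include <stdio.h>

static int ts_newer_or_equal(uint32_t rcv_tsval, uint32_t ts_recent)
{
        return (int32_t)(rcv_tsval - ts_recent) >= 0;
}

int main(void)
{
        /* 0x00000002 is "after" 0xfffffffe even though it is numerically smaller */
        printf("%d\n", ts_newer_or_equal(0x00000002u, 0xfffffffeu));    /* 1 */
        printf("%d\n", ts_newer_or_equal(0xfffffffeu, 0x00000002u));    /* 0 */
        return 0;
}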
 
@@ -2910,7 +3141,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
                        printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
                               __FUNCTION__, sk->sk_state);
                        break;
-       };
+       }
 
        /* It _is_ possible, that we have something out-of-order _after_ FIN.
         * Probably, we should reset in this case. For now drop them.
@@ -3009,7 +3240,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
                         */
                        tp->rx_opt.num_sacks--;
                        tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
-                       for(i=this_sack; i < tp->rx_opt.num_sacks; i++)
+                       for (i=this_sack; i < tp->rx_opt.num_sacks; i++)
                                sp[i] = sp[i+1];
                        continue;
                }
@@ -3062,7 +3293,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
                tp->rx_opt.num_sacks--;
                sp--;
        }
-       for(; this_sack > 0; this_sack--, sp--)
+       for (; this_sack > 0; this_sack--, sp--)
                *sp = *(sp-1);
 
 new_sack:
@@ -3088,7 +3319,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
                return;
        }
 
-       for(this_sack = 0; this_sack < num_sacks; ) {
+       for (this_sack = 0; this_sack < num_sacks; ) {
                /* Check if the start of the sack is covered by RCV.NXT. */
                if (!before(tp->rcv_nxt, sp->start_seq)) {
                        int i;
@@ -3144,8 +3375,8 @@ static void tcp_ofo_queue(struct sock *sk)
                __skb_unlink(skb, &tp->out_of_order_queue);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               if(skb->h.th->fin)
-                       tcp_fin(skb, sk, skb->h.th);
+               if (tcp_hdr(skb)->fin)
+                       tcp_fin(skb, sk, tcp_hdr(skb));
        }
 }
 
@@ -3153,7 +3384,7 @@ static int tcp_prune_queue(struct sock *sk);
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        struct tcp_sock *tp = tcp_sk(sk);
        int eaten = -1;
 
@@ -3210,9 +3441,9 @@ queue_and_out:
                        __skb_queue_tail(&sk->sk_receive_queue, skb);
                }
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               if(skb->len)
-                       tcp_event_data_recv(sk, tp, skb);
-               if(th->fin)
+               if (skb->len)
+                       tcp_event_data_recv(sk, skb);
+               if (th->fin)
                        tcp_fin(skb, sk, th);
 
                if (!skb_queue_empty(&tp->out_of_order_queue)) {
@@ -3228,7 +3459,7 @@ queue_and_out:
                if (tp->rx_opt.num_sacks)
                        tcp_sack_remove(tp);
 
-               tcp_fast_path_check(sk, tp);
+               tcp_fast_path_check(sk);
 
                if (eaten > 0)
                        __kfree_skb(skb);
@@ -3392,7 +3623,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                 * - bloated or contains data before "start" or
                 *   overlaps to the next one.
                 */
-               if (!skb->h.th->syn && !skb->h.th->fin &&
+               if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
                    (tcp_win_from_space(skb->truesize) > skb->len ||
                     before(TCP_SKB_CB(skb)->seq, start) ||
                     (skb->next != tail &&
@@ -3403,7 +3634,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                start = TCP_SKB_CB(skb)->end_seq;
                skb = skb->next;
        }
-       if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+       if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
                return;
 
        while (before(start, end)) {
@@ -3419,11 +3650,14 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                nskb = alloc_skb(copy+header, GFP_ATOMIC);
                if (!nskb)
                        return;
+
+               skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head);
+               skb_set_network_header(nskb, (skb_network_header(skb) -
+                                             skb->head));
+               skb_set_transport_header(nskb, (skb_transport_header(skb) -
+                                               skb->head));
                skb_reserve(nskb, header);
                memcpy(nskb->head, skb->head, header);
-               nskb->nh.raw = nskb->head + (skb->nh.raw-skb->head);
-               nskb->h.raw = nskb->head + (skb->h.raw-skb->head);
-               nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
                __skb_insert(nskb, skb->prev, skb, list);
@@ -3449,7 +3683,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                                __kfree_skb(skb);
                                NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
                                skb = next;
-                               if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+                               if (skb == tail ||
+                                   tcp_hdr(skb)->syn ||
+                                   tcp_hdr(skb)->fin)
                                        return;
                        }
                }
@@ -3514,7 +3750,7 @@ static int tcp_prune_queue(struct sock *sk)
        NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
-               tcp_clamp_window(sk, tp);
+               tcp_clamp_window(sk);
        else if (tcp_memory_pressure)
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
@@ -3583,8 +3819,10 @@ void tcp_cwnd_application_limited(struct sock *sk)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
+static int tcp_should_expand_sndbuf(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        /* If the user specified a specific send buffer setting, do
         * not modify it.
         */
@@ -3616,7 +3854,7 @@ static void tcp_new_space(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tcp_should_expand_sndbuf(sk, tp)) {
+       if (tcp_should_expand_sndbuf(sk)) {
                int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
                        MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
                    demanded = max_t(unsigned int, tp->snd_cwnd,
@@ -3640,9 +3878,9 @@ static void tcp_check_space(struct sock *sk)
        }
 }
 
-static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_data_snd_check(struct sock *sk)
 {
-       tcp_push_pending_frames(sk, tp);
+       tcp_push_pending_frames(sk);
        tcp_check_space(sk);
 }
 
@@ -3790,7 +4028,7 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
        int err;
 
        local_bh_enable();
-       if (skb->ip_summed==CHECKSUM_UNNECESSARY)
+       if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
        else
                err = skb_copy_and_csum_datagram_iovec(skb, hlen,
@@ -3822,7 +4060,7 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb
 
 static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 {
-       return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+       return !skb_csum_unnecessary(skb) &&
                __tcp_checksum_complete_user(sk, skb);
 }
 
@@ -3840,7 +4078,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
                tp->ucopy.dma_chan = get_softnet_dma();
 
-       if (tp->ucopy.dma_chan && skb->ip_summed == CHECKSUM_UNNECESSARY) {
+       if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
                dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
                        skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);
@@ -3856,7 +4094,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
                tcp_rcv_space_adjust(sk);
 
                if ((tp->ucopy.len == 0) ||
-                   (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
+                   (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
                    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
                        tp->ucopy.wakeup = 1;
                        sk->sk_data_ready(sk, 0);
@@ -3976,7 +4214,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                 */
                                tcp_ack(sk, skb, 0);
                                __kfree_skb(skb);
-                               tcp_data_snd_check(sk, tp);
+                               tcp_data_snd_check(sk);
                                return 0;
                        } else { /* Header too small */
                                TCP_INC_STATS_BH(TCP_MIB_INERRS);
@@ -4047,12 +4285,12 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                        }
 
-                       tcp_event_data_recv(sk, tp, skb);
+                       tcp_event_data_recv(sk, skb);
 
                        if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
                                /* Well, only one small jumplet in fast path... */
                                tcp_ack(sk, skb, FLAG_DATA);
-                               tcp_data_snd_check(sk, tp);
+                               tcp_data_snd_check(sk);
                                if (!inet_csk_ack_scheduled(sk))
                                        goto no_ack;
                        }
@@ -4109,7 +4347,7 @@ slow_path:
                goto discard;
        }
 
-       if(th->rst) {
+       if (th->rst) {
                tcp_reset(sk);
                goto discard;
        }
@@ -4124,7 +4362,7 @@ slow_path:
        }
 
 step5:
-       if(th->ack)
+       if (th->ack)
                tcp_ack(sk, skb, FLAG_SLOWPATH);
 
        tcp_rcv_rtt_measure_ts(sk, skb);
@@ -4135,7 +4373,7 @@ step5:
        /* step 7: process the segment text */
        tcp_data_queue(sk, skb);
 
-       tcp_data_snd_check(sk, tp);
+       tcp_data_snd_check(sk);
        tcp_ack_snd_check(sk);
        return 0;
 
@@ -4412,13 +4650,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                goto discard;
 
        case TCP_LISTEN:
-               if(th->ack)
+               if (th->ack)
                        return 1;
 
-               if(th->rst)
+               if (th->rst)
                        goto discard;
 
-               if(th->syn) {
+               if (th->syn) {
                        if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                                return 1;
 
@@ -4452,7 +4690,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                /* Do step6 onward by hand. */
                tcp_urg(sk, skb, th);
                __kfree_skb(skb);
-               tcp_data_snd_check(sk, tp);
+               tcp_data_snd_check(sk);
                return 0;
        }
 
@@ -4474,7 +4712,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        /* step 2: check RST bit */
-       if(th->rst) {
+       if (th->rst) {
                tcp_reset(sk);
                goto discard;
        }
@@ -4497,7 +4735,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        if (th->ack) {
                int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
 
-               switch(sk->sk_state) {
+               switch (sk->sk_state) {
                case TCP_SYN_RECV:
                        if (acceptable) {
                                tp->copied_seq = tp->rcv_nxt;
@@ -4644,7 +4882,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
        /* tcp_data could move socket to TIME-WAIT */
        if (sk->sk_state != TCP_CLOSE) {
-               tcp_data_snd_check(sk, tp);
+               tcp_data_snd_check(sk);
                tcp_ack_snd_check(sk);
        }
 
index 0ba74bb..5a3e7f8 100644 (file)
@@ -88,7 +88,7 @@ int sysctl_tcp_low_latency __read_mostly;
 #define ICMP_MIN_LENGTH 8
 
 /* Socket used for sending RSTs */
-static struct socket *tcp_socket;
+static struct socket *tcp_socket __read_mostly;
 
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
@@ -125,10 +125,10 @@ void tcp_unhash(struct sock *sk)
 
 static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
-       return secure_tcp_sequence_number(skb->nh.iph->daddr,
-                                         skb->nh.iph->saddr,
-                                         skb->h.th->dest,
-                                         skb->h.th->source);
+       return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
+                                         ip_hdr(skb)->saddr,
+                                         tcp_hdr(skb)->dest,
+                                         tcp_hdr(skb)->source);
 }
 
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -149,7 +149,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
         */
        if (tcptw->tw_ts_recent_stamp &&
            (twp == NULL || (sysctl_tcp_tw_reuse &&
-                            xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
+                            get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
                        tp->write_seq = 1;
@@ -224,7 +224,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                 * when trying new connection.
                 */
                if (peer != NULL &&
-                   peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
+                   peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
                        tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
                        tp->rx_opt.ts_recent = peer->tcp_ts;
                }
@@ -354,8 +354,8 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
        struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
        struct tcp_sock *tp;
        struct inet_sock *inet;
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        struct sock *sk;
        __u32 seq;
        int err;
@@ -499,11 +499,12 @@ out:
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
        struct inet_sock *inet = inet_sk(sk);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~tcp_v4_check(len, inet->saddr,
                                          inet->daddr, 0);
+               skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
@@ -515,17 +516,18 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 
 int tcp_v4_gso_send_check(struct sk_buff *skb)
 {
-       struct iphdr *iph;
+       const struct iphdr *iph;
        struct tcphdr *th;
 
        if (!pskb_may_pull(skb, sizeof(*th)))
                return -EINVAL;
 
-       iph = skb->nh.iph;
-       th = skb->h.th;
+       iph = ip_hdr(skb);
+       th = tcp_hdr(skb);
 
        th->check = 0;
        th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
+       skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;
        return 0;
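/*
 * Editorial sketch (not part of the patch): in the CHECKSUM_PARTIAL paths
 * above the stack conceptually seeds th->check with the one's-complement sum
 * of the IPv4 pseudo-header, and the device later folds in the TCP header
 * and payload starting at csum_start.  This stand-alone model computes only
 * the folded pseudo-header part, treating the fields in host order as a
 * simplification; pseudo_hdr_sum() is a hypothetical name.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr, uint16_t tcp_len)
{
        uint32_t sum = 0;

        sum += saddr >> 16;             /* 16-bit words of the source address */
        sum += saddr & 0xffff;
        sum += daddr >> 16;             /* 16-bit words of the destination */
        sum += daddr & 0xffff;
        sum += 6;                       /* IPPROTO_TCP */
        sum += tcp_len;                 /* TCP header + payload length */

        while (sum >> 16)               /* fold carries back into 16 bits */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

int main(void)
{
        uint32_t src = ntohl(inet_addr("192.0.2.1"));
        uint32_t dst = ntohl(inet_addr("198.51.100.2"));

        printf("pseudo-header sum: 0x%04x\n",
               (unsigned int)pseudo_hdr_sum(src, dst, 40));
        return 0;
}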
@@ -546,7 +548,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 
 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
 #ifdef CONFIG_TCP_MD5SIG
@@ -585,7 +587,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
        arg.iov[0].iov_len  = sizeof(rep.th);
 
 #ifdef CONFIG_TCP_MD5SIG
-       key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL;
+       key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
        if (key) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
@@ -597,14 +599,14 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 
                tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
                                        key,
-                                       skb->nh.iph->daddr,
-                                       skb->nh.iph->saddr,
+                                       ip_hdr(skb)->daddr,
+                                       ip_hdr(skb)->saddr,
                                        &rep.th, IPPROTO_TCP,
                                        arg.iov[0].iov_len);
        }
 #endif
-       arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
-                                     skb->nh.iph->saddr, /* XXX */
+       arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
+                                     ip_hdr(skb)->saddr, /* XXX */
                                      sizeof(struct tcphdr), IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 
@@ -622,7 +624,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
                            struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 ts)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -670,7 +672,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
         * skb->sk) holds true, but we program defensively.
         */
        if (!twsk && skb->sk) {
-               key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr);
+               key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr);
        } else if (twsk && twsk->tw_md5_keylen) {
                tw_key.key = twsk->tw_md5_key;
                tw_key.keylen = twsk->tw_md5_keylen;
@@ -690,14 +692,14 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
 
                tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
                                        key,
-                                       skb->nh.iph->daddr,
-                                       skb->nh.iph->saddr,
+                                       ip_hdr(skb)->daddr,
+                                       ip_hdr(skb)->saddr,
                                        &rep.th, IPPROTO_TCP,
                                        arg.iov[0].iov_len);
        }
 #endif
-       arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
-                                     skb->nh.iph->saddr, /* XXX */
+       arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
+                                     ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 
@@ -745,7 +747,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
        skb = tcp_make_synack(sk, dst, req);
 
        if (skb) {
-               struct tcphdr *th = skb->h.th;
+               struct tcphdr *th = tcp_hdr(skb);
 
                th->check = tcp_v4_check(skb->len,
                                         ireq->loc_addr,
@@ -781,7 +783,7 @@ static void syn_flood_warning(struct sk_buff *skb)
                warntime = jiffies;
                printk(KERN_INFO
                       "possible SYN flooding on port %d. Sending cookies.\n",
-                      ntohs(skb->h.th->dest));
+                      ntohs(tcp_hdr(skb)->dest));
        }
 }
 #endif
@@ -1133,8 +1135,8 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
         */
        __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
-       struct iphdr *iph = skb->nh.iph;
-       struct tcphdr *th = skb->h.th;
+       const struct iphdr *iph = ip_hdr(skb);
+       struct tcphdr *th = tcp_hdr(skb);
        int length = (th->doff << 2) - sizeof(struct tcphdr);
        int genhash;
        unsigned char *ptr;
@@ -1251,8 +1253,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        struct inet_request_sock *ireq;
        struct tcp_options_received tmp_opt;
        struct request_sock *req;
-       __be32 saddr = skb->nh.iph->saddr;
-       __be32 daddr = skb->nh.iph->daddr;
+       __be32 saddr = ip_hdr(skb)->saddr;
+       __be32 daddr = ip_hdr(skb)->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
        struct dst_entry *dst = NULL;
 #ifdef CONFIG_SYN_COOKIES
@@ -1327,7 +1329,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        ireq->rmt_addr = saddr;
        ireq->opt = tcp_v4_save_options(sk, skb);
        if (!want_cookie)
-               TCP_ECN_create_request(req, skb->h.th);
+               TCP_ECN_create_request(req, tcp_hdr(skb));
 
        if (want_cookie) {
 #ifdef CONFIG_SYN_COOKIES
@@ -1351,7 +1353,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                    (dst = inet_csk_route_req(sk, req)) != NULL &&
                    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
                    peer->v4daddr == saddr) {
-                       if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
+                       if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
                            (s32)(peer->tcp_ts - req->ts_recent) >
                                                        TCP_PAWS_WINDOW) {
                                NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
@@ -1375,7 +1377,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
                                       "request from %u.%u.%u.%u/%u\n",
                                       NIPQUAD(saddr),
-                                      ntohs(skb->h.th->source));
+                                      ntohs(tcp_hdr(skb)->source));
                        dst_release(dst);
                        goto drop_and_free;
                }
@@ -1439,7 +1441,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newinet->opt          = ireq->opt;
        ireq->opt             = NULL;
        newinet->mc_index     = inet_iif(skb);
-       newinet->mc_ttl       = skb->nh.iph->ttl;
+       newinet->mc_ttl       = ip_hdr(skb)->ttl;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newinet->opt)
                inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
@@ -1481,8 +1483,8 @@ exit:
 
 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th;
-       struct iphdr *iph = skb->nh.iph;
+       struct tcphdr *th = tcp_hdr(skb);
+       const struct iphdr *iph = ip_hdr(skb);
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
@@ -1491,9 +1493,8 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        if (req)
                return tcp_check_req(sk, skb, req, prev);
 
-       nsk = inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
-                                     th->source, skb->nh.iph->daddr,
-                                     th->dest, inet_iif(skb));
+       nsk = inet_lookup_established(&tcp_hashinfo, iph->saddr, th->source,
+                                     iph->daddr, th->dest, inet_iif(skb));
 
        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
@@ -1513,15 +1514,17 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 
 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
 {
+       const struct iphdr *iph = ip_hdr(skb);
+
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               if (!tcp_v4_check(skb->len, skb->nh.iph->saddr,
-                                 skb->nh.iph->daddr, skb->csum)) {
+               if (!tcp_v4_check(skb->len, iph->saddr,
+                                 iph->daddr, skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        return 0;
                }
        }
 
-       skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
+       skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                       skb->len, IPPROTO_TCP, 0);
 
        if (skb->len <= 76) {
@@ -1555,7 +1558,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
-               if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+               if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
                        rsk = sk;
                        goto reset;
                }
@@ -1563,7 +1566,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                return 0;
        }
 
-       if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
+       if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
                goto csum_err;
 
        if (sk->sk_state == TCP_LISTEN) {
@@ -1581,7 +1584,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
        }
 
        TCP_CHECK_TIMER(sk);
-       if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+       if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
                rsk = sk;
                goto reset;
        }
@@ -1610,6 +1613,7 @@ csum_err:
 
 int tcp_v4_rcv(struct sk_buff *skb)
 {
+       const struct iphdr *iph;
        struct tcphdr *th;
        struct sock *sk;
        int ret;
@@ -1623,7 +1627,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
 
        if (th->doff < sizeof(struct tcphdr) / 4)
                goto bad_packet;
@@ -1634,23 +1638,21 @@ int tcp_v4_rcv(struct sk_buff *skb)
         * Packet length and doff are validated by header prediction,
         * provided case of th->doff==0 is eliminated.
         * So, we defer the checks. */
-       if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
-            tcp_v4_checksum_init(skb)))
+       if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
                goto bad_packet;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
+       iph = ip_hdr(skb);
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff * 4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->when    = 0;
-       TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
+       TCP_SKB_CB(skb)->flags   = iph->tos;
        TCP_SKB_CB(skb)->sacked  = 0;
 
-       sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
-                          skb->nh.iph->daddr, th->dest,
-                          inet_iif(skb));
-
+       sk = __inet_lookup(&tcp_hashinfo, iph->saddr, th->source,
+                          iph->daddr, th->dest, inet_iif(skb));
        if (!sk)
                goto no_tcp_socket;
 
@@ -1724,8 +1726,7 @@ do_time_wait:
        switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN: {
                struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
-                                                       skb->nh.iph->daddr,
-                                                       th->dest,
+                                                       iph->daddr, th->dest,
                                                        inet_iif(skb));
                if (sk2) {
                        inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
@@ -1770,7 +1771,7 @@ int tcp_v4_remember_stamp(struct sock *sk)
 
        if (peer) {
                if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
-                   (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
+                   (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
                     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
                        peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
                        peer->tcp_ts = tp->rx_opt.ts_recent;
@@ -1791,7 +1792,7 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
                const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 
                if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
-                   (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
+                   (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
                     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
                        peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
                        peer->tcp_ts       = tcptw->tw_ts_recent;
@@ -1890,7 +1891,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
        tcp_cleanup_congestion_control(sk);
 
        /* Cleanup up the write buffer. */
-       sk_stream_writequeue_purge(sk);
+       tcp_write_queue_purge(sk);
 
        /* Cleans up our, hopefully empty, out_of_order_queue. */
        __skb_queue_purge(&tp->out_of_order_queue);
@@ -2293,13 +2294,13 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
                req);
 }
 
-static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
+static void get_tcp4_sock(struct sock *sk, char *tmpbuf, int i)
 {
        int timer_active;
        unsigned long timer_expires;
-       struct tcp_sock *tp = tcp_sk(sp);
-       const struct inet_connection_sock *icsk = inet_csk(sp);
-       struct inet_sock *inet = inet_sk(sp);
+       struct tcp_sock *tp = tcp_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+       struct inet_sock *inet = inet_sk(sk);
        __be32 dest = inet->daddr;
        __be32 src = inet->rcv_saddr;
        __u16 destp = ntohs(inet->dport);
@@ -2311,9 +2312,9 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
                timer_expires   = icsk->icsk_timeout;
-       } else if (timer_pending(&sp->sk_timer)) {
+       } else if (timer_pending(&sk->sk_timer)) {
                timer_active    = 2;
-               timer_expires   = sp->sk_timer.expires;
+               timer_expires   = sk->sk_timer.expires;
        } else {
                timer_active    = 0;
                timer_expires = jiffies;
@@ -2321,17 +2322,17 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
 
        sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
                        "%08X %5d %8d %lu %d %p %u %u %u %u %d",
-               i, src, srcp, dest, destp, sp->sk_state,
+               i, src, srcp, dest, destp, sk->sk_state,
                tp->write_seq - tp->snd_una,
-               sp->sk_state == TCP_LISTEN ? sp->sk_ack_backlog :
+               sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
                                             (tp->rcv_nxt - tp->copied_seq),
                timer_active,
                jiffies_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
-               sock_i_uid(sp),
+               sock_i_uid(sk),
                icsk->icsk_probes_out,
-               sock_i_ino(sp),
-               atomic_read(&sp->sk_refcnt), sp,
+               sock_i_ino(sk),
+               atomic_read(&sk->sk_refcnt), sk,
                icsk->icsk_rto,
                icsk->icsk_ack.ato,
                (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
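Taken together, these tcp_ipv4.c hunks are largely mechanical: skb->nh.iph and skb->h.th give way to the ip_hdr()/tcp_hdr() accessors, hot paths cache the pointer in a local const variable, and a few sk/tp locals are renamed for consistency. A sketch of the receive-path pattern the file converges on, mirroring the tcp_v4_rcv() hunk above (the helper itself is hypothetical):

        /* Hypothetical helper: fill the TCP control block from cached headers. */
        static void tcp_v4_fill_cb(struct sk_buff *skb)
        {
                const struct iphdr *iph = ip_hdr(skb);  /* was skb->nh.iph */
                const struct tcphdr *th = tcp_hdr(skb); /* was skb->h.th   */

                TCP_SKB_CB(skb)->seq     = ntohl(th->seq);
                TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                           skb->len - th->doff * 4;
                TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
                TCP_SKB_CB(skb)->when    = 0;
                TCP_SKB_CB(skb)->flags   = iph->tos;
                TCP_SKB_CB(skb)->sacked  = 0;
        }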
index f0ebaf0..43294ad 100644
@@ -218,7 +218,7 @@ static u32 tcp_lp_owd_calculator(struct sock *sk)
  *   3. calc smoothed OWD (SOWD).
  * Most ideas come from the original TCP-LP implementation.
  */
-static void tcp_lp_rtt_sample(struct sock *sk, u32 usrtt)
+static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
 {
        struct lp *lp = inet_csk_ca(sk);
        s64 mowd = tcp_lp_owd_calculator(sk);
@@ -261,11 +261,13 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 usrtt)
  * newReno in increase case.
  * We work it out by following the idea from TCP-LP's paper directly
  */
-static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked)
+static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct lp *lp = inet_csk_ca(sk);
 
+       tcp_lp_rtt_sample(sk,  ktime_to_us(net_timedelta(last)));
+
        /* calc inference */
        if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
                lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr);
@@ -312,11 +314,11 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked)
 }
 
 static struct tcp_congestion_ops tcp_lp = {
+       .flags = TCP_CONG_RTT_STAMP,
        .init = tcp_lp_init,
        .ssthresh = tcp_reno_ssthresh,
        .cong_avoid = tcp_lp_cong_avoid,
        .min_cwnd = tcp_reno_min_cwnd,
-       .rtt_sample = tcp_lp_rtt_sample,
        .pkts_acked = tcp_lp_pkts_acked,
 
        .owner = THIS_MODULE,
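The tcp_lp hunks show the congestion-control API change carried by this merge: the .rtt_sample hook is gone, a module that needs RTTs sets TCP_CONG_RTT_STAMP in .flags, and pkts_acked() now receives the ktime of the newest acknowledged segment so the module derives the RTT itself. A minimal sketch of such a hook, separate from the patch (only the kernel APIs shown are real; the function is illustrative):

        static void example_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
        {
                /* net_timedelta(last) = "now - last"; 'last' is the send timestamp
                 * recorded in tcp_transmit_skb() because TCP_CONG_RTT_STAMP is set. */
                s64 rtt_us = ktime_to_us(net_timedelta(last));

                pr_debug("%u pkts acked, rtt ~%lld us\n", num_acked, rtt_us);
        }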
index 6b5c64f..a12b08f 100644
@@ -149,7 +149,7 @@ kill_with_rst:
                tw->tw_substate   = TCP_TIME_WAIT;
                tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
-                       tcptw->tw_ts_recent_stamp = xtime.tv_sec;
+                       tcptw->tw_ts_recent_stamp = get_seconds();
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }
 
@@ -208,7 +208,7 @@ kill:
 
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
-                       tcptw->tw_ts_recent_stamp = xtime.tv_sec;
+                       tcptw->tw_ts_recent_stamp = get_seconds();
                }
 
                inet_twsk_put(tw);
@@ -246,7 +246,7 @@ kill:
        if (paws_reject)
                NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
 
-       if(!th->rst) {
+       if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * If it is ACKless SYN it may be both old duplicate
@@ -324,7 +324,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                                if (tcp_alloc_md5sig_pool() == NULL)
                                        BUG();
                        }
-               } while(0);
+               } while (0);
 #endif
 
                /* Linkage updates. */
@@ -387,8 +387,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                /* Now setup tcp_sock */
                newtp = tcp_sk(newsk);
                newtp->pred_flags = 0;
-               newtp->rcv_nxt = treq->rcv_isn + 1;
-               newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;
+               newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
+               newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1;
 
                tcp_prequeue_init(newtp);
 
@@ -422,10 +422,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
                skb_queue_head_init(&newtp->out_of_order_queue);
-               newtp->rcv_wup = treq->rcv_isn + 1;
                newtp->write_seq = treq->snt_isn + 1;
                newtp->pushed_seq = newtp->write_seq;
-               newtp->copied_seq = treq->rcv_isn + 1;
 
                newtp->rx_opt.saw_tstamp = 0;
 
@@ -440,7 +438,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                                                       keepalive_time_when(newtp));
 
                newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
-               if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
+               if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
                        if (sysctl_tcp_fack)
                                newtp->rx_opt.sack_ok |= 2;
                }
@@ -455,12 +453,13 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                        newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                        newtp->window_clamp = min(newtp->window_clamp, 65535U);
                }
-               newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
+               newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
+                                 newtp->rx_opt.snd_wscale);
                newtp->max_window = newtp->snd_wnd;
 
                if (newtp->rx_opt.tstamp_ok) {
                        newtp->rx_opt.ts_recent = req->ts_recent;
-                       newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
+                       newtp->rx_opt.ts_recent_stamp = get_seconds();
                        newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
                } else {
                        newtp->rx_opt.ts_recent_stamp = 0;
@@ -490,7 +489,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                           struct request_sock *req,
                           struct request_sock **prev)
 {
-       struct tcphdr *th = skb->h.th;
+       const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        int paws_reject = 0;
        struct tcp_options_received tmp_opt;
@@ -506,7 +505,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                         * it can be estimated (approximately)
                         * from another data.
                         */
-                       tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
+                       tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
                        paws_reject = tcp_paws_check(&tmp_opt, th->rst);
                }
        }
@@ -712,8 +711,8 @@ int tcp_child_process(struct sock *parent, struct sock *child,
        int state = child->sk_state;
 
        if (!sock_owned_by_user(child)) {
-               ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
-
+               ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
+                                           skb->len);
                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent, 0);
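Two small arithmetic points in the tcp_minisocks.c hunks above, spelled out here rather than taken from the patch: the tcp_check_req() estimate backdates the missing timestamp by the retransmission timeouts already spent, ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ) << retrans), so assuming the 2.6-era TCP_TIMEOUT_INIT of 3*HZ a twice-retransmitted request is treated as if its timestamp were seen 3 << 2 = 12 seconds ago. Likewise the snd_wnd assignment applies the scale factor negotiated in the SYN exchange: a raw window field of 0xffff with snd_wscale = 7 gives 65535 << 7 = 8388480 bytes, roughly an 8 MB offered window.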
index 3c24881..e70a684 100644
@@ -62,14 +62,13 @@ int sysctl_tcp_base_mss __read_mostly = 512;
 /* By default, RFC2861 behavior.  */
 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 
-static void update_send_head(struct sock *sk, struct tcp_sock *tp,
-                            struct sk_buff *skb)
+static void update_send_head(struct sock *sk, struct sk_buff *skb)
 {
-       sk->sk_send_head = skb->next;
-       if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
-               sk->sk_send_head = NULL;
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       tcp_advance_send_head(sk, skb);
        tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
-       tcp_packets_out_inc(sk, tp, skb);
+       tcp_packets_out_inc(sk, skb);
 }
 
 /* SND.NXT, if window was not shrunk.
@@ -78,8 +77,10 @@ static void update_send_head(struct sock *sk, struct tcp_sock *tp,
  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  * invalid. OK, let's make this for now:
  */
-static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
+static inline __u32 tcp_acceptable_seq(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
                return tp->snd_nxt;
        else
@@ -238,7 +239,7 @@ static u16 tcp_select_window(struct sock *sk)
        u32 new_win = __tcp_select_window(sk);
 
        /* Never shrink the offered window */
-       if(new_win < cur_win) {
+       if (new_win < cur_win) {
                /* Danger Will Robinson!
                 * Don't update rcv_wup/rcv_wnd here or else
                 * we will not be able to advertise a zero
@@ -289,10 +290,12 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
                               (TCPOPT_SACK <<  8) |
                               (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
                                                     TCPOLEN_SACK_PERBLOCK)));
-               for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
+
+               for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
                        *ptr++ = htonl(sp[this_sack].start_seq);
                        *ptr++ = htonl(sp[this_sack].end_seq);
                }
+
                if (tp->rx_opt.dsack) {
                        tp->rx_opt.dsack = 0;
                        tp->rx_opt.eff_sacks--;
@@ -337,7 +340,7 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
         */
        *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
        if (ts) {
-               if(sack)
+               if (sack)
                        *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
                                       (TCPOLEN_SACK_PERM << 16) |
                                       (TCPOPT_TIMESTAMP << 8) |
@@ -349,7 +352,7 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
                                       TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tstamp);         /* TSVAL */
                *ptr++ = htonl(ts_recent);      /* TSECR */
-       } else if(sack)
+       } else if (sack)
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_SACK_PERM << 8) |
@@ -406,7 +409,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        /* If congestion control is doing timestamping, we must
         * take such a timestamp before we potentially clone/copy.
         */
-       if (icsk->icsk_ca_ops->rtt_sample)
+       if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
                __net_timestamp(skb);
 
        if (likely(clone_it)) {
@@ -430,7 +433,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        sysctl_flags = 0;
        if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
                tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
-               if(sysctl_tcp_timestamps) {
+               if (sysctl_tcp_timestamps) {
                        tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
                        sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
                }
@@ -465,11 +468,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
-       th = (struct tcphdr *) skb_push(skb, tcp_header_size);
-       skb->h.th = th;
+       skb_push(skb, tcp_header_size);
+       skb_reset_transport_header(skb);
        skb_set_owner_w(skb, sk);
 
        /* Build TCP header and checksum it. */
+       th = tcp_hdr(skb);
        th->source              = inet->sport;
        th->dest                = inet->dport;
        th->seq                 = htonl(tcb->seq);
@@ -515,7 +519,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                                             md5 ? &md5_hash_location :
 #endif
                                             NULL);
-               TCP_ECN_send(sk, tp, skb, tcp_header_size);
+               TCP_ECN_send(sk, skb, tcp_header_size);
        }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -524,7 +528,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                tp->af_specific->calc_md5_hash(md5_hash_location,
                                               md5,
                                               sk, NULL, NULL,
-                                              skb->h.th,
+                                              tcp_hdr(skb),
                                               sk->sk_protocol,
                                               skb->len);
        }
@@ -545,7 +549,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        if (likely(err <= 0))
                return err;
 
-       tcp_enter_cwr(sk);
+       tcp_enter_cwr(sk, 1);
 
        return net_xmit_eval(err);
 
@@ -567,12 +571,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
        /* Advance write_seq and place onto the write_queue. */
        tp->write_seq = TCP_SKB_CB(skb)->end_seq;
        skb_header_release(skb);
-       __skb_queue_tail(&sk->sk_write_queue, skb);
+       tcp_add_write_queue_tail(sk, skb);
        sk_charge_skb(sk, skb);
-
-       /* Queue it, remembering where we must start sending. */
-       if (sk->sk_send_head == NULL)
-               sk->sk_send_head = skb;
 }
 
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
@@ -705,7 +705,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
 
        /* Link BUFF into the send queue. */
        skb_header_release(buff);
-       __skb_append(skb, buff, &sk->sk_write_queue);
+       tcp_insert_write_queue_after(skb, buff, sk);
 
        return 0;
 }
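tcp_fragment() above is one of many call sites in this file converted from open-coded sk_write_queue / sk_send_head manipulation to the new write-queue helpers. Roughly what two of those helpers amount to, reconstructed from the bookkeeping they replace (the authoritative definitions live in include/net/tcp.h, not here):

        static inline struct sk_buff *tcp_send_head(struct sock *sk)
        {
                return sk->sk_send_head;
        }

        static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
        {
                __skb_queue_tail(&sk->sk_write_queue, skb);

                /* Queue it, remembering where we must start sending. */
                if (sk->sk_send_head == NULL)
                        sk->sk_send_head = skb;
        }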
@@ -736,7 +736,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
        }
        skb_shinfo(skb)->nr_frags = k;
 
-       skb->tail = skb->data;
+       skb_reset_tail_pointer(skb);
        skb->data_len -= len;
        skb->len = skb->data_len;
 }
@@ -930,8 +930,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 
 /* Congestion window validation. (RFC2861) */
 
-static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
+static void tcp_cwnd_validate(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out = tp->packets_out;
 
        if (packets_out >= tp->snd_cwnd) {
@@ -1056,7 +1057,7 @@ static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, uns
        return !after(end_seq, tp->snd_una + tp->snd_wnd);
 }
 
-/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
+/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
  * should be put on the wire right now.  If so, it returns the number of
  * packets allowed by the congestion window.
  */
@@ -1079,15 +1080,10 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
        return cwnd_quota;
 }
 
-static inline int tcp_skb_is_last(const struct sock *sk,
-                                 const struct sk_buff *skb)
-{
-       return skb->next == (struct sk_buff *)&sk->sk_write_queue;
-}
-
-int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
+int tcp_may_send_now(struct sock *sk)
 {
-       struct sk_buff *skb = sk->sk_send_head;
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb = tcp_send_head(sk);
 
        return (skb &&
                tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
@@ -1143,7 +1139,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 
        /* Link BUFF into the send queue. */
        skb_header_release(buff);
-       __skb_append(skb, buff, &sk->sk_write_queue);
+       tcp_insert_write_queue_after(skb, buff, sk);
 
        return 0;
 }
@@ -1153,8 +1149,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
  *
  * This algorithm is from John Heffner.
  */
-static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 send_win, cong_win, limit, in_flight;
 
@@ -1249,10 +1246,10 @@ static int tcp_mtu_probe(struct sock *sk)
 
        /* Have enough data in the send queue to probe? */
        len = 0;
-       if ((skb = sk->sk_send_head) == NULL)
+       if ((skb = tcp_send_head(sk)) == NULL)
                return -1;
        while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
-               skb = skb->next;
+               skb = tcp_write_queue_next(sk, skb);
        if (len < probe_size)
                return -1;
 
@@ -1279,9 +1276,9 @@ static int tcp_mtu_probe(struct sock *sk)
                return -1;
        sk_charge_skb(sk, nskb);
 
-       skb = sk->sk_send_head;
-       __skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue);
-       sk->sk_send_head = nskb;
+       skb = tcp_send_head(sk);
+       tcp_insert_write_queue_before(nskb, skb, sk);
+       tcp_advance_send_head(sk, skb);
 
        TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
        TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
@@ -1292,7 +1289,7 @@ static int tcp_mtu_probe(struct sock *sk)
 
        len = 0;
        while (len < probe_size) {
-               next = skb->next;
+               next = tcp_write_queue_next(sk, skb);
 
                copy = min_t(int, skb->len, probe_size - len);
                if (nskb->ip_summed)
@@ -1305,7 +1302,7 @@ static int tcp_mtu_probe(struct sock *sk)
                        /* We've eaten all the data from this skb.
                         * Throw it away. */
                        TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
-                       __skb_unlink(skb, &sk->sk_write_queue);
+                       tcp_unlink_write_queue(skb, sk);
                        sk_stream_free_skb(sk, skb);
                } else {
                        TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
@@ -1333,7 +1330,7 @@ static int tcp_mtu_probe(struct sock *sk)
                /* Decrement cwnd here because we are sending
                * effectively two packets. */
                tp->snd_cwnd--;
-               update_send_head(sk, tp, nskb);
+               update_send_head(sk, nskb);
 
                icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
                tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
@@ -1377,7 +1374,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                sent_pkts = 1;
        }
 
-       while ((skb = sk->sk_send_head)) {
+       while ((skb = tcp_send_head(sk))) {
                unsigned int limit;
 
                tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
@@ -1396,7 +1393,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                                                      nonagle : TCP_NAGLE_PUSH))))
                                break;
                } else {
-                       if (tcp_tso_should_defer(sk, tp, skb))
+                       if (tcp_tso_should_defer(sk, skb))
                                break;
                }
 
@@ -1425,31 +1422,31 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                /* Advance the send_head.  This one is sent out.
                 * This call will increment packets_out.
                 */
-               update_send_head(sk, tp, skb);
+               update_send_head(sk, skb);
 
                tcp_minshall_update(tp, mss_now, skb);
                sent_pkts++;
        }
 
        if (likely(sent_pkts)) {
-               tcp_cwnd_validate(sk, tp);
+               tcp_cwnd_validate(sk);
                return 0;
        }
-       return !tp->packets_out && sk->sk_send_head;
+       return !tp->packets_out && tcp_send_head(sk);
 }
 
 /* Push out any pending frames which were held back due to
  * TCP_CORK or attempt at coalescing tiny packets.
  * The socket must be locked by the caller.
  */
-void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-                              unsigned int cur_mss, int nonagle)
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+                              int nonagle)
 {
-       struct sk_buff *skb = sk->sk_send_head;
+       struct sk_buff *skb = tcp_send_head(sk);
 
        if (skb) {
                if (tcp_write_xmit(sk, cur_mss, nonagle))
-                       tcp_check_probe_timer(sk, tp);
+                       tcp_check_probe_timer(sk);
        }
 }
 
@@ -1459,7 +1456,7 @@ void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
 void tcp_push_one(struct sock *sk, unsigned int mss_now)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *skb = sk->sk_send_head;
+       struct sk_buff *skb = tcp_send_head(sk);
        unsigned int tso_segs, cwnd_quota;
 
        BUG_ON(!skb || skb->len < mss_now);
@@ -1493,8 +1490,8 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
                TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
                if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
-                       update_send_head(sk, tp, skb);
-                       tcp_cwnd_validate(sk, tp);
+                       update_send_head(sk, skb);
+                       tcp_cwnd_validate(sk);
                        return;
                }
        }
@@ -1620,7 +1617,7 @@ u32 __tcp_select_window(struct sock *sk)
 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *next_skb = skb->next;
+       struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
 
        /* The first test we must make is that neither of these two
         * SKB's are still referenced by someone else.
@@ -1630,7 +1627,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
                u16 flags = TCP_SKB_CB(skb)->flags;
 
                /* Also punt if next skb has been SACK'd. */
-               if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
+               if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
                        return;
 
                /* Next skb is out of window. */
@@ -1652,9 +1649,11 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
                clear_all_retrans_hints(tp);
 
                /* Ok.  We will be able to collapse the packet. */
-               __skb_unlink(next_skb, &sk->sk_write_queue);
+               tcp_unlink_write_queue(next_skb, sk);
 
-               memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
+               skb_copy_from_linear_data(next_skb,
+                                         skb_put(skb, next_skb_size),
+                                         next_skb_size);
 
                if (next_skb->ip_summed == CHECKSUM_PARTIAL)
                        skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1706,7 +1705,9 @@ void tcp_simple_retransmit(struct sock *sk)
        unsigned int mss = tcp_current_mss(sk, 0);
        int lost = 0;
 
-       sk_stream_for_retrans_queue(skb, sk) {
+       tcp_for_write_queue(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                if (skb->len > mss &&
                    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
                        if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
@@ -1788,13 +1789,13 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        }
 
        /* Collapse two adjacent packets if worthwhile and we can. */
-       if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
-          (skb->len < (cur_mss >> 1)) &&
-          (skb->next != sk->sk_send_head) &&
-          (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
-          (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
-          (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) &&
-          (sysctl_tcp_retrans_collapse != 0))
+       if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
+           (skb->len < (cur_mss >> 1)) &&
+           (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
+           (!tcp_skb_is_last(sk, skb)) &&
+           (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
+           (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
+           (sysctl_tcp_retrans_collapse != 0))
                tcp_retrans_try_collapse(sk, skb, cur_mss);
 
        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
@@ -1804,9 +1805,9 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         * retransmit when old data is attached.  So strip it off
         * since it is cheap to do so and saves bytes on the network.
         */
-       if(skb->len > 0 &&
-          (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
-          tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
+       if (skb->len > 0 &&
+           (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+           tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
                if (!pskb_trim(skb, 0)) {
                        TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
                        skb_shinfo(skb)->gso_segs = 1;
@@ -1872,15 +1873,17 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                skb = tp->retransmit_skb_hint;
                packet_cnt = tp->retransmit_cnt_hint;
        }else{
-               skb = sk->sk_write_queue.next;
+               skb = tcp_write_queue_head(sk);
                packet_cnt = 0;
        }
 
        /* First pass: retransmit lost packets. */
        if (tp->lost_out) {
-               sk_stream_for_retrans_queue_from(skb, sk) {
+               tcp_for_write_queue_from(skb, sk) {
                        __u8 sacked = TCP_SKB_CB(skb)->sacked;
 
+                       if (skb == tcp_send_head(sk))
+                               break;
                        /* we could do better than to assign each time */
                        tp->retransmit_skb_hint = skb;
                        tp->retransmit_cnt_hint = packet_cnt;
@@ -1906,8 +1909,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                                        else
                                                NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
 
-                                       if (skb ==
-                                           skb_peek(&sk->sk_write_queue))
+                                       if (skb == tcp_write_queue_head(sk))
                                                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                                          inet_csk(sk)->icsk_rto,
                                                                          TCP_RTO_MAX);
@@ -1937,18 +1939,20 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
         * segments to send.
         */
 
-       if (tcp_may_send_now(sk, tp))
+       if (tcp_may_send_now(sk))
                return;
 
        if (tp->forward_skb_hint) {
                skb = tp->forward_skb_hint;
                packet_cnt = tp->forward_cnt_hint;
        } else{
-               skb = sk->sk_write_queue.next;
+               skb = tcp_write_queue_head(sk);
                packet_cnt = 0;
        }
 
-       sk_stream_for_retrans_queue_from(skb, sk) {
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                tp->forward_cnt_hint = packet_cnt;
                tp->forward_skb_hint = skb;
 
@@ -1973,7 +1977,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                        break;
                }
 
-               if (skb == skb_peek(&sk->sk_write_queue))
+               if (skb == tcp_write_queue_head(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  inet_csk(sk)->icsk_rto,
                                                  TCP_RTO_MAX);
@@ -1989,7 +1993,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 void tcp_send_fin(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
+       struct sk_buff *skb = tcp_write_queue_tail(sk);
        int mss_now;
 
        /* Optimization, tack on the FIN if we have a queue of
@@ -1998,7 +2002,7 @@ void tcp_send_fin(struct sock *sk)
         */
        mss_now = tcp_current_mss(sk, 1);
 
-       if (sk->sk_send_head != NULL) {
+       if (tcp_send_head(sk) != NULL) {
                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
                TCP_SKB_CB(skb)->end_seq++;
                tp->write_seq++;
@@ -2025,7 +2029,7 @@ void tcp_send_fin(struct sock *sk)
                TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
                tcp_queue_skb(sk, skb);
        }
-       __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
+       __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
 }
 
 /* We get here when a process closes a file descriptor (either due to
@@ -2035,7 +2039,6 @@ void tcp_send_fin(struct sock *sk)
  */
 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
 
        /* NOTE: No TCP options attached and we never retransmit this. */
@@ -2055,7 +2058,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
        skb_shinfo(skb)->gso_type = 0;
 
        /* Send it off. */
-       TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
+       TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2071,7 +2074,7 @@ int tcp_send_synack(struct sock *sk)
 {
        struct sk_buff* skb;
 
-       skb = skb_peek(&sk->sk_write_queue);
+       skb = tcp_write_queue_head(sk);
        if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
                printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
                return -EFAULT;
@@ -2081,9 +2084,9 @@ int tcp_send_synack(struct sock *sk)
                        struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
                        if (nskb == NULL)
                                return -ENOMEM;
-                       __skb_unlink(skb, &sk->sk_write_queue);
+                       tcp_unlink_write_queue(skb, sk);
                        skb_header_release(nskb);
-                       __skb_queue_head(&sk->sk_write_queue, nskb);
+                       __tcp_add_write_queue_head(sk, nskb);
                        sk_stream_free_skb(sk, skb);
                        sk_charge_skb(sk, nskb);
                        skb = nskb;
@@ -2133,8 +2136,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        if (md5)
                tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
 #endif
-       skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
+       skb_push(skb, tcp_header_size);
+       skb_reset_transport_header(skb);
 
+       th = tcp_hdr(skb);
        memset(th, 0, sizeof(struct tcphdr));
        th->syn = 1;
        th->ack = 1;
@@ -2188,7 +2193,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                tp->af_specific->calc_md5_hash(md5_hash_location,
                                               md5,
                                               NULL, dst, req,
-                                              skb->h.th, sk->sk_protocol,
+                                              tcp_hdr(skb), sk->sk_protocol,
                                               skb->len);
        }
 #endif
@@ -2271,7 +2276,7 @@ int tcp_connect(struct sock *sk)
        skb_reserve(buff, MAX_TCP_HEADER);
 
        TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
-       TCP_ECN_send_syn(sk, tp, buff);
+       TCP_ECN_send_syn(sk, buff);
        TCP_SKB_CB(buff)->sacked = 0;
        skb_shinfo(buff)->gso_segs = 1;
        skb_shinfo(buff)->gso_size = 0;
@@ -2285,7 +2290,7 @@ int tcp_connect(struct sock *sk)
        TCP_SKB_CB(buff)->when = tcp_time_stamp;
        tp->retrans_stamp = TCP_SKB_CB(buff)->when;
        skb_header_release(buff);
-       __skb_queue_tail(&sk->sk_write_queue, buff);
+       __tcp_add_write_queue_tail(sk, buff);
        sk_charge_skb(sk, buff);
        tp->packets_out += tcp_skb_pcount(buff);
        tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
@@ -2363,7 +2368,6 @@ void tcp_send_ack(struct sock *sk)
 {
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != TCP_CLOSE) {
-               struct tcp_sock *tp = tcp_sk(sk);
                struct sk_buff *buff;
 
                /* We are not putting this on the write queue, so
@@ -2389,7 +2393,7 @@ void tcp_send_ack(struct sock *sk)
                skb_shinfo(buff)->gso_type = 0;
 
                /* Send it off, this clears delayed acks for us. */
-               TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
+               TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
                TCP_SKB_CB(buff)->when = tcp_time_stamp;
                tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
        }
@@ -2441,7 +2445,7 @@ int tcp_write_wakeup(struct sock *sk)
                struct tcp_sock *tp = tcp_sk(sk);
                struct sk_buff *skb;
 
-               if ((skb = sk->sk_send_head) != NULL &&
+               if ((skb = tcp_send_head(sk)) != NULL &&
                    before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
                        int err;
                        unsigned int mss = tcp_current_mss(sk, 0);
@@ -2467,7 +2471,7 @@ int tcp_write_wakeup(struct sock *sk)
                        TCP_SKB_CB(skb)->when = tcp_time_stamp;
                        err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
                        if (!err) {
-                               update_send_head(sk, tp, skb);
+                               update_send_head(sk, skb);
                        }
                        return err;
                } else {
@@ -2491,7 +2495,7 @@ void tcp_send_probe0(struct sock *sk)
 
        err = tcp_write_wakeup(sk);
 
-       if (tp->packets_out || !sk->sk_send_head) {
+       if (tp->packets_out || !tcp_send_head(sk)) {
                /* Cancel probe timer, if it is not required. */
                icsk->icsk_probes_out = 0;
                icsk->icsk_backoff = 0;
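The other recurring change through this file is that helpers such as update_send_head(), tcp_cwnd_validate() and tcp_may_send_now() stop taking a separate struct tcp_sock * argument and re-derive it from the socket, which costs nothing because tcp_sk() is a cast of the same object. The pattern, with an illustrative function name:

        /* Before: helper(struct sock *sk, struct tcp_sock *tp, ...)
         * After:  helper(struct sock *sk, ...), with tp derived locally. */
        static void example_helper(struct sock *sk)
        {
                struct tcp_sock *tp = tcp_sk(sk);       /* same object as 'sk' */

                if (tp->packets_out >= tp->snd_cwnd)
                        tp->snd_cwnd_used = 0;  /* ... operate on tp as before ... */
        }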
index 61f406f..3938d5d 100644
@@ -26,6 +26,8 @@
 #include <linux/proc_fs.h>
 #include <linux/module.h>
 #include <linux/kfifo.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
 #include <linux/vmalloc.h>
 
 #include <net/tcp.h>
@@ -34,43 +36,45 @@ MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
 MODULE_DESCRIPTION("TCP cwnd snooper");
 MODULE_LICENSE("GPL");
 
-static int port = 0;
+static int port __read_mostly = 0;
 MODULE_PARM_DESC(port, "Port to match (0=all)");
 module_param(port, int, 0);
 
-static int bufsize = 64*1024;
+static int bufsize __read_mostly = 64*1024;
 MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
 module_param(bufsize, int, 0);
 
+static int full __read_mostly;
+MODULE_PARM_DESC(full, "Full log (1=every ack packet received,  0=only cwnd changes)");
+module_param(full, int, 0);
+
 static const char procname[] = "tcpprobe";
 
 struct {
-       struct kfifo  *fifo;
-       spinlock_t    lock;
+       struct kfifo    *fifo;
+       spinlock_t      lock;
        wait_queue_head_t wait;
-       struct timeval tstart;
+       ktime_t         start;
+       u32             lastcwnd;
 } tcpw;
 
+/*
+ * Print to log with timestamps.
+ * FIXME: causes an extra copy
+ */
 static void printl(const char *fmt, ...)
 {
        va_list args;
        int len;
-       struct timeval now;
+       struct timespec tv;
        char tbuf[256];
 
        va_start(args, fmt);
-       do_gettimeofday(&now);
+       /* want monotonic time since start of tcp_probe */
+       tv = ktime_to_timespec(ktime_sub(ktime_get(), tcpw.start));
 
-       now.tv_sec -= tcpw.tstart.tv_sec;
-       now.tv_usec -= tcpw.tstart.tv_usec;
-       if (now.tv_usec < 0) {
-               --now.tv_sec;
-               now.tv_usec += 1000000;
-       }
-
-       len = sprintf(tbuf, "%lu.%06lu ",
-                     (unsigned long) now.tv_sec,
-                     (unsigned long) now.tv_usec);
+       len = sprintf(tbuf, "%lu.%09lu ",
+                     (unsigned long) tv.tv_sec, (unsigned long) tv.tv_nsec);
        len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
        va_end(args);
 
@@ -78,38 +82,44 @@ static void printl(const char *fmt, ...)
        wake_up(&tcpw.wait);
 }
 
-static int jtcp_sendmsg(struct kiocb *iocb, struct sock *sk,
-                       struct msghdr *msg, size_t size)
+/*
+ * Hook inserted to be called before each receive packet.
+ * Note: arguments must match tcp_rcv_established()!
+ */
+static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+                              struct tcphdr *th, unsigned len)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_sock *inet = inet_sk(sk);
 
-       if (port == 0 || ntohs(inet->dport) == port ||
-           ntohs(inet->sport) == port) {
+       /* Only update if port matches */
+       if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port)
+           && (full || tp->snd_cwnd != tcpw.lastcwnd)) {
                printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d %#x %#x %u %u %u\n",
                       NIPQUAD(inet->saddr), ntohs(inet->sport),
                       NIPQUAD(inet->daddr), ntohs(inet->dport),
-                      size, tp->snd_nxt, tp->snd_una,
+                      skb->len, tp->snd_nxt, tp->snd_una,
                       tp->snd_cwnd, tcp_current_ssthresh(sk),
-                      tp->snd_wnd);
+                      tp->snd_wnd, tp->srtt >> 3);
+               tcpw.lastcwnd = tp->snd_cwnd;
        }
 
        jprobe_return();
        return 0;
 }
 
-static struct jprobe tcp_send_probe = {
+static struct jprobe tcp_probe = {
        .kp = {
-               .symbol_name    = "tcp_sendmsg",
+               .symbol_name    = "tcp_rcv_established",
        },
-       .entry  = JPROBE_ENTRY(jtcp_sendmsg),
+       .entry  = JPROBE_ENTRY(jtcp_rcv_established),
 };
 
 
 static int tcpprobe_open(struct inode * inode, struct file * file)
 {
        kfifo_reset(tcpw.fifo);
-       do_gettimeofday(&tcpw.tstart);
+       tcpw.start = ktime_get();
        return 0;
 }
 
@@ -162,7 +172,7 @@ static __init int tcpprobe_init(void)
        if (!proc_net_fops_create(procname, S_IRUSR, &tcpprobe_fops))
                goto err0;
 
-       ret = register_jprobe(&tcp_send_probe);
+       ret = register_jprobe(&tcp_probe);
        if (ret)
                goto err1;
 
@@ -180,7 +190,7 @@ static __exit void tcpprobe_exit(void)
 {
        kfifo_free(tcpw.fifo);
        proc_net_remove(procname);
-       unregister_jprobe(&tcp_send_probe);
+       unregister_jprobe(&tcp_probe);
 
 }
 module_exit(tcpprobe_exit);
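The rewritten tcp_probe hooks tcp_rcv_established() instead of tcp_sendmsg(), logging from the established-state receive path (only on cwnd changes unless the new 'full' parameter is set) and timestamping with monotonic ktime instead of do_gettimeofday(). The jprobe mechanism it relies on requires the handler to mirror the probed function's prototype and to end with jprobe_return(); a stripped-down sketch, with illustrative names outside the kprobes API:

        static int example_rcv_hook(struct sock *sk, struct sk_buff *skb,
                                    struct tcphdr *th, unsigned len)
        {
                /* read-only peeking at sk/skb would go here */
                jprobe_return();        /* hands control back to the kprobes core */
                return 0;               /* not reached */
        }

        static struct jprobe example_probe = {
                .kp    = { .symbol_name = "tcp_rcv_established" },
                .entry = JPROBE_ENTRY(example_rcv_hook),
        };

Such a probe is registered with register_jprobe(&example_probe) and removed with unregister_jprobe(), as tcpprobe_init() and tcpprobe_exit() do above.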
index a9243cf..2ca97b2 100644
@@ -233,7 +233,7 @@ static void tcp_probe_timer(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        int max_probes;
 
-       if (tp->packets_out || !sk->sk_send_head) {
+       if (tp->packets_out || !tcp_send_head(sk)) {
                icsk->icsk_probes_out = 0;
                return;
        }
@@ -284,7 +284,7 @@ static void tcp_retransmit_timer(struct sock *sk)
        if (!tp->packets_out)
                goto out;
 
-       BUG_TRAP(!skb_queue_empty(&sk->sk_write_queue));
+       BUG_TRAP(!tcp_write_queue_empty(sk));
 
        if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
            !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
@@ -306,7 +306,7 @@ static void tcp_retransmit_timer(struct sock *sk)
                        goto out;
                }
                tcp_enter_loss(sk, 0);
-               tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
+               tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
                __sk_dst_reset(sk);
                goto out_reset_timer;
        }
@@ -341,7 +341,7 @@ static void tcp_retransmit_timer(struct sock *sk)
                tcp_enter_loss(sk, 0);
        }
 
-       if (tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)) > 0) {
+       if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
                /* Retransmission failed because of local congestion,
                 * do not backoff.
                 */
@@ -482,7 +482,7 @@ static void tcp_keepalive_timer (unsigned long data)
        elapsed = keepalive_time_when(tp);
 
        /* It is alive without keepalive 8) */
-       if (tp->packets_out || sk->sk_send_head)
+       if (tp->packets_out || tcp_send_head(sk))
                goto resched;
 
        elapsed = tcp_time_stamp - tp->rcv_tstamp;
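The tcp_timer.c hunks are the read-only side of the same write-queue abstraction: skb_peek() and skb_queue_empty() on sk->sk_write_queue become tcp_write_queue_head() and tcp_write_queue_empty(). What those wrappers boil down to, again reconstructed rather than quoted from include/net/tcp.h:

        static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
        {
                return skb_peek(&sk->sk_write_queue);
        }

        static inline int tcp_write_queue_empty(struct sock *sk)
        {
                return skb_queue_empty(&sk->sk_write_queue);
        }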
index 5c484dc..73e19cf 100644
@@ -38,6 +38,8 @@
 
 #include <net/tcp.h>
 
+#include "tcp_vegas.h"
+
 /* Default values of the Vegas variables, in fixed-point representation
  * with V_PARAM_SHIFT bits to the right of the binary point.
  */
@@ -54,17 +56,6 @@ module_param(gamma, int, 0644);
 MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
 
 
-/* Vegas variables */
-struct vegas {
-       u32     beg_snd_nxt;    /* right edge during last RTT */
-       u32     beg_snd_una;    /* left edge  during last RTT */
-       u32     beg_snd_cwnd;   /* saves the size of the cwnd */
-       u8      doing_vegas_now;/* if true, do vegas for this RTT */
-       u16     cntRTT;         /* # of RTTs measured within last RTT */
-       u32     minRTT;         /* min of RTTs measured within last RTT (in usec) */
-       u32     baseRTT;        /* the min of all Vegas RTT measurements seen (in usec) */
-};
-
 /* There are several situations when we must "re-start" Vegas:
  *
  *  o when a connection is established
@@ -81,7 +72,7 @@ struct vegas {
  * Instead we must wait until the completion of an RTT during
  * which we actually receive ACKs.
  */
-static inline void vegas_enable(struct sock *sk)
+static void vegas_enable(struct sock *sk)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        struct vegas *vegas = inet_csk_ca(sk);
@@ -104,13 +95,14 @@ static inline void vegas_disable(struct sock *sk)
        vegas->doing_vegas_now = 0;
 }
 
-static void tcp_vegas_init(struct sock *sk)
+void tcp_vegas_init(struct sock *sk)
 {
        struct vegas *vegas = inet_csk_ca(sk);
 
        vegas->baseRTT = 0x7fffffff;
        vegas_enable(sk);
 }
+EXPORT_SYMBOL_GPL(tcp_vegas_init);
 
 /* Do RTT sampling needed for Vegas.
  * Basically we:
@@ -120,10 +112,13 @@ static void tcp_vegas_init(struct sock *sk)
  *   o min-filter RTT samples from a much longer window (forever for now)
  *     to find the propagation delay (baseRTT)
  */
-static void tcp_vegas_rtt_calc(struct sock *sk, u32 usrtt)
+void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
 {
        struct vegas *vegas = inet_csk_ca(sk);
-       u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */
+       u32 vrtt;
+
+       /* Never allow zero rtt or baseRTT */
+       vrtt = ktime_to_us(net_timedelta(last)) + 1;
 
        /* Filter to find propagation delay: */
        if (vrtt < vegas->baseRTT)
@@ -135,8 +130,9 @@ static void tcp_vegas_rtt_calc(struct sock *sk, u32 usrtt)
        vegas->minRTT = min(vegas->minRTT, vrtt);
        vegas->cntRTT++;
 }
+EXPORT_SYMBOL_GPL(tcp_vegas_pkts_acked);
 
-static void tcp_vegas_state(struct sock *sk, u8 ca_state)
+void tcp_vegas_state(struct sock *sk, u8 ca_state)
 {
 
        if (ca_state == TCP_CA_Open)
@@ -144,6 +140,7 @@ static void tcp_vegas_state(struct sock *sk, u8 ca_state)
        else
                vegas_disable(sk);
 }
+EXPORT_SYMBOL_GPL(tcp_vegas_state);
 
 /*
  * If the connection is idle and we are restarting,
@@ -154,12 +151,13 @@ static void tcp_vegas_state(struct sock *sk, u8 ca_state)
  * packets, _then_ we can make Vegas calculations
  * again.
  */
-static void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 {
        if (event == CA_EVENT_CWND_RESTART ||
            event == CA_EVENT_TX_START)
                tcp_vegas_init(sk);
 }
+EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
                                 u32 seq_rtt, u32 in_flight, int flag)
@@ -336,30 +334,29 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 }
 
 /* Extract info for Tcp socket info provided via netlink. */
-static void tcp_vegas_get_info(struct sock *sk, u32 ext,
-                              struct sk_buff *skb)
+void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
 {
        const struct vegas *ca = inet_csk_ca(sk);
        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
-               struct tcpvegas_info *info;
-
-               info = RTA_DATA(__RTA_PUT(skb, INET_DIAG_VEGASINFO,
-                                         sizeof(*info)));
-
-               info->tcpv_enabled = ca->doing_vegas_now;
-               info->tcpv_rttcnt = ca->cntRTT;
-               info->tcpv_rtt = ca->baseRTT;
-               info->tcpv_minrtt = ca->minRTT;
-       rtattr_failure: ;
+               struct tcpvegas_info info = {
+                       .tcpv_enabled = ca->doing_vegas_now,
+                       .tcpv_rttcnt = ca->cntRTT,
+                       .tcpv_rtt = ca->baseRTT,
+                       .tcpv_minrtt = ca->minRTT,
+               };
+
+               nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
        }
 }
+EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
 
 static struct tcp_congestion_ops tcp_vegas = {
+       .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_vegas_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_vegas_cong_avoid,
        .min_cwnd       = tcp_reno_min_cwnd,
-       .rtt_sample     = tcp_vegas_rtt_calc,
+       .pkts_acked     = tcp_vegas_pkts_acked,
        .set_state      = tcp_vegas_state,
        .cwnd_event     = tcp_vegas_cwnd_event,
        .get_info       = tcp_vegas_get_info,
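
With TCP_CONG_RTT_STAMP set, the stack hands pkts_acked() a ktime_t send stamp for the acknowledged data instead of a precomputed usrtt, and the module derives the microsecond RTT itself, exactly as tcp_vegas_pkts_acked() does above. The conversion, shown in isolation as a sketch for a hypothetical module:

        /* Sketch: derive a usec RTT sample from the ktime_t send stamp. */
        static void example_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
        {
                u32 vrtt;

                /* net_timedelta() yields "now - last"; +1 keeps the sample non-zero. */
                vrtt = ktime_to_us(net_timedelta(last)) + 1;

                /* ... min-filter vrtt into the module's baseRTT/minRTT state ... */
        }
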
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
new file mode 100644 (file)
index 0000000..502fa81
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * TCP Vegas congestion control interface
+ */
+#ifndef __TCP_VEGAS_H
+#define __TCP_VEGAS_H 1
+
+/* Vegas variables */
+struct vegas {
+       u32     beg_snd_nxt;    /* right edge during last RTT */
+       u32     beg_snd_una;    /* left edge  during last RTT */
+       u32     beg_snd_cwnd;   /* saves the size of the cwnd */
+       u8      doing_vegas_now;/* if true, do vegas for this RTT */
+       u16     cntRTT;         /* # of RTTs measured within last RTT */
+       u32     minRTT;         /* min of RTTs measured within last RTT (in usec) */
+       u32     baseRTT;        /* the min of all Vegas RTT measurements seen (in usec) */
+};
+
+extern void tcp_vegas_init(struct sock *sk);
+extern void tcp_vegas_state(struct sock *sk, u8 ca_state);
+extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last);
+extern void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
+extern void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
+
+#endif /* __TCP_VEGAS_H */
index ce57bf3..9edb340 100644 (file)
@@ -69,10 +69,13 @@ static void tcp_veno_init(struct sock *sk)
 }
 
 /* Do rtt sampling needed for Veno. */
-static void tcp_veno_rtt_calc(struct sock *sk, u32 usrtt)
+static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
 {
        struct veno *veno = inet_csk_ca(sk);
-       u32 vrtt = usrtt + 1;   /* Never allow zero rtt or basertt */
+       u32 vrtt;
+
+       /* Never allow zero rtt or baseRTT */
+       vrtt = ktime_to_us(net_timedelta(last)) + 1;
 
        /* Filter to find propagation delay: */
        if (vrtt < veno->basertt)
@@ -199,10 +202,11 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
 }
 
 static struct tcp_congestion_ops tcp_veno = {
+       .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_veno_init,
        .ssthresh       = tcp_veno_ssthresh,
        .cong_avoid     = tcp_veno_cong_avoid,
-       .rtt_sample     = tcp_veno_rtt_calc,
+       .pkts_acked     = tcp_veno_pkts_acked,
        .set_state      = tcp_veno_state,
        .cwnd_event     = tcp_veno_cwnd_event,
 
index 4e1b610..e61e09d 100644 (file)
@@ -100,7 +100,7 @@ static void westwood_filter(struct westwood *w, u32 delta)
  * Called after processing a group of packets,
  * but all westwood needs is the last sample of srtt.
  */
-static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
+static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
 {
        struct westwood *w = inet_csk_ca(sk);
        if (cnt > 0)
@@ -226,7 +226,7 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
        struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);
 
-       switch(event) {
+       switch (event) {
        case CA_EVENT_FAST_ACK:
                westwood_fast_bw(sk);
                break;
@@ -260,16 +260,13 @@ static void tcp_westwood_info(struct sock *sk, u32 ext,
 {
        const struct westwood *ca = inet_csk_ca(sk);
        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
-               struct rtattr *rta;
-               struct tcpvegas_info *info;
-
-               rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
-               info = RTA_DATA(rta);
-               info->tcpv_enabled = 1;
-               info->tcpv_rttcnt = 0;
-               info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
-               info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
-       rtattr_failure: ;
+               struct tcpvegas_info info = {
+                       .tcpv_enabled = 1,
+                       .tcpv_rtt = jiffies_to_usecs(ca->rtt),
+                       .tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
+               };
+
+               nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
        }
 }
 
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
new file mode 100644 (file)
index 0000000..545ed23
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+ *
+ *   YeAH TCP
+ *
+ * For further details look at:
+ *    http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
+ *
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/inet_diag.h>
+
+#include <net/tcp.h>
+
+#include "tcp_vegas.h"
+
+#define TCP_YEAH_ALPHA       80 //lin number of packets queued at the bottleneck
+#define TCP_YEAH_GAMMA        1 //lin fraction of queue to be removed per rtt
+#define TCP_YEAH_DELTA        3 //log minimum fraction of cwnd to be removed on loss
+#define TCP_YEAH_EPSILON      1 //log maximum fraction to be removed on early decongestion
+#define TCP_YEAH_PHY          8 //lin maximum delta from base
+#define TCP_YEAH_RHO         16 //lin minimum number of consecutive rtt to consider competition on loss
+#define TCP_YEAH_ZETA        50 //lin minimum number of state switches to reset reno_count
+
+#define TCP_SCALABLE_AI_CNT     100U
+
+/* YeAH variables */
+struct yeah {
+       struct vegas vegas;     /* must be first */
+
+       /* YeAH */
+       u32 lastQ;
+       u32 doing_reno_now;
+
+       u32 reno_count;
+       u32 fast_count;
+
+       u32 pkts_acked;
+};
+
+static void tcp_yeah_init(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct yeah *yeah = inet_csk_ca(sk);
+
+       tcp_vegas_init(sk);
+
+       yeah->doing_reno_now = 0;
+       yeah->lastQ = 0;
+
+       yeah->reno_count = 2;
+
+       /* Ensure the MD arithmetic works.  This is somewhat pedantic,
+        * since I don't think we will see a cwnd this large. :) */
+       tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
+
+}
+
+
+static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+{
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+       struct yeah *yeah = inet_csk_ca(sk);
+
+       if (icsk->icsk_ca_state == TCP_CA_Open)
+               yeah->pkts_acked = pkts_acked;
+
+       tcp_vegas_pkts_acked(sk, pkts_acked, last);
+}
+
+static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
+                               u32 seq_rtt, u32 in_flight, int flag)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct yeah *yeah = inet_csk_ca(sk);
+
+       if (!tcp_is_cwnd_limited(sk, in_flight))
+               return;
+
+       if (tp->snd_cwnd <= tp->snd_ssthresh)
+               tcp_slow_start(tp);
+
+       else if (!yeah->doing_reno_now) {
+               /* Scalable */
+
+               tp->snd_cwnd_cnt+=yeah->pkts_acked;
+               if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
+                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+                               tp->snd_cwnd++;
+                       tp->snd_cwnd_cnt = 0;
+               }
+
+               yeah->pkts_acked = 1;
+
+       } else {
+               /* Reno */
+
+               if (tp->snd_cwnd_cnt < tp->snd_cwnd)
+                       tp->snd_cwnd_cnt++;
+
+               if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
+                       tp->snd_cwnd++;
+                       tp->snd_cwnd_cnt = 0;
+               }
+       }
+
+       /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
+        *
+        * These are so named because they represent the approximate values
+        * of snd_una and snd_nxt at the beginning of the current RTT. More
+        * precisely, they represent the amount of data sent during the RTT.
+        * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
+        * we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una) outstanding
+        * bytes of data have been ACKed during the course of the RTT, giving
+        * an "actual" rate of:
+        *
+        *     (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
+        *
+        * Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
+        * because delayed ACKs can cover more than one segment, so they
+        * don't line up nicely with the boundaries of RTTs.
+        *
+        * Another unfortunate fact of life is that delayed ACKs delay the
+        * advance of the left edge of our send window, so that the number
+        * of bytes we send in an RTT is often less than our cwnd will allow.
+        * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
+        */
+
+       if (after(ack, yeah->vegas.beg_snd_nxt)) {
+
+               /* We do the Vegas calculations only if we got enough RTT
+                * samples that we can be reasonably sure that we got
+                * at least one RTT sample that wasn't from a delayed ACK.
+                * If we only had 2 samples total,
+                * then that means we're getting only 1 ACK per RTT, which
+                * means they're almost certainly delayed ACKs.
+                * If  we have 3 samples, we should be OK.
+                */
+
+               if (yeah->vegas.cntRTT > 2) {
+                       u32 rtt, queue;
+                       u64 bw;
+
+                       /* We have enough RTT samples, so, using the Vegas
+                        * algorithm, we determine if we should increase or
+                        * decrease cwnd, and by how much.
+                        */
+
+                       /* Pluck out the RTT we are using for the Vegas
+                        * calculations. This is the min RTT seen during the
+                        * last RTT. Taking the min filters out the effects
+                        * of delayed ACKs, at the cost of noticing congestion
+                        * a bit later.
+                        */
+                       rtt = yeah->vegas.minRTT;
+
+                       /* Compute excess number of packets above bandwidth
+                        * Avoid doing full 64 bit divide.
+                        */
+                       bw = tp->snd_cwnd;
+                       bw *= rtt - yeah->vegas.baseRTT;
+                       do_div(bw, rtt);
+                       queue = bw;
+
+                       if (queue > TCP_YEAH_ALPHA ||
+                           rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
+                               if (queue > TCP_YEAH_ALPHA
+                                   && tp->snd_cwnd > yeah->reno_count) {
+                                       u32 reduction = min(queue / TCP_YEAH_GAMMA ,
+                                                           tp->snd_cwnd >> TCP_YEAH_EPSILON);
+
+                                       tp->snd_cwnd -= reduction;
+
+                                       tp->snd_cwnd = max(tp->snd_cwnd,
+                                                          yeah->reno_count);
+
+                                       tp->snd_ssthresh = tp->snd_cwnd;
+                               }
+
+                               if (yeah->reno_count <= 2)
+                                       yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
+                               else
+                                       yeah->reno_count++;
+
+                               yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
+                                                          0xffffffU);
+                       } else {
+                               yeah->fast_count++;
+
+                               if (yeah->fast_count > TCP_YEAH_ZETA) {
+                                       yeah->reno_count = 2;
+                                       yeah->fast_count = 0;
+                               }
+
+                               yeah->doing_reno_now = 0;
+                       }
+
+                       yeah->lastQ = queue;
+
+               }
+
+               /* Save the extent of the current window so we can use this
+                * at the end of the next RTT.
+                */
+               yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
+               yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
+               yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;
+
+               /* Wipe the slate clean for the next RTT. */
+               yeah->vegas.cntRTT = 0;
+               yeah->vegas.minRTT = 0x7fffffff;
+       }
+}
+
+static u32 tcp_yeah_ssthresh(struct sock *sk) {
+       const struct tcp_sock *tp = tcp_sk(sk);
+       struct yeah *yeah = inet_csk_ca(sk);
+       u32 reduction;
+
+       if (yeah->doing_reno_now < TCP_YEAH_RHO) {
+               reduction = yeah->lastQ;
+
+               reduction = min( reduction, max(tp->snd_cwnd>>1, 2U) );
+
+               reduction = max( reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
+       } else
+               reduction = max(tp->snd_cwnd>>1,2U);
+
+       yeah->fast_count = 0;
+       yeah->reno_count = max(yeah->reno_count>>1, 2U);
+
+       return tp->snd_cwnd - reduction;
+}
+
+static struct tcp_congestion_ops tcp_yeah = {
+       .flags          = TCP_CONG_RTT_STAMP,
+       .init           = tcp_yeah_init,
+       .ssthresh       = tcp_yeah_ssthresh,
+       .cong_avoid     = tcp_yeah_cong_avoid,
+       .min_cwnd       = tcp_reno_min_cwnd,
+       .set_state      = tcp_vegas_state,
+       .cwnd_event     = tcp_vegas_cwnd_event,
+       .get_info       = tcp_vegas_get_info,
+       .pkts_acked     = tcp_yeah_pkts_acked,
+
+       .owner          = THIS_MODULE,
+       .name           = "yeah",
+};
+
+static int __init tcp_yeah_register(void)
+{
+       BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
+       tcp_register_congestion_control(&tcp_yeah);
+       return 0;
+}
+
+static void __exit tcp_yeah_unregister(void)
+{
+       tcp_unregister_congestion_control(&tcp_yeah);
+}
+
+module_init(tcp_yeah_register);
+module_exit(tcp_yeah_unregister);
+
+MODULE_AUTHOR("Angelo P. Castellani");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("YeAH TCP");
diff --git a/net/ipv4/tcp_yeah.h b/net/ipv4/tcp_yeah.h
new file mode 100644 (file)
index 0000000..ed3b719
--- /dev/null
@@ -0,0 +1,7 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/inet_diag.h>
+#include <asm/div64.h>
+
+#include <net/tcp.h>
index fc620a7..cec0f2c 100644 (file)
@@ -175,7 +175,8 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
                        ;
                }
                result = best;
-               for(i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) {
+               for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE;
+                    i++, result += UDP_HTABLE_SIZE) {
                        if (result > sysctl_local_port_range[1])
                                result = sysctl_local_port_range[0]
                                        + ((result - sysctl_local_port_range[0]) &
@@ -212,13 +213,13 @@ fail:
        return error;
 }
 
-__inline__ int udp_get_port(struct sock *sk, unsigned short snum,
+int udp_get_port(struct sock *sk, unsigned short snum,
                        int (*scmp)(const struct sock *, const struct sock *))
 {
        return  __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp);
 }
 
-inline int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
 {
        struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
 
@@ -270,10 +271,10 @@ static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
                                        continue;
                                score+=2;
                        }
-                       if(score == 9) {
+                       if (score == 9) {
                                result = sk;
                                break;
-                       } else if(score > badness) {
+                       } else if (score > badness) {
                                result = sk;
                                badness = score;
                        }
@@ -329,8 +330,8 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
        struct inet_sock *inet;
        struct iphdr *iph = (struct iphdr*)skb->data;
        struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        struct sock *sk;
        int harderr;
        int err;
@@ -390,7 +391,7 @@ out:
        sock_put(sk);
 }
 
-__inline__ void udp_err(struct sk_buff *skb, u32 info)
+void udp_err(struct sk_buff *skb, u32 info)
 {
        return __udp4_lib_err(skb, info, udp_hash);
 }
@@ -419,13 +420,14 @@ static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
                                 __be32 src, __be32 dst, int len      )
 {
        unsigned int offset;
-       struct udphdr *uh = skb->h.uh;
+       struct udphdr *uh = udp_hdr(skb);
        __wsum csum = 0;
 
        if (skb_queue_len(&sk->sk_write_queue) == 1) {
                /*
                 * Only one fragment on the socket.
                 */
+               skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
                uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
        } else {
@@ -434,7 +436,7 @@ static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
                 * fragments on the socket so that all csums of sk_buffs
                 * should be together
                 */
-               offset = skb->h.raw - skb->data;
+               offset = skb_transport_offset(skb);
                skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
 
                skb->ip_summed = CHECKSUM_NONE;
@@ -469,7 +471,7 @@ static int udp_push_pending_frames(struct sock *sk)
        /*
         * Create a UDP header
         */
-       uh = skb->h.uh;
+       uh = udp_hdr(skb);
        uh->source = fl->fl_ip_sport;
        uh->dest = fl->fl_ip_dport;
        uh->len = htons(up->len);
@@ -765,38 +767,38 @@ out:
 
 int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
-       switch(cmd)
+       switch (cmd) {
+       case SIOCOUTQ:
        {
-               case SIOCOUTQ:
-               {
-                       int amount = atomic_read(&sk->sk_wmem_alloc);
-                       return put_user(amount, (int __user *)arg);
-               }
+               int amount = atomic_read(&sk->sk_wmem_alloc);
+               return put_user(amount, (int __user *)arg);
+       }
 
-               case SIOCINQ:
-               {
-                       struct sk_buff *skb;
-                       unsigned long amount;
-
-                       amount = 0;
-                       spin_lock_bh(&sk->sk_receive_queue.lock);
-                       skb = skb_peek(&sk->sk_receive_queue);
-                       if (skb != NULL) {
-                               /*
-                                * We will only return the amount
-                                * of this packet since that is all
-                                * that will be read.
-                                */
-                               amount = skb->len - sizeof(struct udphdr);
-                       }
-                       spin_unlock_bh(&sk->sk_receive_queue.lock);
-                       return put_user(amount, (int __user *)arg);
+       case SIOCINQ:
+       {
+               struct sk_buff *skb;
+               unsigned long amount;
+
+               amount = 0;
+               spin_lock_bh(&sk->sk_receive_queue.lock);
+               skb = skb_peek(&sk->sk_receive_queue);
+               if (skb != NULL) {
+                       /*
+                        * We will only return the amount
+                        * of this packet since that is all
+                        * that will be read.
+                        */
+                       amount = skb->len - sizeof(struct udphdr);
                }
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
+               return put_user(amount, (int __user *)arg);
+       }
 
-               default:
-                       return -ENOIOCTLCMD;
+       default:
+               return -ENOIOCTLCMD;
        }
-       return(0);
+
+       return 0;
 }
 
 /*
@@ -810,7 +812,9 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
        struct sk_buff *skb;
-       int copied, err, copy_only, is_udplite = IS_UDPLITE(sk);
+       unsigned int ulen, copied;
+       int err;
+       int is_udplite = IS_UDPLITE(sk);
 
        /*
         *      Check any passed addresses
@@ -826,28 +830,25 @@ try_again:
        if (!skb)
                goto out;
 
-       copied = skb->len - sizeof(struct udphdr);
-       if (copied > len) {
-               copied = len;
+       ulen = skb->len - sizeof(struct udphdr);
+       copied = len;
+       if (copied > ulen)
+               copied = ulen;
+       else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;
-       }
 
        /*
-        *      Decide whether to checksum and/or copy data.
-        *
-        *      UDP:      checksum may have been computed in HW,
-        *                (re-)compute it if message is truncated.
-        *      UDP-Lite: always needs to checksum, no HW support.
+        * If checksum is needed at all, try to do it while copying the
+        * data.  If the data is truncated, or if we only want a partial
+        * coverage checksum (UDP-Lite), do it before the copy.
         */
-       copy_only = (skb->ip_summed==CHECKSUM_UNNECESSARY);
 
-       if (is_udplite  ||  (!copy_only  &&  msg->msg_flags&MSG_TRUNC)) {
-               if (__udp_lib_checksum_complete(skb))
+       if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+               if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
-               copy_only = 1;
        }
 
-       if (copy_only)
+       if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
                                              msg->msg_iov, copied       );
        else {
@@ -866,8 +867,8 @@ try_again:
        if (sin)
        {
                sin->sin_family = AF_INET;
-               sin->sin_port = skb->h.uh->source;
-               sin->sin_addr.s_addr = skb->nh.iph->saddr;
+               sin->sin_port = udp_hdr(skb)->source;
+               sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
        }
        if (inet->cmsg_flags)
@@ -875,7 +876,7 @@ try_again:
 
        err = copied;
        if (flags & MSG_TRUNC)
-               err = skb->len - sizeof(struct udphdr);
+               err = ulen;
 
 out_free:
        skb_free_datagram(sk, skb);
@@ -949,7 +950,7 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
                return 1;
 
        /* Now we can get the pointers */
-       uh = skb->h.uh;
+       uh = udp_hdr(skb);
        udpdata = (__u8 *)uh + sizeof(struct udphdr);
        udpdata32 = (__be32 *)udpdata;
 
@@ -959,7 +960,7 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
                /* Check if this is a keepalive packet.  If so, eat it. */
                if (len == 1 && udpdata[0] == 0xff) {
                        return 0;
-               } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0 ) {
+               } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
                        /* ESP Packet without Non-ESP header */
                        len = sizeof(struct udphdr);
                } else
@@ -990,7 +991,7 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
                return 0;
 
        /* Now we can update and verify the packet length... */
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        iphlen = iph->ihl << 2;
        iph->tot_len = htons(ntohs(iph->tot_len) - len);
        if (skb->len < iphlen + len) {
@@ -1002,7 +1003,8 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
         * transport header to point to ESP.  Keep UDP on the stack
         * for later.
         */
-       skb->h.raw = skb_pull(skb, len);
+       __skb_pull(skb, len);
+       skb_reset_transport_header(skb);
 
        /* modify the protocol (it's ESP!) */
        iph->protocol = IPPROTO_ESP;
@@ -1095,10 +1097,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                }
        }
 
-       if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
-               if (__udp_lib_checksum_complete(skb))
+       if (sk->sk_filter) {
+               if (udp_lib_checksum_complete(skb))
                        goto drop;
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
 
        if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
@@ -1143,10 +1144,10 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
 
                        sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
                                                   uh->source, saddr, dif);
-                       if(sknext)
+                       if (sknext)
                                skb1 = skb_clone(skb, GFP_ATOMIC);
 
-                       if(skb1) {
+                       if (skb1) {
                                int ret = udp_queue_rcv_skb(sk, skb1);
                                if (ret > 0)
                                        /* we should probably re-process instead
@@ -1154,7 +1155,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
                                        kfree_skb(skb1);
                        }
                        sk = sknext;
-               } while(sknext);
+               } while (sknext);
        } else
                kfree_skb(skb);
        read_unlock(&udp_hash_lock);
@@ -1166,25 +1167,37 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
  * Otherwise, csum completion requires checksumming packet body,
  * including udp header and folding it to skb->csum.
  */
-static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh)
+static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
+                                int proto)
 {
+       const struct iphdr *iph;
+       int err;
+
+       UDP_SKB_CB(skb)->partial_cov = 0;
+       UDP_SKB_CB(skb)->cscov = skb->len;
+
+       if (proto == IPPROTO_UDPLITE) {
+               err = udplite_checksum_init(skb, uh);
+               if (err)
+                       return err;
+       }
+
+       iph = ip_hdr(skb);
        if (uh->check == 0) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
-              if (!csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
-                                     skb->len, IPPROTO_UDP, skb->csum       ))
+              if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
+                                     proto, skb->csum))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
-       if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-               skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr,
-                                              skb->nh.iph->daddr,
-                                              skb->len, IPPROTO_UDP, 0);
+       if (!skb_csum_unnecessary(skb))
+               skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+                                              skb->len, proto, 0);
        /* Probably, we should checksum udp header (it should be in cache
         * in any case) and data in tiny packets (< rx copybreak).
         */
 
-       /* UDP = UDP-Lite with a non-partial checksum coverage */
-       UDP_SKB_CB(skb)->partial_cov = 0;
+       return 0;
 }
 
 /*
@@ -1192,14 +1205,14 @@ static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh)
  */
 
 int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
-                  int is_udplite)
+                  int proto)
 {
        struct sock *sk;
-       struct udphdr *uh = skb->h.uh;
+       struct udphdr *uh = udp_hdr(skb);
        unsigned short ulen;
        struct rtable *rt = (struct rtable*)skb->dst;
-       __be32 saddr = skb->nh.iph->saddr;
-       __be32 daddr = skb->nh.iph->daddr;
+       __be32 saddr = ip_hdr(skb)->saddr;
+       __be32 daddr = ip_hdr(skb)->daddr;
 
        /*
         *  Validate the packet.
@@ -1211,20 +1224,17 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
        if (ulen > skb->len)
                goto short_packet;
 
-       if(! is_udplite ) {             /* UDP validates ulen. */
-
+       if (proto == IPPROTO_UDP) {
+               /* UDP validates ulen. */
                if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
                        goto short_packet;
-               uh = skb->h.uh;
-
-               udp4_csum_init(skb, uh);
-
-       } else  {                       /* UDP-Lite validates cscov. */
-               if (udplite4_csum_init(skb, uh))
-                       goto csum_error;
+               uh = udp_hdr(skb);
        }
 
-       if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
+       if (udp4_csum_init(skb, uh, proto))
+               goto csum_error;
+
+       if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
                return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
 
        sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
@@ -1250,7 +1260,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
        if (udp_lib_checksum_complete(skb))
                goto csum_error;
 
-       UDP_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite);
+       UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
        /*
@@ -1258,11 +1268,11 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
         * don't wanna listen.  Ignore it.
         */
        kfree_skb(skb);
-       return(0);
+       return 0;
 
 short_packet:
        LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
-                      is_udplite? "-Lite" : "",
+                      proto == IPPROTO_UDPLITE ? "-Lite" : "",
                       NIPQUAD(saddr),
                       ntohs(uh->source),
                       ulen,
@@ -1277,21 +1287,21 @@ csum_error:
         * the network is concerned, anyway) as per 4.1.3.4 (MUST).
         */
        LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
-                      is_udplite? "-Lite" : "",
+                      proto == IPPROTO_UDPLITE ? "-Lite" : "",
                       NIPQUAD(saddr),
                       ntohs(uh->source),
                       NIPQUAD(daddr),
                       ntohs(uh->dest),
                       ulen);
 drop:
-       UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
+       UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
        kfree_skb(skb);
-       return(0);
+       return 0;
 }
 
-__inline__ int udp_rcv(struct sk_buff *skb)
+int udp_rcv(struct sk_buff *skb)
 {
-       return __udp4_lib_rcv(skb, udp_hash, 0);
+       return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
 }
 
 int udp_destroy_sock(struct sock *sk)
@@ -1313,13 +1323,13 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
        int val;
        int err = 0;
 
-       if(optlen<sizeof(int))
+       if (optlen<sizeof(int))
                return -EINVAL;
 
        if (get_user(val, (int __user *)optval))
                return -EFAULT;
 
-       switch(optname) {
+       switch (optname) {
        case UDP_CORK:
                if (val != 0) {
                        up->corkflag = 1;
@@ -1373,7 +1383,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
        default:
                err = -ENOPROTOOPT;
                break;
-       };
+       }
 
        return err;
 }
@@ -1404,15 +1414,15 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
        struct udp_sock *up = udp_sk(sk);
        int val, len;
 
-       if(get_user(len,optlen))
+       if (get_user(len,optlen))
                return -EFAULT;
 
        len = min_t(unsigned int, len, sizeof(int));
 
-       if(len < 0)
+       if (len < 0)
                return -EINVAL;
 
-       switch(optname) {
+       switch (optname) {
        case UDP_CORK:
                val = up->corkflag;
                break;
@@ -1433,11 +1443,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
 
        default:
                return -ENOPROTOOPT;
-       };
+       }
 
-       if(put_user(len, optlen))
+       if (put_user(len, optlen))
                return -EFAULT;
-       if(copy_to_user(optval, &val,len))
+       if (copy_to_user(optval, &val,len))
                return -EFAULT;
        return 0;
 }
@@ -1486,15 +1496,11 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
                struct sk_buff *skb;
 
                spin_lock_bh(&rcvq->lock);
-               while ((skb = skb_peek(rcvq)) != NULL) {
-                       if (udp_lib_checksum_complete(skb)) {
-                               UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
-                               __skb_unlink(skb, rcvq);
-                               kfree_skb(skb);
-                       } else {
-                               skb->ip_summed = CHECKSUM_UNNECESSARY;
-                               break;
-                       }
+               while ((skb = skb_peek(rcvq)) != NULL &&
+                      udp_lib_checksum_complete(skb)) {
+                       UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
+                       __skb_unlink(skb, rcvq);
+                       kfree_skb(skb);
                }
                spin_unlock_bh(&rcvq->lock);
 
@@ -1573,7 +1579,7 @@ static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
        struct sock *sk = udp_get_first(seq);
 
        if (sk)
-               while(pos && (sk = udp_get_next(seq, sk)) != NULL)
+               while (pos && (sk = udp_get_next(seq, sk)) != NULL)
                        --pos;
        return pos ? NULL : sk;
 }
index b28fe1e..f34fd68 100644 (file)
@@ -31,7 +31,7 @@ static int udplite_v4_get_port(struct sock *sk, unsigned short snum)
 
 static int udplite_rcv(struct sk_buff *skb)
 {
-       return __udp4_lib_rcv(skb, udplite_hash, 1);
+       return __udp4_lib_rcv(skb, udplite_hash, IPPROTO_UDPLITE);
 }
 
 static void udplite_err(struct sk_buff *skb, u32 info)
index 78e80de..5ceca95 100644 (file)
@@ -28,7 +28,7 @@ static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32
        switch (nexthdr) {
        case IPPROTO_IPIP:
        case IPPROTO_IPV6:
-               *spi = skb->nh.iph->saddr;
+               *spi = ip_hdr(skb)->saddr;
                *seq = 0;
                return 0;
        }
@@ -39,9 +39,9 @@ static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32
 #ifdef CONFIG_NETFILTER
 static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
 {
-       struct iphdr *iph = skb->nh.iph;
-
        if (skb->dst == NULL) {
+               const struct iphdr *iph = ip_hdr(skb);
+
                if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
                                   skb->dev))
                        goto drop;
@@ -55,18 +55,18 @@ drop:
 
 int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
 {
-       int err;
        __be32 spi, seq;
        struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
        struct xfrm_state *x;
        int xfrm_nr = 0;
        int decaps = 0;
+       int err = xfrm4_parse_spi(skb, ip_hdr(skb)->protocol, &spi, &seq);
 
-       if ((err = xfrm4_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) != 0)
+       if (err != 0)
                goto drop;
 
        do {
-               struct iphdr *iph = skb->nh.iph;
+               const struct iphdr *iph = ip_hdr(skb);
 
                if (xfrm_nr == XFRM_MAX_DEPTH)
                        goto drop;
@@ -113,7 +113,8 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
                        break;
                }
 
-               if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) < 0)
+               err = xfrm_parse_spi(skb, ip_hdr(skb)->protocol, &spi, &seq);
+               if (err < 0)
                        goto drop;
        } while (!err);
 
@@ -146,15 +147,15 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
                return 0;
        } else {
 #ifdef CONFIG_NETFILTER
-               __skb_push(skb, skb->data - skb->nh.raw);
-               skb->nh.iph->tot_len = htons(skb->len);
-               ip_send_check(skb->nh.iph);
+               __skb_push(skb, skb->data - skb_network_header(skb));
+               ip_hdr(skb)->tot_len = htons(skb->len);
+               ip_send_check(ip_hdr(skb));
 
                NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
                        xfrm4_rcv_encap_finish);
                return 0;
 #else
-               return -skb->nh.iph->protocol;
+               return -ip_hdr(skb)->protocol;
 #endif
        }
 
index f68dfd8..a73e710 100644 (file)
  */
 static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct iphdr *iph, *top_iph = NULL;
+       struct iphdr *iph, *top_iph;
        int hdrlen, optlen;
 
-       iph = skb->nh.iph;
-       skb->h.ipiph = iph;
+       iph = ip_hdr(skb);
+       skb->transport_header = skb->network_header;
 
        hdrlen = 0;
        optlen = iph->ihl * 4 - sizeof(*iph);
        if (unlikely(optlen))
                hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);
 
-       skb->nh.raw = skb_push(skb, x->props.header_len + hdrlen);
-       top_iph = skb->nh.iph;
-       skb->h.raw += sizeof(*iph) - hdrlen;
+       skb_push(skb, x->props.header_len - IPV4_BEET_PHMAXLEN + hdrlen);
+       skb_reset_network_header(skb);
+       top_iph = ip_hdr(skb);
+       skb->transport_header += sizeof(*iph) - hdrlen;
 
        memmove(top_iph, iph, sizeof(*iph));
        if (unlikely(optlen)) {
@@ -50,9 +51,9 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
 
                BUG_ON(optlen < 0);
 
-               ph = (struct ip_beet_phdr *)skb->h.raw;
+               ph = (struct ip_beet_phdr *)skb_transport_header(skb);
                ph->padlen = 4 - (optlen & 4);
-               ph->hdrlen = (optlen + ph->padlen + sizeof(*ph)) / 8;
+               ph->hdrlen = optlen / 8;
                ph->nexthdr = top_iph->protocol;
                if (ph->padlen)
                        memset(ph + 1, IPOPT_NOP, ph->padlen);
@@ -69,23 +70,21 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
        int phlen = 0;
        int optlen = 0;
-       __u8 ph_nexthdr = 0, protocol = 0;
+       u8 ph_nexthdr = 0;
        int err = -EINVAL;
 
-       protocol = iph->protocol;
-
        if (unlikely(iph->protocol == IPPROTO_BEETPH)) {
                struct ip_beet_phdr *ph;
 
                if (!pskb_may_pull(skb, sizeof(*ph)))
                        goto out;
-               ph = (struct ip_beet_phdr *)(skb->h.ipiph + 1);
+               ph = (struct ip_beet_phdr *)(ipip_hdr(skb) + 1);
 
                phlen = sizeof(*ph) + ph->padlen;
-               optlen = ph->hdrlen * 8 - phlen;
+               optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
                if (optlen < 0 || optlen & 3 || optlen > 250)
                        goto out;
 
@@ -96,22 +95,20 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
                ph_nexthdr = ph->nexthdr;
        }
 
-       skb->nh.raw = skb->data + (phlen - sizeof(*iph));
-       memmove(skb->nh.raw, iph, sizeof(*iph));
-       skb->h.raw = skb->data + (phlen + optlen);
-       skb->data = skb->h.raw;
+       skb_set_network_header(skb, phlen - sizeof(*iph));
+       memmove(skb_network_header(skb), iph, sizeof(*iph));
+       skb_set_transport_header(skb, phlen + optlen);
+       skb->data = skb_transport_header(skb);
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        iph->ihl = (sizeof(*iph) + optlen) / 4;
        iph->tot_len = htons(skb->len + iph->ihl * 4);
        iph->daddr = x->sel.daddr.a4;
        iph->saddr = x->sel.saddr.a4;
        if (ph_nexthdr)
                iph->protocol = ph_nexthdr;
-       else
-               iph->protocol = protocol;
        iph->check = 0;
-       iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
+       iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
        err = 0;
 out:
        return err;
index 92676b7..6010471 100644 (file)
  */
 static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct iphdr *iph;
-       int ihl;
+       struct iphdr *iph = ip_hdr(skb);
+       int ihl = iph->ihl * 4;
 
-       iph = skb->nh.iph;
-       skb->h.ipiph = iph;
-
-       ihl = iph->ihl * 4;
-       skb->h.raw += ihl;
-
-       skb->nh.raw = memmove(skb_push(skb, x->props.header_len), iph, ihl);
+       skb->transport_header = skb->network_header + ihl;
+       skb_push(skb, x->props.header_len);
+       skb_reset_network_header(skb);
+       memmove(skb_network_header(skb), iph, ihl);
        return 0;
 }
 
@@ -46,12 +43,15 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
  */
 static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       int ihl = skb->data - skb->h.raw;
+       int ihl = skb->data - skb_transport_header(skb);
 
-       if (skb->h.raw != skb->nh.raw)
-               skb->nh.raw = memmove(skb->h.raw, skb->nh.raw, ihl);
-       skb->nh.iph->tot_len = htons(skb->len + ihl);
-       skb->h.raw = skb->data;
+       if (skb->transport_header != skb->network_header) {
+               memmove(skb_transport_header(skb),
+                       skb_network_header(skb), ihl);
+               skb->network_header = skb->transport_header;
+       }
+       ip_hdr(skb)->tot_len = htons(skb->len + ihl);
+       skb_reset_transport_header(skb);
        return 0;
 }
 
index ceb4376..a2f2e6a 100644 (file)
@@ -16,8 +16,8 @@
 
 static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
 {
-       struct iphdr *outer_iph = skb->nh.iph;
-       struct iphdr *inner_iph = skb->h.ipiph;
+       struct iphdr *outer_iph = ip_hdr(skb);
+       struct iphdr *inner_iph = ipip_hdr(skb);
 
        if (INET_ECN_is_ce(outer_iph->tos))
                IP_ECN_set_ce(inner_iph);
@@ -26,7 +26,7 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
 static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
 {
        if (INET_ECN_is_ce(iph->tos))
-               IP6_ECN_set_ce(skb->nh.ipv6h);
+               IP6_ECN_set_ce(ipv6_hdr(skb));
 }
 
 /* Add encapsulation header.
@@ -46,11 +46,12 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
        struct iphdr *iph, *top_iph;
        int flags;
 
-       iph = skb->nh.iph;
-       skb->h.ipiph = iph;
+       iph = ip_hdr(skb);
+       skb->transport_header = skb->network_header;
 
-       skb->nh.raw = skb_push(skb, x->props.header_len);
-       top_iph = skb->nh.iph;
+       skb_push(skb, x->props.header_len);
+       skb_reset_network_header(skb);
+       top_iph = ip_hdr(skb);
 
        top_iph->ihl = 5;
        top_iph->version = 4;
@@ -90,10 +91,11 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct iphdr *iph = skb->nh.iph;
+       struct iphdr *iph = ip_hdr(skb);
+       const unsigned char *old_mac;
        int err = -EINVAL;
 
-       switch(iph->protocol){
+       switch (iph->protocol){
                case IPPROTO_IPIP:
                        break;
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -111,10 +113,10 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
            (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                goto out;
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
        if (iph->protocol == IPPROTO_IPIP) {
                if (x->props.flags & XFRM_STATE_DECAP_DSCP)
-                       ipv4_copy_dscp(iph, skb->h.ipiph);
+                       ipv4_copy_dscp(iph, ipip_hdr(skb));
                if (!(x->props.flags & XFRM_STATE_NOECN))
                        ipip_ecn_decapsulate(skb);
        }
@@ -125,9 +127,10 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
                skb->protocol = htons(ETH_P_IPV6);
        }
 #endif
-       skb->mac.raw = memmove(skb->data - skb->mac_len,
-                              skb->mac.raw, skb->mac_len);
-       skb->nh.raw = skb->data;
+       old_mac = skb_mac_header(skb);
+       skb_set_mac_header(skb, -skb->mac_len);
+       memmove(skb_mac_header(skb), old_mac, skb->mac_len);
+       skb_reset_network_header(skb);
        err = 0;
 
 out:
index 038ca16..44ef208 100644 (file)
@@ -22,14 +22,13 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 {
        int mtu, ret = 0;
        struct dst_entry *dst;
-       struct iphdr *iph = skb->nh.iph;
 
        if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
                goto out;
 
        IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
 
-       if (!(iph->frag_off & htons(IP_DF)) || skb->local_df)
+       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
                goto out;
 
        dst = skb->dst;
index 5d51a2a..4ff8ed3 100644 (file)
@@ -119,7 +119,7 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 
                if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
                        unsigned short encap_family = xfrm[i]->props.family;
-                       switch(encap_family) {
+                       switch (encap_family) {
                        case AF_INET:
                                fl_tunnel.fl4_dst = xfrm[i]->id.daddr.a4;
                                fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
@@ -209,8 +209,8 @@ error:
 static void
 _decode_session4(struct sk_buff *skb, struct flowi *fl)
 {
-       struct iphdr *iph = skb->nh.iph;
-       u8 *xprth = skb->nh.raw + iph->ihl*4;
+       struct iphdr *iph = ip_hdr(skb);
+       u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
 
        memset(fl, 0, sizeof(struct flowi));
        if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
@@ -263,7 +263,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl)
                default:
                        fl->fl_ipsec_spi = 0;
                        break;
-               };
+               }
        }
        fl->proto = iph->protocol;
        fl->fl4_dst = iph->daddr;
index 3eef064..5685103 100644 (file)
@@ -12,9 +12,8 @@
 
 static int ipip_output(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct iphdr *iph;
+       struct iphdr *iph = ip_hdr(skb);
 
-       iph = skb->nh.iph;
        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
 
index 79682ef..8e5d54f 100644 (file)
@@ -57,6 +57,16 @@ config IPV6_ROUTE_INFO
 
          If unsure, say N.
 
+config IPV6_OPTIMISTIC_DAD
+       bool "IPv6: Enable RFC 4429 Optimistic DAD (EXPERIMENTAL)"
+       depends on IPV6 && EXPERIMENTAL
+       ---help---
+         This is experimental support for optimistic Duplicate
+         Address Detection.  It allows for autoconfigured addresses
+         to be used more quickly.
+
+         If unsure, say N.
+
 config INET6_AH
        tristate "IPv6: AH transformation"
        depends on IPV6
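
Because the new option depends on EXPERIMENTAL, it only shows up when that is enabled; a .config fragment turning it on would look like this (illustrative):

        CONFIG_EXPERIMENTAL=y
        CONFIG_IPV6=m
        CONFIG_IPV6_OPTIMISTIC_DAD=y
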
index d460017..bb33309 100644 (file)
@@ -7,14 +7,15 @@ obj-$(CONFIG_IPV6) += ipv6.o
 ipv6-objs :=   af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
                route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
                raw.o protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
-               exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
-               ip6_flowlabel.o ipv6_syms.o inet6_connection_sock.o
+               exthdrs.o sysctl_net_ipv6.o datagram.o \
+               ip6_flowlabel.o inet6_connection_sock.o
 
 ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
        xfrm6_output.o
 ipv6-$(CONFIG_NETFILTER) += netfilter.o
 ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o
 ipv6-$(CONFIG_IPV6_MIP6) += mip6.o
+ipv6-$(CONFIG_PROC_FS) += proc.o
 
 ipv6-objs += $(ipv6-y)
 
index 7552663..e04e493 100644 (file)
@@ -81,6 +81,7 @@
 #endif
 
 #include <asm/uaccess.h>
+#include <asm/unaligned.h>
 
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -172,6 +173,7 @@ struct ipv6_devconf ipv6_devconf __read_mostly = {
 #endif
 #endif
        .proxy_ndp              = 0,
+       .accept_source_route    = 0,    /* we do not accept RH0 by default. */
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -203,12 +205,11 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
 #endif
 #endif
        .proxy_ndp              = 0,
+       .accept_source_route    = 0,    /* we do not accept RH0 by default. */
 };
 
 /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
-#if 0
 const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
-#endif
 const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
 
 static void addrconf_del_timer(struct inet6_ifaddr *ifp)
@@ -244,6 +245,37 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
        add_timer(&ifp->timer);
 }
 
+static int snmp6_alloc_dev(struct inet6_dev *idev)
+{
+       int err = -ENOMEM;
+
+       if (!idev || !idev->dev)
+               return -EINVAL;
+
+       if (snmp_mib_init((void **)idev->stats.ipv6,
+                         sizeof(struct ipstats_mib),
+                         __alignof__(struct ipstats_mib)) < 0)
+               goto err_ip;
+       if (snmp_mib_init((void **)idev->stats.icmpv6,
+                         sizeof(struct icmpv6_mib),
+                         __alignof__(struct icmpv6_mib)) < 0)
+               goto err_icmp;
+
+       return 0;
+
+err_icmp:
+       snmp_mib_free((void **)idev->stats.ipv6);
+err_ip:
+       return err;
+}
+
+static int snmp6_free_dev(struct inet6_dev *idev)
+{
+       snmp_mib_free((void **)idev->stats.icmpv6);
+       snmp_mib_free((void **)idev->stats.ipv6);
+       return 0;
+}
+
 /* Nobody refers to this device, we may destroy it. */
 
 static void in6_dev_finish_destroy_rcu(struct rcu_head *head)
@@ -269,6 +301,8 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
        call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu);
 }
 
+EXPORT_SYMBOL(in6_dev_finish_destroy);
+
 static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
 {
        struct inet6_dev *ndev;
@@ -526,6 +560,16 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
 
        ifa->rt = rt;
 
+       /*
+        * part one of RFC 4429, section 3.3
+        * We should not configure an address as
+        * optimistic if we do not yet know the link
+        * layer address of our nexthop router
+        */
+
+       if (rt->rt6i_nexthop == NULL)
+               ifa->flags &= ~IFA_F_OPTIMISTIC;
+
        ifa->idev = idev;
        in6_dev_hold(idev);
        /* For caller */
@@ -702,6 +746,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
        int tmp_plen;
        int ret = 0;
        int max_addresses;
+       u32 addr_flags;
 
        write_lock(&idev->lock);
        if (ift) {
@@ -759,10 +804,17 @@ retry:
        spin_unlock_bh(&ifp->lock);
 
        write_unlock(&idev->lock);
+
+       addr_flags = IFA_F_TEMPORARY;
+       /* set in addrconf_prefix_rcv() */
+       if (ifp->flags & IFA_F_OPTIMISTIC)
+               addr_flags |= IFA_F_OPTIMISTIC;
+
        ift = !max_addresses ||
              ipv6_count_addresses(idev) < max_addresses ?
                ipv6_add_addr(idev, &addr, tmp_plen,
-                             ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK, IFA_F_TEMPORARY) : NULL;
+                             ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
+                             addr_flags) : NULL;
        if (!ift || IS_ERR(ift)) {
                in6_ifa_put(ifp);
                in6_dev_put(idev);
@@ -894,13 +946,14 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
                         * - Tentative Address (RFC2462 section 5.4)
                         *  - A tentative address is not considered
                         *    "assigned to an interface" in the traditional
-                        *    sense.
+                        *    sense, unless it is also flagged as optimistic.
                         * - Candidate Source Address (section 4)
                         *  - In any case, anycast addresses, multicast
                         *    addresses, and the unspecified address MUST
                         *    NOT be included in a candidate set.
                         */
-                       if (ifa->flags & IFA_F_TENTATIVE)
+                       if ((ifa->flags & IFA_F_TENTATIVE) &&
+                           (!(ifa->flags & IFA_F_OPTIMISTIC)))
                                continue;
                        if (unlikely(score.addr_type == IPV6_ADDR_ANY ||
                                     score.addr_type & IPV6_ADDR_MULTICAST)) {
@@ -959,15 +1012,17 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
                                }
                        }
 
-                       /* Rule 3: Avoid deprecated address */
+                       /* Rule 3: Avoid deprecated and optimistic addresses */
                        if (hiscore.rule < 3) {
                                if (ipv6_saddr_preferred(hiscore.addr_type) ||
-                                   !(ifa_result->flags & IFA_F_DEPRECATED))
+                                  (((ifa_result->flags &
+                                   (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0)))
                                        hiscore.attrs |= IPV6_SADDR_SCORE_PREFERRED;
                                hiscore.rule++;
                        }
                        if (ipv6_saddr_preferred(score.addr_type) ||
-                           !(ifa->flags & IFA_F_DEPRECATED)) {
+                          (((ifa_result->flags &
+                           (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0))) {
                                score.attrs |= IPV6_SADDR_SCORE_PREFERRED;
                                if (!(hiscore.attrs & IPV6_SADDR_SCORE_PREFERRED)) {
                                        score.rule = 3;
@@ -1105,8 +1160,10 @@ int ipv6_get_saddr(struct dst_entry *dst,
        return ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, daddr, saddr);
 }
 
+EXPORT_SYMBOL(ipv6_get_saddr);
 
-int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr)
+int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+                   unsigned char banned_flags)
 {
        struct inet6_dev *idev;
        int err = -EADDRNOTAVAIL;
@@ -1117,7 +1174,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr)
 
                read_lock_bh(&idev->lock);
                for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
-                       if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
+                       if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) {
                                ipv6_addr_copy(addr, &ifp->addr);
                                err = 0;
                                break;
@@ -1159,6 +1216,8 @@ int ipv6_chk_addr(struct in6_addr *addr, struct net_device *dev, int strict)
        return ifp != NULL;
 }
 
+EXPORT_SYMBOL(ipv6_chk_addr);
+
 static
 int ipv6_chk_same_addr(const struct in6_addr *addr, struct net_device *dev)
 {
@@ -1667,6 +1726,13 @@ ok:
 
                if (ifp == NULL && valid_lft) {
                        int max_addresses = in6_dev->cnf.max_addresses;
+                       u32 addr_flags = 0;
+
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+                       if (in6_dev->cnf.optimistic_dad &&
+                           !ipv6_devconf.forwarding)
+                               addr_flags = IFA_F_OPTIMISTIC;
+#endif
 
                        /* Do not allow to create too much of autoconfigured
                         * addresses; this would be too easy way to crash kernel.
@@ -1674,7 +1740,8 @@ ok:
                        if (!max_addresses ||
                            ipv6_count_addresses(in6_dev) < max_addresses)
                                ifp = ipv6_add_addr(in6_dev, &addr, pinfo->prefix_len,
-                                                   addr_type&IPV6_ADDR_SCOPE_MASK, 0);
+                                                   addr_type&IPV6_ADDR_SCOPE_MASK,
+                                                   addr_flags);
 
                        if (!ifp || IS_ERR(ifp)) {
                                in6_dev_put(in6_dev);
@@ -1882,6 +1949,11 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
 
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
                                      jiffies_to_clock_t(valid_lft * HZ), flags);
+               /*
+                * Note that section 3.1 of RFC 4429 indicates
+                * that the Optimistic flag should not be set for
+                * manually configured addresses
+                */
                addrconf_dad_start(ifp, 0);
                in6_ifa_put(ifp);
                addrconf_verify(0);
@@ -2058,8 +2130,16 @@ static void init_loopback(struct net_device *dev)
 static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr)
 {
        struct inet6_ifaddr * ifp;
+       u32 addr_flags = IFA_F_PERMANENT;
 
-       ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, IFA_F_PERMANENT);
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+       if (idev->cnf.optimistic_dad &&
+           !ipv6_devconf.forwarding)
+               addr_flags |= IFA_F_OPTIMISTIC;
+#endif
+
+
+       ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
        if (!IS_ERR(ifp)) {
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
                addrconf_dad_start(ifp, 0);
@@ -2127,7 +2207,7 @@ ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
 {
        struct in6_addr lladdr;
 
-       if (!ipv6_get_lladdr(link_dev, &lladdr)) {
+       if (!ipv6_get_lladdr(link_dev, &lladdr, IFA_F_TENTATIVE)) {
                addrconf_add_linklocal(idev, &lladdr);
                return 0;
        }
@@ -2238,7 +2318,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                default:
                        addrconf_dev_config(dev);
                        break;
-               };
+               }
                if (idev) {
                        if (run_pending)
                                addrconf_dad_run(idev);
@@ -2291,7 +2371,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                }
 #endif
                break;
-       };
+       }
 
        return NOTIFY_OK;
 }
@@ -2472,7 +2552,11 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
        unsigned long rand_num;
        struct inet6_dev *idev = ifp->idev;
 
-       rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? : 1);
+       if (ifp->flags & IFA_F_OPTIMISTIC)
+               rand_num = 0;
+       else
+               rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? : 1);
+
        ifp->probes = idev->cnf.dad_transmits;
        addrconf_mod_timer(ifp, AC_DAD, rand_num);
 }
@@ -2494,7 +2578,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
        if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
            !(ifp->flags&IFA_F_TENTATIVE) ||
            ifp->flags & IFA_F_NODAD) {
-               ifp->flags &= ~IFA_F_TENTATIVE;
+               ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC);
                spin_unlock_bh(&ifp->lock);
                read_unlock_bh(&idev->lock);
 
@@ -2514,6 +2598,14 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
                addrconf_dad_stop(ifp);
                return;
        }
+
+       /*
+        * Optimistic nodes can start receiving
+        * frames right away
+        */
+       if (ifp->flags & IFA_F_OPTIMISTIC)
+               ip6_ins_rt(ifp->rt);
+
        addrconf_dad_kick(ifp);
        spin_unlock_bh(&ifp->lock);
 out:
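
For context, the optimistic DAD changes in this file come down to bit tests on ifp->flags: an address may be both tentative and optimistic while DAD runs, it can already be used for output in that state, and both bits are cleared together once DAD succeeds. A standalone sketch of that flag logic; the flag values are assumed to mirror include/linux/if_addr.h and the helper names are invented:

#include <stdio.h>

/* Values assumed to mirror include/linux/if_addr.h. */
#define IFA_F_OPTIMISTIC 0x04
#define IFA_F_TENTATIVE  0x40

/* An optimistic address may be used as a source address even though DAD
 * has not finished; a plain tentative address may not (RFC 4429). */
static int usable_for_output(unsigned int flags)
{
	if (!(flags & IFA_F_TENTATIVE))
		return 1;
	return (flags & IFA_F_OPTIMISTIC) != 0;
}

/* When DAD completes, both bits are cleared together, as in
 * addrconf_dad_timer() in this hunk series. */
static unsigned int dad_completed(unsigned int flags)
{
	return flags & ~(IFA_F_TENTATIVE | IFA_F_OPTIMISTIC);
}

int main(void)
{
	unsigned int flags = IFA_F_TENTATIVE | IFA_F_OPTIMISTIC;

	printf("during DAD: usable=%d\n", usable_for_output(flags));
	flags = dad_completed(flags);
	printf("after DAD:  usable=%d flags=%#x\n",
	       usable_for_output(flags), flags);
	return 0;
}
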
@@ -2538,7 +2630,7 @@ static void addrconf_dad_timer(unsigned long data)
                 * DAD was successful
                 */
 
-               ifp->flags &= ~IFA_F_TENTATIVE;
+               ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC);
                spin_unlock_bh(&ifp->lock);
                read_unlock_bh(&idev->lock);
 
@@ -3162,7 +3254,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 
        s_idx = cb->args[0];
        s_ip_idx = ip_idx = cb->args[1];
-       read_lock(&dev_base_lock);
 
        for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
                if (idx < s_idx)
@@ -3224,7 +3315,6 @@ done:
                read_unlock_bh(&idev->lock);
                in6_dev_put(idev);
        }
-       read_unlock(&dev_base_lock);
        cb->args[0] = idx;
        cb->args[1] = ip_idx;
        return skb->len;
@@ -3356,6 +3446,10 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
 #endif
 #endif
        array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
+       array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+       array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
+#endif
 }
 
 static inline size_t inet6_if_nlmsg_size(void)
@@ -3369,14 +3463,44 @@ static inline size_t inet6_if_nlmsg_size(void)
                        nla_total_size(4) /* IFLA_INET6_FLAGS */
                        + nla_total_size(sizeof(struct ifla_cacheinfo))
                        + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
+                       + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
+                       + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
                 );
 }
 
+static inline void __snmp6_fill_stats(u64 *stats, void **mib, int items,
+                                     int bytes)
+{
+       int i;
+       int pad = bytes - sizeof(u64) * items;
+       BUG_ON(pad < 0);
+
+       /* Use put_unaligned() because stats may not be aligned for u64. */
+       put_unaligned(items, &stats[0]);
+       for (i = 1; i < items; i++)
+               put_unaligned(snmp_fold_field(mib, i), &stats[i]);
+
+       memset(&stats[items], 0, pad);
+}
+
+static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
+                            int bytes)
+{
+       switch(attrtype) {
+       case IFLA_INET6_STATS:
+               __snmp6_fill_stats(stats, (void **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
+               break;
+       case IFLA_INET6_ICMP6STATS:
+               __snmp6_fill_stats(stats, (void **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes);
+               break;
+       }
+}
+
 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
                             u32 pid, u32 seq, int event, unsigned int flags)
 {
        struct net_device *dev = idev->dev;
-       struct nlattr *conf;
+       struct nlattr *nla;
        struct ifinfomsg *hdr;
        struct nlmsghdr *nlh;
        void *protoinfo;
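
__snmp6_fill_stats() above writes u64 counters into a netlink attribute payload that is only guaranteed 32-bit alignment, hence put_unaligned(). A userspace sketch of the same layout, using memcpy() as a portable stand-in for put_unaligned() and a flat array instead of per-CPU snmp_fold_field() counters (all values invented):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write one u64 to a possibly unaligned destination; userspace stand-in
 * for the kernel's put_unaligned(). */
static void put_unaligned_u64(uint64_t val, void *dst)
{
	memcpy(dst, &val, sizeof(val));
}

/* Same layout as __snmp6_fill_stats(): slot 0 holds the number of
 * counters, the remaining slots the counter values, then zero padding. */
static void fill_stats(void *dst, const uint64_t *mib, int items, int bytes)
{
	unsigned char *p = dst;
	int pad = bytes - (int)sizeof(uint64_t) * items;
	int i;

	put_unaligned_u64(items, p);
	for (i = 1; i < items; i++)
		put_unaligned_u64(mib[i], p + i * sizeof(uint64_t));
	memset(p + items * sizeof(uint64_t), 0, pad);
}

int main(void)
{
	uint64_t mib[4] = { 0, 10, 20, 30 };	/* invented counters */
	unsigned char buf[4 + 5 * 8];		/* start 4 bytes in: unaligned */
	uint64_t count;

	fill_stats(buf + 4, mib, 4, 5 * 8);
	memcpy(&count, buf + 4, sizeof(count));
	printf("slot 0 (item count) = %llu\n", (unsigned long long)count);
	return 0;
}
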
@@ -3416,12 +3540,22 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
        ci.retrans_time = idev->nd_parms->retrans_time;
        NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
 
-       conf = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
-       if (conf == NULL)
+       nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
+       if (nla == NULL)
                goto nla_put_failure;
-       ipv6_store_devconf(&idev->cnf, nla_data(conf), nla_len(conf));
+       ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
 
-       /* XXX - Statistics/MC not implemented */
+       /* XXX - MC not implemented */
+
+       nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
+       if (nla == NULL)
+               goto nla_put_failure;
+       snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
+
+       nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
+       if (nla == NULL)
+               goto nla_put_failure;
+       snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
 
        nla_nest_end(skb, protoinfo);
        return nlmsg_end(skb, nlh);
@@ -3547,30 +3681,20 @@ errout:
                rtnl_set_sk_err(RTNLGRP_IPV6_PREFIX, err);
 }
 
-static struct rtnetlink_link inet6_rtnetlink_table[RTM_NR_MSGTYPES] = {
-       [RTM_GETLINK - RTM_BASE] = { .dumpit    = inet6_dump_ifinfo, },
-       [RTM_NEWADDR - RTM_BASE] = { .doit      = inet6_rtm_newaddr, },
-       [RTM_DELADDR - RTM_BASE] = { .doit      = inet6_rtm_deladdr, },
-       [RTM_GETADDR - RTM_BASE] = { .doit      = inet6_rtm_getaddr,
-                                    .dumpit    = inet6_dump_ifaddr, },
-       [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, },
-       [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, },
-       [RTM_NEWROUTE - RTM_BASE] = { .doit     = inet6_rtm_newroute, },
-       [RTM_DELROUTE - RTM_BASE] = { .doit     = inet6_rtm_delroute, },
-       [RTM_GETROUTE - RTM_BASE] = { .doit     = inet6_rtm_getroute,
-                                     .dumpit   = inet6_dump_fib, },
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-       [RTM_GETRULE  - RTM_BASE] = { .dumpit   = fib6_rules_dump,   },
-#endif
-};
-
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 {
        inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
 
        switch (event) {
        case RTM_NEWADDR:
-               ip6_ins_rt(ifp->rt);
+               /*
+                * If the address was optimistic,
+                * we inserted the route at the start of
+                * our DAD process, so we don't need
+                * to do it again
+                */
+               if (!(ifp->rt->rt6i_node))
+                       ip6_ins_rt(ifp->rt);
                if (ifp->idev->cnf.forwarding)
                        addrconf_join_anycast(ifp);
                break;
@@ -3883,6 +4007,25 @@ static struct addrconf_sysctl_table
                        .mode           =       0644,
                        .proc_handler   =       &proc_dointvec,
                },
+               {
+                       .ctl_name       =       NET_IPV6_ACCEPT_SOURCE_ROUTE,
+                       .procname       =       "accept_source_route",
+                       .data           =       &ipv6_devconf.accept_source_route,
+                       .maxlen         =       sizeof(int),
+                       .mode           =       0644,
+                       .proc_handler   =       &proc_dointvec,
+               },
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+               {
+                       .ctl_name       =       CTL_UNNUMBERED,
+                       .procname       =       "optimistic_dad",
+                       .data           =       &ipv6_devconf.optimistic_dad,
+                       .maxlen         =       sizeof(int),
+                       .mode           =       0644,
+                       .proc_handler   =       &proc_dointvec,
+
+               },
+#endif
                {
                        .ctl_name       =       0,      /* sentinel */
                }
@@ -4010,11 +4153,15 @@ int register_inet6addr_notifier(struct notifier_block *nb)
        return atomic_notifier_chain_register(&inet6addr_chain, nb);
 }
 
+EXPORT_SYMBOL(register_inet6addr_notifier);
+
 int unregister_inet6addr_notifier(struct notifier_block *nb)
 {
        return atomic_notifier_chain_unregister(&inet6addr_chain,nb);
 }
 
+EXPORT_SYMBOL(unregister_inet6addr_notifier);
+
 /*
  *     Init / cleanup code
  */
@@ -4053,7 +4200,18 @@ int __init addrconf_init(void)
        register_netdevice_notifier(&ipv6_dev_notf);
 
        addrconf_verify(0);
-       rtnetlink_links[PF_INET6] = inet6_rtnetlink_table;
+
+       err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo);
+       if (err < 0)
+               goto errout;
+
+       /* Only the first call to __rtnl_register can fail */
+       __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL);
+       __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL);
+       __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr, inet6_dump_ifaddr);
+       __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL, inet6_dump_ifmcaddr);
+       __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, inet6_dump_ifacaddr);
+
 #ifdef CONFIG_SYSCTL
        addrconf_sysctl.sysctl_header =
                register_sysctl_table(addrconf_sysctl.addrconf_root_dir);
@@ -4061,6 +4219,10 @@ int __init addrconf_init(void)
 #endif
 
        return 0;
+errout:
+       unregister_netdevice_notifier(&ipv6_dev_notf);
+
+       return err;
 }
 
 void __exit addrconf_cleanup(void)
@@ -4072,7 +4234,6 @@ void __exit addrconf_cleanup(void)
 
        unregister_netdevice_notifier(&ipv6_dev_notf);
 
-       rtnetlink_links[PF_INET6] = NULL;
 #ifdef CONFIG_SYSCTL
        addrconf_sysctl_unregister(&ipv6_devconf_dflt);
        addrconf_sysctl_unregister(&ipv6_devconf);
index 5cac14a..18cb928 100644 (file)
@@ -98,6 +98,11 @@ static int inet6_create(struct socket *sock, int protocol)
        int try_loading_module = 0;
        int err;
 
+       if (sock->type != SOCK_RAW &&
+           sock->type != SOCK_DGRAM &&
+           !inet_ehash_secret)
+               build_ehash_secret();
+
        /* Look for the requested type/protocol pair. */
        answer = NULL;
 lookup_protocol:
@@ -349,6 +354,8 @@ out:
        return err;
 }
 
+EXPORT_SYMBOL(inet6_bind);
+
 int inet6_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -365,6 +372,8 @@ int inet6_release(struct socket *sock)
        return inet_release(sock);
 }
 
+EXPORT_SYMBOL(inet6_release);
+
 int inet6_destroy_sock(struct sock *sk)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
@@ -428,6 +437,8 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
        return(0);
 }
 
+EXPORT_SYMBOL(inet6_getname);
+
 int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
        struct sock *sk = sock->sk;
@@ -437,6 +448,9 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        case SIOCGSTAMP:
                return sock_get_timestamp(sk, (struct timeval __user *)arg);
 
+       case SIOCGSTAMPNS:
+               return sock_get_timestampns(sk, (struct timespec __user *)arg);
+
        case SIOCADDRT:
        case SIOCDELRT:
 
@@ -457,6 +471,8 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        return(0);
 }
 
+EXPORT_SYMBOL(inet6_ioctl);
+
 const struct proto_ops inet6_stream_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
@@ -603,6 +619,8 @@ out_illegal:
        goto out;
 }
 
+EXPORT_SYMBOL(inet6_register_protosw);
+
 void
 inet6_unregister_protosw(struct inet_protosw *p)
 {
@@ -619,6 +637,8 @@ inet6_unregister_protosw(struct inet_protosw *p)
        }
 }
 
+EXPORT_SYMBOL(inet6_unregister_protosw);
+
 int inet6_sk_rebuild_header(struct sock *sk)
 {
        int err;
@@ -678,7 +698,8 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
        if (np->rxopt.all) {
                if ((opt->hop && (np->rxopt.bits.hopopts ||
                                  np->rxopt.bits.ohopopts)) ||
-                   ((IPV6_FLOWINFO_MASK & *(__be32*)skb->nh.raw) &&
+                   ((IPV6_FLOWINFO_MASK &
+                     *(__be32 *)skb_network_header(skb)) &&
                     np->rxopt.bits.rxflow) ||
                    (opt->srcrt && (np->rxopt.bits.srcrt ||
                     np->rxopt.bits.osrcrt)) ||
@@ -691,61 +712,28 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
 
 EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
 
-int
-snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign)
-{
-       if (ptr == NULL)
-               return -EINVAL;
-
-       ptr[0] = __alloc_percpu(mibsize);
-       if (!ptr[0])
-               goto err0;
-
-       ptr[1] = __alloc_percpu(mibsize);
-       if (!ptr[1])
-               goto err1;
-
-       return 0;
-
-err1:
-       free_percpu(ptr[0]);
-       ptr[0] = NULL;
-err0:
-       return -ENOMEM;
-}
-
-void
-snmp6_mib_free(void *ptr[2])
-{
-       if (ptr == NULL)
-               return;
-       free_percpu(ptr[0]);
-       free_percpu(ptr[1]);
-       ptr[0] = ptr[1] = NULL;
-}
-
 static int __init init_ipv6_mibs(void)
 {
-       if (snmp6_mib_init((void **)ipv6_statistics, sizeof (struct ipstats_mib),
-                          __alignof__(struct ipstats_mib)) < 0)
+       if (snmp_mib_init((void **)ipv6_statistics, sizeof (struct ipstats_mib),
+                         __alignof__(struct ipstats_mib)) < 0)
                goto err_ip_mib;
-       if (snmp6_mib_init((void **)icmpv6_statistics, sizeof (struct icmpv6_mib),
-                          __alignof__(struct icmpv6_mib)) < 0)
+       if (snmp_mib_init((void **)icmpv6_statistics, sizeof (struct icmpv6_mib),
+                         __alignof__(struct icmpv6_mib)) < 0)
                goto err_icmp_mib;
-       if (snmp6_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib),
-                          __alignof__(struct udp_mib)) < 0)
+       if (snmp_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib),
+                         __alignof__(struct udp_mib)) < 0)
                goto err_udp_mib;
-       if (snmp6_mib_init((void **)udplite_stats_in6, sizeof (struct udp_mib),
-                          __alignof__(struct udp_mib)) < 0)
+       if (snmp_mib_init((void **)udplite_stats_in6, sizeof (struct udp_mib),
+                         __alignof__(struct udp_mib)) < 0)
                goto err_udplite_mib;
        return 0;
 
 err_udplite_mib:
-       snmp6_mib_free((void **)udp_stats_in6);
+       snmp_mib_free((void **)udp_stats_in6);
 err_udp_mib:
-       snmp6_mib_free((void **)icmpv6_statistics);
+       snmp_mib_free((void **)icmpv6_statistics);
 err_icmp_mib:
-       snmp6_mib_free((void **)ipv6_statistics);
+       snmp_mib_free((void **)ipv6_statistics);
 err_ip_mib:
        return -ENOMEM;
 
@@ -753,10 +741,10 @@ err_ip_mib:
 
 static void cleanup_ipv6_mibs(void)
 {
-       snmp6_mib_free((void **)ipv6_statistics);
-       snmp6_mib_free((void **)icmpv6_statistics);
-       snmp6_mib_free((void **)udp_stats_in6);
-       snmp6_mib_free((void **)udplite_stats_in6);
+       snmp_mib_free((void **)ipv6_statistics);
+       snmp_mib_free((void **)icmpv6_statistics);
+       snmp_mib_free((void **)udp_stats_in6);
+       snmp_mib_free((void **)udplite_stats_in6);
 }
 
 static int __init inet6_init(void)
@@ -929,6 +917,8 @@ static void __exit inet6_exit(void)
 {
        /* First of all disallow new sockets creation. */
        sock_unregister(PF_INET6);
+       /* Disallow any further netlink messages */
+       rtnl_unregister_all(PF_INET6);
 
        /* Cleanup code parts. */
        ipv6_packet_cleanup();
index dc68b72..b696c84 100644 (file)
@@ -238,8 +238,8 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
        top_iph = (struct ipv6hdr *)skb->data;
        top_iph->payload_len = htons(skb->len - sizeof(*top_iph));
 
-       nexthdr = *skb->nh.raw;
-       *skb->nh.raw = IPPROTO_AH;
+       nexthdr = *skb_network_header(skb);
+       *skb_network_header(skb) = IPPROTO_AH;
 
        /* When there are no extension headers, we only need to save the first
         * 8 bytes of the base IP header.
@@ -247,7 +247,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
        memcpy(tmp_base, top_iph, sizeof(tmp_base));
 
        tmp_ext = NULL;
-       extlen = skb->h.raw - (unsigned char *)(top_iph + 1);
+       extlen = skb_transport_offset(skb) + sizeof(struct ipv6hdr);
        if (extlen) {
                extlen += sizeof(*tmp_ext);
                tmp_ext = kmalloc(extlen, GFP_ATOMIC);
@@ -268,7 +268,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
                        goto error_free_iph;
        }
 
-       ah = (struct ip_auth_hdr *)skb->h.raw;
+       ah = (struct ip_auth_hdr *)skb_transport_header(skb);
        ah->nexthdr = nexthdr;
 
        top_iph->priority    = 0;
@@ -316,8 +316,8 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
         *
         * To erase AH:
         * Keeping copy of cleared headers. After AH processing,
-        * Moving the pointer of skb->nh.raw by using skb_pull as long as AH
-        * header length. Then copy back the copy as long as hdr_len
+        * move skb->network_header forward by the AH header length using
+        * skb_pull, then copy back the saved hdr_len bytes of headers.
         * If destination header following AH exists, copy it into after [Ext2].
         *
         * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
@@ -325,6 +325,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
         */
 
        struct ipv6_auth_hdr *ah;
+       struct ipv6hdr *ip6h;
        struct ah_data *ahp;
        unsigned char *tmp_hdr = NULL;
        u16 hdr_len;
@@ -341,7 +342,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
            pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                goto out;
 
-       hdr_len = skb->data - skb->nh.raw;
+       hdr_len = skb->data - skb_network_header(skb);
        ah = (struct ipv6_auth_hdr*)skb->data;
        ahp = x->data;
        nexthdr = ah->nexthdr;
@@ -354,16 +355,17 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
        if (!pskb_may_pull(skb, ah_hlen))
                goto out;
 
-       tmp_hdr = kmemdup(skb->nh.raw, hdr_len, GFP_ATOMIC);
+       tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC);
        if (!tmp_hdr)
                goto out;
-       if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len, XFRM_POLICY_IN))
+       ip6h = ipv6_hdr(skb);
+       if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
                goto free_out;
-       skb->nh.ipv6h->priority    = 0;
-       skb->nh.ipv6h->flow_lbl[0] = 0;
-       skb->nh.ipv6h->flow_lbl[1] = 0;
-       skb->nh.ipv6h->flow_lbl[2] = 0;
-       skb->nh.ipv6h->hop_limit   = 0;
+       ip6h->priority    = 0;
+       ip6h->flow_lbl[0] = 0;
+       ip6h->flow_lbl[1] = 0;
+       ip6h->flow_lbl[2] = 0;
+       ip6h->hop_limit   = 0;
 
        {
                u8 auth_data[MAX_AH_AUTH_LEN];
@@ -382,7 +384,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
                }
        }
 
-       skb->h.raw = memcpy(skb->nh.raw += ah_hlen, tmp_hdr, hdr_len);
+       skb->network_header += ah_hlen;
+       memcpy(skb_network_header(skb), tmp_hdr, hdr_len);
+       skb->transport_header = skb->network_header;
        __skb_pull(skb, ah_hlen + hdr_len);
 
        kfree(tmp_hdr);
index 3b4e8dc..403eee6 100644 (file)
@@ -209,7 +209,7 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
                     __be16 port, u32 info, u8 *payload)
 {
        struct ipv6_pinfo *np  = inet6_sk(sk);
-       struct icmp6hdr *icmph = (struct icmp6hdr *)skb->h.raw;
+       struct icmp6hdr *icmph = icmp6_hdr(skb);
        struct sock_exterr_skb *serr;
 
        if (!np->recverr)
@@ -227,11 +227,12 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
        serr->ee.ee_pad = 0;
        serr->ee.ee_info = info;
        serr->ee.ee_data = 0;
-       serr->addr_offset = (u8*)&(((struct ipv6hdr*)(icmph+1))->daddr) - skb->nh.raw;
+       serr->addr_offset = (u8 *)&(((struct ipv6hdr *)(icmph + 1))->daddr) -
+                                 skb_network_header(skb);
        serr->port = port;
 
-       skb->h.raw = payload;
        __skb_pull(skb, payload - skb->data);
+       skb_reset_transport_header(skb);
 
        if (sock_queue_err_skb(sk, skb))
                kfree_skb(skb);
@@ -251,8 +252,9 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
        if (!skb)
                return;
 
-       iph = (struct ipv6hdr*)skb_put(skb, sizeof(struct ipv6hdr));
-       skb->nh.ipv6h = iph;
+       skb_put(skb, sizeof(struct ipv6hdr));
+       skb_reset_network_header(skb);
+       iph = ipv6_hdr(skb);
        ipv6_addr_copy(&iph->daddr, &fl->fl6_dst);
 
        serr = SKB_EXT_ERR(skb);
@@ -263,11 +265,11 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
        serr->ee.ee_pad = 0;
        serr->ee.ee_info = info;
        serr->ee.ee_data = 0;
-       serr->addr_offset = (u8*)&iph->daddr - skb->nh.raw;
+       serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
        serr->port = fl->fl_ip_dport;
 
-       skb->h.raw = skb->tail;
-       __skb_pull(skb, skb->tail - skb->data);
+       __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
+       skb_reset_transport_header(skb);
 
        if (sock_queue_err_skb(sk, skb))
                kfree_skb(skb);
@@ -309,21 +311,24 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 
        sin = (struct sockaddr_in6 *)msg->msg_name;
        if (sin) {
+               const unsigned char *nh = skb_network_header(skb);
                sin->sin6_family = AF_INET6;
                sin->sin6_flowinfo = 0;
                sin->sin6_port = serr->port;
                sin->sin6_scope_id = 0;
                if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) {
                        ipv6_addr_copy(&sin->sin6_addr,
-                         (struct in6_addr *)(skb->nh.raw + serr->addr_offset));
+                                 (struct in6_addr *)(nh + serr->addr_offset));
                        if (np->sndflow)
-                               sin->sin6_flowinfo = *(__be32*)(skb->nh.raw + serr->addr_offset - 24) & IPV6_FLOWINFO_MASK;
+                               sin->sin6_flowinfo =
+                                       (*(__be32 *)(nh + serr->addr_offset - 24) &
+                                        IPV6_FLOWINFO_MASK);
                        if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                                sin->sin6_scope_id = IP6CB(skb)->iif;
                } else {
                        ipv6_addr_set(&sin->sin6_addr, 0, 0,
                                      htonl(0xffff),
-                                     *(__be32*)(skb->nh.raw + serr->addr_offset));
+                                     *(__be32 *)(nh + serr->addr_offset));
                }
        }
 
@@ -335,7 +340,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                sin->sin6_flowinfo = 0;
                sin->sin6_scope_id = 0;
                if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) {
-                       ipv6_addr_copy(&sin->sin6_addr, &skb->nh.ipv6h->saddr);
+                       ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
                        if (np->rxopt.all)
                                datagram_recv_ctl(sk, msg, skb);
                        if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -344,8 +349,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                        struct inet_sock *inet = inet_sk(sk);
 
                        ipv6_addr_set(&sin->sin6_addr, 0, 0,
-                                     htonl(0xffff),
-                                     skb->nh.iph->saddr);
+                                     htonl(0xffff), ip_hdr(skb)->saddr);
                        if (inet->cmsg_flags)
                                ip_cmsg_recv(msg, skb);
                }
@@ -381,33 +385,34 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet6_skb_parm *opt = IP6CB(skb);
+       unsigned char *nh = skb_network_header(skb);
 
        if (np->rxopt.bits.rxinfo) {
                struct in6_pktinfo src_info;
 
                src_info.ipi6_ifindex = opt->iif;
-               ipv6_addr_copy(&src_info.ipi6_addr, &skb->nh.ipv6h->daddr);
+               ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
                put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
        }
 
        if (np->rxopt.bits.rxhlim) {
-               int hlim = skb->nh.ipv6h->hop_limit;
+               int hlim = ipv6_hdr(skb)->hop_limit;
                put_cmsg(msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim);
        }
 
        if (np->rxopt.bits.rxtclass) {
-               int tclass = (ntohl(*(__be32 *)skb->nh.ipv6h) >> 20) & 0xff;
+               int tclass = (ntohl(*(__be32 *)ipv6_hdr(skb)) >> 20) & 0xff;
                put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
        }
 
-       if (np->rxopt.bits.rxflow && (*(__be32*)skb->nh.raw & IPV6_FLOWINFO_MASK)) {
-               __be32 flowinfo = *(__be32*)skb->nh.raw & IPV6_FLOWINFO_MASK;
+       if (np->rxopt.bits.rxflow && (*(__be32 *)nh & IPV6_FLOWINFO_MASK)) {
+               __be32 flowinfo = *(__be32 *)nh & IPV6_FLOWINFO_MASK;
                put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo);
        }
 
        /* HbH is allowed only once */
        if (np->rxopt.bits.hopopts && opt->hop) {
-               u8 *ptr = skb->nh.raw + opt->hop;
+               u8 *ptr = nh + opt->hop;
                put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr);
        }
 
@@ -423,11 +428,11 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
                 * IPV6_RECVDSTOPTS is more generic. --yoshfuji
                 */
                unsigned int off = sizeof(struct ipv6hdr);
-               u8 nexthdr = skb->nh.ipv6h->nexthdr;
+               u8 nexthdr = ipv6_hdr(skb)->nexthdr;
 
                while (off <= opt->lastopt) {
                        unsigned len;
-                       u8 *ptr = skb->nh.raw + off;
+                       u8 *ptr = nh + off;
 
                        switch(nexthdr) {
                        case IPPROTO_DSTOPTS:
@@ -461,27 +466,27 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
                struct in6_pktinfo src_info;
 
                src_info.ipi6_ifindex = opt->iif;
-               ipv6_addr_copy(&src_info.ipi6_addr, &skb->nh.ipv6h->daddr);
+               ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
                put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
        }
        if (np->rxopt.bits.rxohlim) {
-               int hlim = skb->nh.ipv6h->hop_limit;
+               int hlim = ipv6_hdr(skb)->hop_limit;
                put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
        }
        if (np->rxopt.bits.ohopopts && opt->hop) {
-               u8 *ptr = skb->nh.raw + opt->hop;
+               u8 *ptr = nh + opt->hop;
                put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr);
        }
        if (np->rxopt.bits.odstopts && opt->dst0) {
-               u8 *ptr = skb->nh.raw + opt->dst0;
+               u8 *ptr = nh + opt->dst0;
                put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr);
        }
        if (np->rxopt.bits.osrcrt && opt->srcrt) {
-               struct ipv6_rt_hdr *rthdr = (struct ipv6_rt_hdr *)(skb->nh.raw + opt->srcrt);
+               struct ipv6_rt_hdr *rthdr = (struct ipv6_rt_hdr *)(nh + opt->srcrt);
                put_cmsg(msg, SOL_IPV6, IPV6_2292RTHDR, (rthdr->hdrlen+1) << 3, rthdr);
        }
        if (np->rxopt.bits.odstopts && opt->dst1) {
-               u8 *ptr = skb->nh.raw + opt->dst1;
+               u8 *ptr = nh + opt->dst1;
                put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr);
        }
        return 0;
@@ -718,7 +723,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
                                       cmsg->cmsg_type);
                        err = -EINVAL;
                        break;
-               };
+               }
        }
 
 exit_f:
index 363e63f..7107bb7 100644 (file)
 static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
-       int hdr_len;
        struct ipv6hdr *top_iph;
        struct ipv6_esp_hdr *esph;
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
-       struct esp_data *esp;
        struct sk_buff *trailer;
        int blksize;
        int clen;
        int alen;
        int nfrags;
-
-       esp = x->data;
-       hdr_len = skb->h.raw - skb->data +
-                 sizeof(*esph) + esp->conf.ivlen;
+       u8 *tail;
+       struct esp_data *esp = x->data;
+       int hdr_len = (skb_transport_offset(skb) +
+                      sizeof(*esph) + esp->conf.ivlen);
 
        /* Strip IP+ESP header. */
        __skb_pull(skb, hdr_len);
@@ -81,19 +79,20 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        }
 
        /* Fill padding... */
+       tail = skb_tail_pointer(trailer);
        do {
                int i;
                for (i=0; i<clen-skb->len - 2; i++)
-                       *(u8*)(trailer->tail + i) = i+1;
+                       tail[i] = i + 1;
        } while (0);
-       *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
+       tail[clen-skb->len - 2] = (clen - skb->len) - 2;
        pskb_put(skb, trailer, clen - skb->len);
 
        top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len);
-       esph = (struct ipv6_esp_hdr *)skb->h.raw;
+       esph = (struct ipv6_esp_hdr *)skb_transport_header(skb);
        top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph));
-       *(u8*)(trailer->tail - 1) = *skb->nh.raw;
-       *skb->nh.raw = IPPROTO_ESP;
+       *(skb_tail_pointer(trailer) - 1) = *skb_network_header(skb);
+       *skb_network_header(skb) = IPPROTO_ESP;
 
        esph->spi = x->id.spi;
        esph->seq_no = htonl(++x->replay.oseq);
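
The padding loop above builds the standard self-describing ESP trailer: pad bytes count up from 1, followed by the pad-length byte, with the original next-header value written into the last byte of the trailer. A standalone illustration of what that tail looks like for example lengths (all numbers are made up):

#include <stdio.h>

int main(void)
{
	unsigned char tail[16];
	int payload_len = 5;	/* example cleartext length  */
	int blksize = 8;	/* example cipher block size */
	int clen = ((payload_len + 2 + blksize - 1) / blksize) * blksize;
	int padlen = clen - payload_len - 2;
	int i;

	/* Pad bytes are 1, 2, 3, ... as in the esp6_output() loop. */
	for (i = 0; i < padlen; i++)
		tail[i] = i + 1;
	tail[padlen] = padlen;		/* pad length byte                 */
	tail[padlen + 1] = 41;		/* next header, e.g. IPPROTO_IPV6  */

	for (i = 0; i < padlen + 2; i++)
		printf("%02x ", tail[i]);
	printf("\n");
	return 0;
}
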
@@ -150,8 +149,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
        int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
        int alen = esp->auth.icv_trunc_len;
        int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;
-
-       int hdr_len = skb->h.raw - skb->nh.raw;
+       int hdr_len = skb_network_header_len(skb);
        int nfrags;
        int ret = 0;
 
@@ -191,7 +189,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
        skb->ip_summed = CHECKSUM_NONE;
 
        esph = (struct ipv6_esp_hdr*)skb->data;
-       iph = skb->nh.ipv6h;
+       iph = ipv6_hdr(skb);
 
        /* Get ivec. This can be wrong, check against another impls. */
        if (esp->conf.ivlen)
@@ -231,28 +229,30 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
                ret = nexthdr[1];
        }
 
-       skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) - hdr_len;
-
+       __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
+       skb_set_transport_header(skb, -hdr_len);
 out:
        return ret;
 }
 
-static u32 esp6_get_max_size(struct xfrm_state *x, int mtu)
+static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 {
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
+       u32 align = max_t(u32, blksize, esp->conf.padlen);
+       u32 rem;
+
+       mtu -= x->props.header_len + esp->auth.icv_trunc_len;
+       rem = mtu & (align - 1);
+       mtu &= ~(align - 1);
 
-       if (x->props.mode == XFRM_MODE_TUNNEL) {
-               mtu = ALIGN(mtu + 2, blksize);
-       } else {
-               /* The worst case. */
+       if (x->props.mode != XFRM_MODE_TUNNEL) {
                u32 padsize = ((blksize - 1) & 7) + 1;
-               mtu = ALIGN(mtu + 2, padsize) + blksize - padsize;
+               mtu -= blksize - padsize;
+               mtu += min_t(u32, blksize - padsize, rem);
        }
-       if (esp->conf.padlen)
-               mtu = ALIGN(mtu, esp->conf.padlen);
 
-       return mtu + x->props.header_len + esp->auth.icv_trunc_len;
+       return mtu - 2;
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
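
Replacing get_max_size with get_mtu inverts the calculation: rather than inflating a payload length into a worst-case packet size, esp6_get_mtu() starts from the link MTU and returns the largest payload that still fits once the ESP header, IV, ICV and block alignment are accounted for. A standalone rendition of the new arithmetic with example numbers (header and ICV lengths are invented, and the alignment is assumed to be a power of two, as cipher block sizes are):

#include <stdio.h>

static unsigned int esp6_mtu(unsigned int mtu, unsigned int header_len,
			     unsigned int icv_len, unsigned int blksize,
			     unsigned int padlen, int tunnel_mode)
{
	unsigned int align = blksize > padlen ? blksize : padlen;
	unsigned int rem;

	mtu -= header_len + icv_len;	/* ESP header + IV, then ICV    */
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);		/* round down to the alignment  */

	if (!tunnel_mode) {
		unsigned int padsize = ((blksize - 1) & 7) + 1;

		mtu -= blksize - padsize;
		if (blksize - padsize < rem)
			mtu += blksize - padsize;
		else
			mtu += rem;
	}

	return mtu - 2;			/* pad length + next header     */
}

int main(void)
{
	/* Example: 1500-byte link MTU, 16-byte ESP header+IV, 12-byte ICV,
	 * 16-byte block size, no extra padding, tunnel mode. */
	printf("payload budget: %u\n", esp6_mtu(1500, 16, 12, 16, 0, 1));
	return 0;
}
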
@@ -382,7 +382,7 @@ static struct xfrm_type esp6_type =
        .proto          = IPPROTO_ESP,
        .init_state     = esp6_init_state,
        .destructor     = esp6_destroy,
-       .get_max_size   = esp6_get_max_size,
+       .get_mtu        = esp6_get_mtu,
        .input          = esp6_input,
        .output         = esp6_output,
        .hdr_offset     = xfrm6_find_1stfragopt,
index 28e0c65..6d8e4ac 100644 (file)
 
 int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
 {
-       int packet_len = skb->tail - skb->nh.raw;
+       const unsigned char *nh = skb_network_header(skb);
+       int packet_len = skb->tail - skb->network_header;
        struct ipv6_opt_hdr *hdr;
        int len;
 
        if (offset + 2 > packet_len)
                goto bad;
-       hdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+       hdr = (struct ipv6_opt_hdr *)(nh + offset);
        len = ((hdr->hdrlen + 1) << 3);
 
        if (offset + len > packet_len)
@@ -66,7 +67,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
        len -= 2;
 
        while (len > 0) {
-               int opttype = skb->nh.raw[offset];
+               int opttype = nh[offset];
                int optlen;
 
                if (opttype == type)
@@ -77,7 +78,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
                        optlen = 1;
                        break;
                default:
-                       optlen = skb->nh.raw[offset + 1] + 2;
+                       optlen = nh[offset + 1] + 2;
                        if (optlen > len)
                                goto bad;
                        break;
@@ -113,7 +114,7 @@ static int ip6_tlvopt_unknown(struct sk_buff **skbp, int optoff)
 {
        struct sk_buff *skb = *skbp;
 
-       switch ((skb->nh.raw[optoff] & 0xC0) >> 6) {
+       switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
        case 0: /* ignore */
                return 1;
 
@@ -124,12 +125,12 @@ static int ip6_tlvopt_unknown(struct sk_buff **skbp, int optoff)
                /* Actually, it is redundant check. icmp_send
                   will recheck in any case.
                 */
-               if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr))
+               if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
                        break;
        case 2: /* send ICMP PARM PROB regardless and drop packet */
                icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
                return 0;
-       };
+       }
 
        kfree_skb(skb);
        return 0;
@@ -141,19 +142,20 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp)
 {
        struct sk_buff *skb = *skbp;
        struct tlvtype_proc *curr;
-       int off = skb->h.raw - skb->nh.raw;
-       int len = ((skb->h.raw[1]+1)<<3);
+       const unsigned char *nh = skb_network_header(skb);
+       int off = skb_network_header_len(skb);
+       int len = (skb_transport_header(skb)[1] + 1) << 3;
 
-       if ((skb->h.raw + len) - skb->data > skb_headlen(skb))
+       if (skb_transport_offset(skb) + len > skb_headlen(skb))
                goto bad;
 
        off += 2;
        len -= 2;
 
        while (len > 0) {
-               int optlen = skb->nh.raw[off+1]+2;
+               int optlen = nh[off + 1] + 2;
 
-               switch (skb->nh.raw[off]) {
+               switch (nh[off]) {
                case IPV6_TLV_PAD0:
                        optlen = 1;
                        break;
@@ -165,7 +167,7 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp)
                        if (optlen > len)
                                goto bad;
                        for (curr=procs; curr->type >= 0; curr++) {
-                               if (curr->type == skb->nh.raw[off]) {
+                               if (curr->type == nh[off]) {
                                        /* type specific length/alignment
                                           checks will be performed in the
                                           func(). */
@@ -200,7 +202,7 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff)
        struct sk_buff *skb = *skbp;
        struct ipv6_destopt_hao *hao;
        struct inet6_skb_parm *opt = IP6CB(skb);
-       struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->nh.raw;
+       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        struct in6_addr tmp_addr;
        int ret;
 
@@ -211,7 +213,7 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff)
        opt->dsthao = opt->dst1;
        opt->dst1 = 0;
 
-       hao = (struct ipv6_destopt_hao *)(skb->nh.raw + optoff);
+       hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);
 
        if (hao->length != 16) {
                LIMIT_NETDEBUG(
@@ -244,8 +246,9 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff)
 
                /* update all variable using below by copied skbuff */
                *skbp = skb = skb2;
-               hao = (struct ipv6_destopt_hao *)(skb2->nh.raw + optoff);
-               ipv6h = (struct ipv6hdr *)skb2->nh.raw;
+               hao = (struct ipv6_destopt_hao *)(skb_network_header(skb2) +
+                                                 optoff);
+               ipv6h = ipv6_hdr(skb2);
        }
 
        if (skb->ip_summed == CHECKSUM_COMPLETE)
@@ -255,7 +258,7 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff)
        ipv6_addr_copy(&ipv6h->saddr, &hao->addr);
        ipv6_addr_copy(&hao->addr, &tmp_addr);
 
-       if (skb->tstamp.off_sec == 0)
+       if (skb->tstamp.tv64 == 0)
                __net_timestamp(skb);
 
        return 1;
@@ -285,16 +288,16 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp)
 #endif
        struct dst_entry *dst;
 
-       if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
-           !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
+       if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
+           !pskb_may_pull(skb, (skb_transport_offset(skb) +
+                                ((skb_transport_header(skb)[1] + 1) << 3)))) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INHDRERRORS);
                kfree_skb(skb);
                return -1;
        }
 
-       opt->lastopt = skb->h.raw - skb->nh.raw;
-       opt->dst1 = skb->h.raw - skb->nh.raw;
+       opt->lastopt = opt->dst1 = skb_network_header_len(skb);
 #ifdef CONFIG_IPV6_MIP6
        dstbuf = opt->dst1;
 #endif
@@ -303,7 +306,7 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp)
        if (ip6_parse_tlv(tlvprocdestopt_lst, skbp)) {
                dst_release(dst);
                skb = *skbp;
-               skb->h.raw += ((skb->h.raw[1]+1)<<3);
+               skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
                opt = IP6CB(skb);
 #ifdef CONFIG_IPV6_MIP6
                opt->nhoff = dstbuf;
@@ -362,22 +365,58 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp)
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct in6_addr *addr = NULL;
        struct in6_addr daddr;
+       struct inet6_dev *idev;
        int n, i;
-
        struct ipv6_rt_hdr *hdr;
        struct rt0_hdr *rthdr;
+       int accept_source_route = ipv6_devconf.accept_source_route;
+
+       if (accept_source_route < 0 ||
+           ((idev = in6_dev_get(skb->dev)) == NULL)) {
+               kfree_skb(skb);
+               return -1;
+       }
+       if (idev->cnf.accept_source_route < 0) {
+               in6_dev_put(idev);
+               kfree_skb(skb);
+               return -1;
+       }
+
+       if (accept_source_route > idev->cnf.accept_source_route)
+               accept_source_route = idev->cnf.accept_source_route;
 
-       if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
-           !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
+       in6_dev_put(idev);
+
+       if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
+           !pskb_may_pull(skb, (skb_transport_offset(skb) +
+                                ((skb_transport_header(skb)[1] + 1) << 3)))) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INHDRERRORS);
                kfree_skb(skb);
                return -1;
        }
 
-       hdr = (struct ipv6_rt_hdr *) skb->h.raw;
+       hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
 
-       if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) ||
+       switch (hdr->type) {
+#ifdef CONFIG_IPV6_MIP6
+       case IPV6_SRCRT_TYPE_2:
+               break;
+#endif
+       case IPV6_SRCRT_TYPE_0:
+               if (accept_source_route > 0)
+                       break;
+               kfree_skb(skb);
+               return -1;
+       default:
+               IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+                                IPSTATS_MIB_INHDRERRORS);
+               icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
+                                 (&hdr->type) - skb_network_header(skb));
+               return -1;
+       }
+
+       if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
            skb->pkt_type != PACKET_HOST) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INADDRERRORS);
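
The checks added to ipv6_rthdr_rcv() combine the global and per-interface accept_source_route settings: a negative value on either side drops the packet silently, otherwise the more restrictive of the two decides whether a type 0 routing header is processed (type 2, used by Mobile IPv6, bypasses this test). A small sketch of that decision; the function name and return values are illustrative only:

#include <stdio.h>

/* 'global' and 'dev' stand for the accept_source_route values this patch
 * introduces (all/<interface>); returns 1 if an RH0 header may be processed. */
static int rh0_allowed(int global, int dev)
{
	int accept = global;

	if (global < 0 || dev < 0)
		return 0;		/* dropped before the type check  */
	if (dev < accept)
		accept = dev;		/* use the more restrictive value */
	return accept > 0;
}

int main(void)
{
	printf("all=0, if=1  -> %d\n", rh0_allowed(0, 1));	/* 0: dropped   */
	printf("all=1, if=1  -> %d\n", rh0_allowed(1, 1));	/* 1: processed */
	printf("all=1, if=-1 -> %d\n", rh0_allowed(1, -1));	/* 0: dropped   */
	return 0;
}
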
@@ -405,12 +444,11 @@ looped_back:
                        break;
                }
 
-               opt->lastopt = skb->h.raw - skb->nh.raw;
-               opt->srcrt = skb->h.raw - skb->nh.raw;
-               skb->h.raw += (hdr->hdrlen + 1) << 3;
+               opt->lastopt = opt->srcrt = skb_network_header_len(skb);
+               skb->transport_header += (hdr->hdrlen + 1) << 3;
                opt->dst0 = opt->dst1;
                opt->dst1 = 0;
-               opt->nhoff = (&hdr->nexthdr) - skb->nh.raw;
+               opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
                return 1;
        }
 
@@ -419,7 +457,9 @@ looped_back:
                if (hdr->hdrlen & 0x01) {
                        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                         IPSTATS_MIB_INHDRERRORS);
-                       icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw);
+                       icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
+                                         ((&hdr->hdrlen) -
+                                          skb_network_header(skb)));
                        return -1;
                }
                break;
@@ -434,11 +474,6 @@ looped_back:
                }
                break;
 #endif
-       default:
-               IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
-                                IPSTATS_MIB_INHDRERRORS);
-               icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
-               return -1;
        }
 
        /*
@@ -451,7 +486,9 @@ looped_back:
        if (hdr->segments_left > n) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INHDRERRORS);
-               icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw);
+               icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
+                                 ((&hdr->segments_left) -
+                                  skb_network_header(skb)));
                return -1;
        }
 
@@ -470,7 +507,7 @@ looped_back:
                kfree_skb(skb);
                *skbp = skb = skb2;
                opt = IP6CB(skb2);
-               hdr = (struct ipv6_rt_hdr *) skb2->h.raw;
+               hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb2);
        }
 
        if (skb->ip_summed == CHECKSUM_COMPLETE)
@@ -486,7 +523,7 @@ looped_back:
 #ifdef CONFIG_IPV6_MIP6
        case IPV6_SRCRT_TYPE_2:
                if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
-                                    (xfrm_address_t *)&skb->nh.ipv6h->saddr,
+                                    (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
                                     IPPROTO_ROUTING) < 0) {
                        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                         IPSTATS_MIB_INADDRERRORS);
@@ -513,19 +550,19 @@ looped_back:
        }
 
        ipv6_addr_copy(&daddr, addr);
-       ipv6_addr_copy(addr, &skb->nh.ipv6h->daddr);
-       ipv6_addr_copy(&skb->nh.ipv6h->daddr, &daddr);
+       ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr);
+       ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr);
 
        dst_release(xchg(&skb->dst, NULL));
        ip6_route_input(skb);
        if (skb->dst->error) {
-               skb_push(skb, skb->data - skb->nh.raw);
+               skb_push(skb, skb->data - skb_network_header(skb));
                dst_input(skb);
                return -1;
        }
 
        if (skb->dst->dev->flags&IFF_LOOPBACK) {
-               if (skb->nh.ipv6h->hop_limit <= 1) {
+               if (ipv6_hdr(skb)->hop_limit <= 1) {
                        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                         IPSTATS_MIB_INHDRERRORS);
                        icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
@@ -533,11 +570,11 @@ looped_back:
                        kfree_skb(skb);
                        return -1;
                }
-               skb->nh.ipv6h->hop_limit--;
+               ipv6_hdr(skb)->hop_limit--;
                goto looped_back;
        }
 
-       skb_push(skb, skb->data - skb->nh.raw);
+       skb_push(skb, skb->data - skb_network_header(skb));
        dst_input(skb);
        return -1;
 }
@@ -628,13 +665,14 @@ EXPORT_SYMBOL_GPL(ipv6_invert_rthdr);
 static int ipv6_hop_ra(struct sk_buff **skbp, int optoff)
 {
        struct sk_buff *skb = *skbp;
+       const unsigned char *nh = skb_network_header(skb);
 
-       if (skb->nh.raw[optoff+1] == 2) {
+       if (nh[optoff + 1] == 2) {
                IP6CB(skb)->ra = optoff;
                return 1;
        }
        LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
-                      skb->nh.raw[optoff+1]);
+                      nh[optoff + 1]);
        kfree_skb(skb);
        return 0;
 }
@@ -644,23 +682,24 @@ static int ipv6_hop_ra(struct sk_buff **skbp, int optoff)
 static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff)
 {
        struct sk_buff *skb = *skbp;
+       const unsigned char *nh = skb_network_header(skb);
        u32 pkt_len;
 
-       if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
+       if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
                LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
-                              skb->nh.raw[optoff+1]);
+                              nh[optoff+1]);
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INHDRERRORS);
                goto drop;
        }
 
-       pkt_len = ntohl(*(__be32*)(skb->nh.raw+optoff+2));
+       pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
        if (pkt_len <= IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
                return 0;
        }
-       if (skb->nh.ipv6h->payload_len) {
+       if (ipv6_hdr(skb)->payload_len) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
                return 0;
@@ -699,13 +738,14 @@ int ipv6_parse_hopopts(struct sk_buff **skbp)
        struct inet6_skb_parm *opt = IP6CB(skb);
 
        /*
-        * skb->nh.raw is equal to skb->data, and
-        * skb->h.raw - skb->nh.raw is always equal to
+        * skb_network_header(skb) is equal to skb->data, and
+        * skb_network_header_len(skb) is always equal to
         * sizeof(struct ipv6hdr) by definition of
         * hop-by-hop options.
         */
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
-           !pskb_may_pull(skb, sizeof(struct ipv6hdr) + ((skb->h.raw[1] + 1) << 3))) {
+           !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
+                                ((skb_transport_header(skb)[1] + 1) << 3)))) {
                kfree_skb(skb);
                return -1;
        }
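
The pull length above, (skb_transport_header(skb)[1] + 1) << 3, decodes the standard IPv6 extension-header length field: the second byte counts additional 8-octet units beyond the first eight octets. A minimal, self-contained illustration with a toy buffer (not a real packet):

#include <stdio.h>

static unsigned int ext_hdr_len(const unsigned char *hdr)
{
        return (hdr[1] + 1u) << 3;      /* hdr[1] is the Hdr Ext Len field */
}

int main(void)
{
        /* next header 59 ("no next header"), one extra 8-octet unit */
        unsigned char hbh[16] = { 59, 1 };

        printf("hop-by-hop header length: %u bytes\n", ext_hdr_len(hbh));
        return 0;
}
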
@@ -713,7 +753,7 @@ int ipv6_parse_hopopts(struct sk_buff **skbp)
        opt->hop = sizeof(struct ipv6hdr);
        if (ip6_parse_tlv(tlvprochopopt_lst, skbp)) {
                skb = *skbp;
-               skb->h.raw += (skb->h.raw[1]+1)<<3;
+               skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
                opt = IP6CB(skb);
                opt->nhoff = sizeof(struct ipv6hdr);
                return 1;
@@ -782,6 +822,8 @@ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
 }
 
+EXPORT_SYMBOL(ipv6_push_nfrag_opts);
+
 void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
 {
        if (opt->dst1opt)
index ea3035b..fc3882c 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <net/fib_rules.h>
 #include <net/ipv6.h>
+#include <net/addrconf.h>
 #include <net/ip6_route.h>
 #include <net/netlink.h>
 
@@ -95,8 +96,27 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
        if (table)
                rt = lookup(table, flp, flags);
 
-       if (rt != &ip6_null_entry)
+       if (rt != &ip6_null_entry) {
+               struct fib6_rule *r = (struct fib6_rule *)rule;
+
+               /*
+                * If we need to find a source address for this traffic,
+                * we check whether the result meets the requirement of the rule.
+                */
+               if ((rule->flags & FIB_RULE_FIND_SADDR) &&
+                   r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) {
+                       struct in6_addr saddr;
+                       if (ipv6_get_saddr(&rt->u.dst, &flp->fl6_dst,
+                                          &saddr))
+                               goto again;
+                       if (!ipv6_prefix_equal(&saddr, &r->src.addr,
+                                              r->src.plen))
+                               goto again;
+                       ipv6_addr_copy(&flp->fl6_src, &saddr);
+               }
                goto out;
+       }
+again:
        dst_release(&rt->u.dst);
        rt = NULL;
        goto out;
@@ -117,9 +137,17 @@ static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
            !ipv6_prefix_equal(&fl->fl6_dst, &r->dst.addr, r->dst.plen))
                return 0;
 
+       /*
+        * If FIB_RULE_FIND_SADDR is set and we do not have a
+        * source address for the traffic, we defer the
+        * source address check.
+        */
        if (r->src.plen) {
-               if (!(flags & RT6_LOOKUP_F_HAS_SADDR) ||
-                   !ipv6_prefix_equal(&fl->fl6_src, &r->src.addr, r->src.plen))
+               if (flags & RT6_LOOKUP_F_HAS_SADDR) {
+                       if (!ipv6_prefix_equal(&fl->fl6_src, &r->src.addr,
+                                              r->src.plen))
+                               return 0;
+               } else if (!(r->common.flags & FIB_RULE_FIND_SADDR))
                        return 0;
        }
 
@@ -216,11 +244,6 @@ nla_put_failure:
        return -ENOBUFS;
 }
 
-int fib6_rules_dump(struct sk_buff *skb, struct netlink_callback *cb)
-{
-       return fib_rules_dump(skb, cb, AF_INET6);
-}
-
 static u32 fib6_rule_default_pref(void)
 {
        return 0x3FFF;
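
The two fib6_rules.c hunks above split source-address matching into two phases: a rule with a source prefix may match provisionally before any source address is chosen (when FIB_RULE_FIND_SADDR is set), and the address derived from the resulting route is verified afterwards, retrying the lookup ("goto again") if it does not fit the prefix. A toy sketch of that two-phase pattern, with invented types and 32-bit "addresses" standing in for IPv6 ones:

#include <stdbool.h>
#include <stdio.h>

struct rule {
        unsigned src_prefix;    /* toy: 0x20010db8 stands in for 2001:db8::/32 */
        unsigned src_plen;      /* 0 means "no source constraint"              */
        bool     find_saddr;    /* mirrors FIB_RULE_FIND_SADDR                 */
};

static bool prefix_equal(unsigned a, unsigned b, unsigned plen)
{
        unsigned mask = plen ? ~0u << (32 - plen) : 0;

        return (a & mask) == (b & mask);
}

/* Phase 1: rule matching, possibly without a known source address. */
static bool rule_match(const struct rule *r, bool have_saddr, unsigned saddr)
{
        if (!r->src_plen)
                return true;
        if (have_saddr)
                return prefix_equal(saddr, r->src_prefix, r->src_plen);
        return r->find_saddr;           /* defer the real check */
}

/* Phase 2: after route lookup, verify the derived source address. */
static bool verify_saddr(const struct rule *r, unsigned derived_saddr)
{
        return !r->src_plen ||
               prefix_equal(derived_saddr, r->src_prefix, r->src_plen);
}

int main(void)
{
        struct rule r = { 0x20010db8, 32, true };

        printf("provisional match: %d\n", rule_match(&r, false, 0));
        printf("verified:          %d\n", verify_saddr(&r, 0x20010db8));
        return 0;
}
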
index edfe98b..e9bcce9 100644 (file)
@@ -68,6 +68,7 @@
 #include <asm/system.h>
 
 DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
+EXPORT_SYMBOL(icmpv6_statistics);
 
 /*
  *     The ICMP socket(s). This is the most convenient way to flow control
@@ -128,9 +129,9 @@ void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
 
 static int is_ineligible(struct sk_buff *skb)
 {
-       int ptr = (u8*)(skb->nh.ipv6h+1) - skb->data;
+       int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
        int len = skb->len - ptr;
-       __u8 nexthdr = skb->nh.ipv6h->nexthdr;
+       __u8 nexthdr = ipv6_hdr(skb)->nexthdr;
 
        if (len < 0)
                return 1;
@@ -205,7 +206,7 @@ static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
 {
        u8 _optval, *op;
 
-       offset += skb->nh.raw - skb->data;
+       offset += skb_network_offset(skb);
        op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
        if (op == NULL)
                return 1;
@@ -221,7 +222,7 @@ static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct
        if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
                goto out;
 
-       icmp6h = (struct icmp6hdr*) skb->h.raw;
+       icmp6h = icmp6_hdr(skb);
        memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
        icmp6h->icmp6_cksum = 0;
 
@@ -274,7 +275,7 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
 #ifdef CONFIG_IPV6_MIP6
 static void mip6_addr_swap(struct sk_buff *skb)
 {
-       struct ipv6hdr *iph = skb->nh.ipv6h;
+       struct ipv6hdr *iph = ipv6_hdr(skb);
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct ipv6_destopt_hao *hao;
        struct in6_addr tmp;
@@ -283,7 +284,8 @@ static void mip6_addr_swap(struct sk_buff *skb)
        if (opt->dsthao) {
                off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
                if (likely(off >= 0)) {
-                       hao = (struct ipv6_destopt_hao *)(skb->nh.raw + off);
+                       hao = (struct ipv6_destopt_hao *)
+                                       (skb_network_header(skb) + off);
                        ipv6_addr_copy(&tmp, &iph->saddr);
                        ipv6_addr_copy(&iph->saddr, &hao->addr);
                        ipv6_addr_copy(&hao->addr, &tmp);
@@ -301,7 +303,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
                 struct net_device *dev)
 {
        struct inet6_dev *idev = NULL;
-       struct ipv6hdr *hdr = skb->nh.ipv6h;
+       struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct sock *sk;
        struct ipv6_pinfo *np;
        struct in6_addr *saddr = NULL;
@@ -315,7 +317,8 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
        int hlimit, tclass;
        int err = 0;
 
-       if ((u8*)hdr < skb->head || (u8*)(hdr+1) > skb->tail)
+       if ((u8 *)hdr < skb->head ||
+           (skb->network_header + sizeof(*hdr)) > skb->tail)
                return;
 
        /*
@@ -430,7 +433,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
                tclass = 0;
 
        msg.skb = skb;
-       msg.offset = skb->nh.raw - skb->data;
+       msg.offset = skb_network_offset(skb);
        msg.type = type;
 
        len = skb->len - msg.offset;
@@ -466,13 +469,15 @@ out:
        icmpv6_xmit_unlock();
 }
 
+EXPORT_SYMBOL(icmpv6_send);
+
 static void icmpv6_echo_reply(struct sk_buff *skb)
 {
        struct sock *sk;
        struct inet6_dev *idev;
        struct ipv6_pinfo *np;
        struct in6_addr *saddr = NULL;
-       struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw;
+       struct icmp6hdr *icmph = icmp6_hdr(skb);
        struct icmp6hdr tmp_hdr;
        struct flowi fl;
        struct icmpv6_msg msg;
@@ -481,7 +486,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        int hlimit;
        int tclass;
 
-       saddr = &skb->nh.ipv6h->daddr;
+       saddr = &ipv6_hdr(skb)->daddr;
 
        if (!ipv6_unicast_destination(skb))
                saddr = NULL;
@@ -491,7 +496,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 
        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_ICMPV6;
-       ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
+       ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
        if (saddr)
                ipv6_addr_copy(&fl.fl6_src, saddr);
        fl.oif = skb->dev->ifindex;
@@ -579,8 +584,8 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
        if (!pskb_may_pull(skb, inner_offset+8))
                return;
 
-       saddr = &skb->nh.ipv6h->saddr;
-       daddr = &skb->nh.ipv6h->daddr;
+       saddr = &ipv6_hdr(skb)->saddr;
+       daddr = &ipv6_hdr(skb)->daddr;
 
        /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
           Without this we will not be able, e.g., to make source routed
@@ -624,8 +629,8 @@ static int icmpv6_rcv(struct sk_buff **pskb)
 
        ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);
 
-       saddr = &skb->nh.ipv6h->saddr;
-       daddr = &skb->nh.ipv6h->daddr;
+       saddr = &ipv6_hdr(skb)->saddr;
+       daddr = &ipv6_hdr(skb)->daddr;
 
        /* Perform checksum. */
        switch (skb->ip_summed) {
@@ -647,7 +652,7 @@ static int icmpv6_rcv(struct sk_buff **pskb)
        if (!pskb_pull(skb, sizeof(struct icmp6hdr)))
                goto discard_it;
 
-       hdr = (struct icmp6hdr *) skb->h.raw;
+       hdr = icmp6_hdr(skb);
 
        type = hdr->icmp6_type;
 
@@ -673,7 +678,7 @@ static int icmpv6_rcv(struct sk_buff **pskb)
                 */
                if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                        goto discard_it;
-               hdr = (struct icmp6hdr *) skb->h.raw;
+               hdr = icmp6_hdr(skb);
                orig_hdr = (struct ipv6hdr *) (hdr + 1);
                rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
                                   ntohl(hdr->icmp6_mtu));
@@ -727,7 +732,8 @@ static int icmpv6_rcv(struct sk_buff **pskb)
                 */
 
                icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
-       };
+       }
+
        kfree_skb(skb);
        return 0;
 
@@ -860,11 +866,13 @@ int icmpv6_err_convert(int type, int code, int *err)
        case ICMPV6_TIME_EXCEED:
                *err = EHOSTUNREACH;
                break;
-       };
+       }
 
        return fatal;
 }
 
+EXPORT_SYMBOL(icmpv6_err_convert);
+
 #ifdef CONFIG_SYSCTL
 ctl_table ipv6_icmp_table[] = {
        {
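
icmpv6_err_convert(), whose tail is visible above and which this merge now exports, maps an ICMPv6 error type to a socket errno plus a "fatal" flag. The sketch below shows the general shape of such a mapping; the enum names, return conventions, and chosen errno values are invented stand-ins, not the kernel's.

#include <errno.h>
#include <stdio.h>

enum err_type { ERR_DEST_UNREACH, ERR_PKT_TOOBIG, ERR_TIME_EXCEED };

/* Returns nonzero if the error is fatal for the connection and stores a
 * suggested errno in *err (toy mapping, for illustration only). */
static int err_convert(enum err_type type, int *err)
{
        switch (type) {
        case ERR_DEST_UNREACH:
                *err = EHOSTUNREACH;
                return 1;               /* fatal */
        case ERR_PKT_TOOBIG:
                *err = EMSGSIZE;
                return 0;               /* transient: retry with a smaller MTU */
        case ERR_TIME_EXCEED:
                *err = EHOSTUNREACH;
                return 0;
        }
        *err = EPROTO;
        return 0;
}

int main(void)
{
        int e;
        int fatal = err_convert(ERR_PKT_TOOBIG, &e);

        printf("errno=%d fatal=%d\n", e, fatal);
        return 0;
}
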
index 268f476..ca08ee8 100644 (file)
@@ -359,7 +359,7 @@ end:
        return res;
 }
 
-int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
+static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 {
        unsigned int h, s_h;
        unsigned int e = 0, s_e;
@@ -1486,6 +1486,8 @@ void __init fib6_init(void)
                                           NULL, NULL);
 
        fib6_tables_init();
+
+       __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib);
 }
 
 void fib6_gc_cleanup(void)
index 61e7a6c..be0ee8a 100644 (file)
@@ -96,12 +96,12 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
        if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
                goto err;
 
-       hdr = skb->nh.ipv6h;
+       hdr = ipv6_hdr(skb);
 
        if (hdr->version != 6)
                goto err;
 
-       skb->h.raw = (u8 *)(hdr + 1);
+       skb->transport_header = skb->network_header + sizeof(*hdr);
        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
        pkt_len = ntohs(hdr->payload_len);
@@ -116,7 +116,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
                        IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS);
                        goto drop;
                }
-               hdr = skb->nh.ipv6h;
+               hdr = ipv6_hdr(skb);
        }
 
        if (hdr->nexthdr == NEXTHDR_HOP) {
@@ -160,10 +160,10 @@ static inline int ip6_input_finish(struct sk_buff *skb)
        rcu_read_lock();
 resubmit:
        idev = ip6_dst_idev(skb->dst);
-       if (!pskb_pull(skb, skb->h.raw - skb->data))
+       if (!pskb_pull(skb, skb_transport_offset(skb)))
                goto discard;
        nhoff = IP6CB(skb)->nhoff;
-       nexthdr = skb->nh.raw[nhoff];
+       nexthdr = skb_network_header(skb)[nhoff];
 
        raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]);
        if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
@@ -181,9 +181,9 @@ resubmit:
                           indefinitely. */
                        nf_reset(skb);
 
-                       skb_postpull_rcsum(skb, skb->nh.raw,
-                                          skb->h.raw - skb->nh.raw);
-                       hdr = skb->nh.ipv6h;
+                       skb_postpull_rcsum(skb, skb_network_header(skb),
+                                          skb_network_header_len(skb));
+                       hdr = ipv6_hdr(skb);
                        if (ipv6_addr_is_multicast(&hdr->daddr) &&
                            !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
                            &hdr->saddr) &&
@@ -234,7 +234,7 @@ int ip6_mc_input(struct sk_buff *skb)
 
        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS);
 
-       hdr = skb->nh.ipv6h;
+       hdr = ipv6_hdr(skb);
        deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) ||
            ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
 
index 3055169..f508171 100644 (file)
@@ -88,8 +88,8 @@ static inline int ip6_output_finish(struct sk_buff *skb)
 /* dev_loopback_xmit for use with netfilter. */
 static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
 {
-       newskb->mac.raw = newskb->data;
-       __skb_pull(newskb, newskb->nh.raw - newskb->data);
+       skb_reset_mac_header(newskb);
+       __skb_pull(newskb, skb_network_offset(newskb));
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        BUG_TRAP(newskb->dst);
@@ -107,13 +107,13 @@ static int ip6_output2(struct sk_buff *skb)
        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;
 
-       if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
+       if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
                struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
                struct inet6_dev *idev = ip6_dst_idev(skb->dst);
 
                if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
-                   ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
-                               &skb->nh.ipv6h->saddr)) {
+                   ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
+                                       &ipv6_hdr(skb)->saddr)) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 
                        /* Do not check for IFF_ALLMULTI; multicast routing
@@ -124,7 +124,7 @@ static int ip6_output2(struct sk_buff *skb)
                                        newskb->dev,
                                        ip6_dev_loopback_xmit);
 
-                       if (skb->nh.ipv6h->hop_limit == 0) {
+                       if (ipv6_hdr(skb)->hop_limit == 0) {
                                IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return 0;
@@ -137,9 +137,17 @@ static int ip6_output2(struct sk_buff *skb)
        return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish);
 }
 
+static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+{
+       struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+
+       return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
+              skb->dst->dev->mtu : dst_mtu(skb->dst);
+}
+
 int ip6_output(struct sk_buff *skb)
 {
-       if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
+       if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
                                dst_allfrag(skb->dst))
                return ip6_fragment(skb, ip6_output2);
        else
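
The ip6_skb_dst_mtu() helper added above picks the device MTU instead of the cached path MTU when the socket is in IPV6_PMTUDISC_PROBE mode. A minimal standalone model of that selection, with an invented struct and enum rather than the kernel's types:

#include <stdio.h>

enum pmtudisc { PMTUDISC_DONT, PMTUDISC_WANT, PMTUDISC_DO, PMTUDISC_PROBE };

struct route_info {
        int dev_mtu;    /* MTU of the outgoing device  */
        int path_mtu;   /* cached path MTU for the dst */
};

static int skb_dst_mtu(enum pmtudisc mode, const struct route_info *rt)
{
        return mode == PMTUDISC_PROBE ? rt->dev_mtu : rt->path_mtu;
}

int main(void)
{
        struct route_info rt = { .dev_mtu = 1500, .path_mtu = 1280 };

        printf("normal: %d, probe: %d\n",
               skb_dst_mtu(PMTUDISC_WANT, &rt),
               skb_dst_mtu(PMTUDISC_PROBE, &rt));
        return 0;
}
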
@@ -191,7 +199,9 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
                        ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
        }
 
-       hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));
+       skb_push(skb, sizeof(struct ipv6hdr));
+       skb_reset_network_header(skb);
+       hdr = ipv6_hdr(skb);
 
        /*
         *      Fill in the IPv6 header
@@ -239,6 +249,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
        return -EMSGSIZE;
 }
 
+EXPORT_SYMBOL(ip6_xmit);
+
 /*
  *     To avoid extra problems ND packets are sent through this
  *     routine. It's code duplication but I really want to avoid
@@ -259,8 +271,9 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
 
        totlen = len + sizeof(struct ipv6hdr);
 
-       hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
-       skb->nh.ipv6h = hdr;
+       skb_reset_network_header(skb);
+       skb_put(skb, sizeof(struct ipv6hdr));
+       hdr = ipv6_hdr(skb);
 
        *(__be32*)hdr = htonl(0x60000000);
 
@@ -305,7 +318,7 @@ static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
 
 static int ip6_forward_proxy_check(struct sk_buff *skb)
 {
-       struct ipv6hdr *hdr = skb->nh.ipv6h;
+       struct ipv6hdr *hdr = ipv6_hdr(skb);
        u8 nexthdr = hdr->nexthdr;
        int offset;
 
@@ -319,10 +332,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
        if (nexthdr == IPPROTO_ICMPV6) {
                struct icmp6hdr *icmp6;
 
-               if (!pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data))
+               if (!pskb_may_pull(skb, (skb_network_header(skb) +
+                                        offset + 1 - skb->data)))
                        return 0;
 
-               icmp6 = (struct icmp6hdr *)(skb->nh.raw + offset);
+               icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
 
                switch (icmp6->icmp6_type) {
                case NDISC_ROUTER_SOLICITATION:
@@ -361,7 +375,7 @@ static inline int ip6_forward_finish(struct sk_buff *skb)
 int ip6_forward(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb->dst;
-       struct ipv6hdr *hdr = skb->nh.ipv6h;
+       struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct inet6_skb_parm *opt = IP6CB(skb);
 
        if (ipv6_devconf.forwarding == 0)
@@ -372,7 +386,7 @@ int ip6_forward(struct sk_buff *skb)
                goto drop;
        }
 
-       skb->ip_summed = CHECKSUM_NONE;
+       skb_forward_csum(skb);
 
        /*
         *      We DO NOT make any processing on
@@ -388,7 +402,7 @@ int ip6_forward(struct sk_buff *skb)
         *      that different fragments will go along one path. --ANK
         */
        if (opt->ra) {
-               u8 *ptr = skb->nh.raw + opt->ra;
+               u8 *ptr = skb_network_header(skb) + opt->ra;
                if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
                        return 0;
        }
@@ -470,7 +484,7 @@ int ip6_forward(struct sk_buff *skb)
                goto drop;
        }
 
-       hdr = skb->nh.ipv6h;
+       hdr = ipv6_hdr(skb);
 
        /* Mangling hops number delayed to point after skb COW */
 
@@ -499,33 +513,18 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 #ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
 #endif
-#ifdef CONFIG_NETFILTER
-       /* Connection association is same as pre-frag packet */
-       nf_conntrack_put(to->nfct);
-       to->nfct = from->nfct;
-       nf_conntrack_get(to->nfct);
-       to->nfctinfo = from->nfctinfo;
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-       nf_conntrack_put_reasm(to->nfct_reasm);
-       to->nfct_reasm = from->nfct_reasm;
-       nf_conntrack_get_reasm(to->nfct_reasm);
-#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
-       nf_bridge_put(to->nf_bridge);
-       to->nf_bridge = from->nf_bridge;
-       nf_bridge_get(to->nf_bridge);
-#endif
-#endif
+       nf_copy(to, from);
        skb_copy_secmark(to, from);
 }
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
        u16 offset = sizeof(struct ipv6hdr);
-       struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1);
-       unsigned int packet_len = skb->tail - skb->nh.raw;
+       struct ipv6_opt_hdr *exthdr =
+                               (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
+       unsigned int packet_len = skb->tail - skb->network_header;
        int found_rhdr = 0;
-       *nexthdr = &skb->nh.ipv6h->nexthdr;
+       *nexthdr = &ipv6_hdr(skb)->nexthdr;
 
        while (offset + 1 <= packet_len) {
 
@@ -550,7 +549,8 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 
                offset += ipv6_optlen(exthdr);
                *nexthdr = &exthdr->nexthdr;
-               exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+               exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
+                                                offset);
        }
 
        return offset;
@@ -574,7 +574,20 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        hlen = ip6_find_1stfragopt(skb, &prevhdr);
        nexthdr = *prevhdr;
 
-       mtu = dst_mtu(&rt->u.dst);
+       mtu = ip6_skb_dst_mtu(skb);
+
+       /* We must not fragment if the socket is set to force MTU discovery
+        * or if the skb was not generated by a local socket.  (This last
+        * check should be redundant, but it's free.)
+        */
+       if (!np || np->pmtudisc >= IPV6_PMTUDISC_DO) {
+               skb->dev = skb->dst->dev;
+               icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+               IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
+               kfree_skb(skb);
+               return -EMSGSIZE;
+       }
+
        if (np && np->frag_size < mtu) {
                if (np->frag_size)
                        mtu = np->frag_size;
@@ -616,7 +629,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                /* BUILD HEADER */
 
                *prevhdr = NEXTHDR_FRAGMENT;
-               tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC);
+               tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
                if (!tmp_hdr) {
                        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
                        return -ENOMEM;
@@ -624,8 +637,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
                __skb_pull(skb, hlen);
                fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
-               skb->nh.raw = __skb_push(skb, hlen);
-               memcpy(skb->nh.raw, tmp_hdr, hlen);
+               __skb_push(skb, hlen);
+               skb_reset_network_header(skb);
+               memcpy(skb_network_header(skb), tmp_hdr, hlen);
 
                ipv6_select_ident(skb, fh);
                fh->nexthdr = nexthdr;
@@ -636,7 +650,8 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                first_len = skb_pagelen(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
-               skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));
+               ipv6_hdr(skb)->payload_len = htons(first_len -
+                                                  sizeof(struct ipv6hdr));
 
                dst_hold(&rt->u.dst);
 
@@ -645,10 +660,12 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                         * before previous one went down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
-                               frag->h.raw = frag->data;
+                               skb_reset_transport_header(frag);
                                fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
-                               frag->nh.raw = __skb_push(frag, hlen);
-                               memcpy(frag->nh.raw, tmp_hdr, hlen);
+                               __skb_push(frag, hlen);
+                               skb_reset_network_header(frag);
+                               memcpy(skb_network_header(frag), tmp_hdr,
+                                      hlen);
                                offset += skb->len - hlen - sizeof(struct frag_hdr);
                                fh->nexthdr = nexthdr;
                                fh->reserved = 0;
@@ -656,7 +673,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                                if (frag->next != NULL)
                                        fh->frag_off |= htons(IP6_MF);
                                fh->identification = frag_id;
-                               frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
+                               ipv6_hdr(frag)->payload_len =
+                                               htons(frag->len -
+                                                     sizeof(struct ipv6hdr));
                                ip6_copy_metadata(frag, skb);
                        }
 
@@ -733,9 +752,10 @@ slow_path:
                ip6_copy_metadata(frag, skb);
                skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
                skb_put(frag, len + hlen + sizeof(struct frag_hdr));
-               frag->nh.raw = frag->data;
-               fh = (struct frag_hdr*)(frag->data + hlen);
-               frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);
+               skb_reset_network_header(frag);
+               fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
+               frag->transport_header = (frag->network_header + hlen +
+                                         sizeof(struct frag_hdr));
 
                /*
                 *      Charge the memory for the fragment to any owner
@@ -747,7 +767,7 @@ slow_path:
                /*
                 *      Copy the packet header into the new buffer.
                 */
-               memcpy(frag->nh.raw, skb->data, hlen);
+               skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
 
                /*
                 *      Build fragment header.
@@ -763,14 +783,15 @@ slow_path:
                /*
                 *      Copy a block of the IP datagram.
                 */
-               if (skb_copy_bits(skb, ptr, frag->h.raw, len))
+               if (skb_copy_bits(skb, ptr, skb_transport_header(skb), len))
                        BUG();
                left -= len;
 
                fh->frag_off = htons(offset);
                if (left > 0)
                        fh->frag_off |= htons(IP6_MF);
-               frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
+               ipv6_hdr(frag)->payload_len = htons(frag->len -
+                                                   sizeof(struct ipv6hdr));
 
                ptr += len;
                offset += len;
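
In the slow path above each fragment records its byte offset (always a multiple of 8) in the fragment header and sets IP6_MF on every fragment except the last. A compact standalone model of that bookkeeping, using toy sizes and no real headers:

#include <stdio.h>

int main(void)
{
        unsigned int left = 3000;       /* payload bytes still to send    */
        unsigned int room = 1448;       /* payload space per fragment     */
        unsigned int offset = 0;

        room &= ~7u;                    /* fragment data length must be a
                                         * multiple of 8 (except the last) */

        while (left > 0) {
                unsigned int len = left > room ? room : left;
                int mf = len < left;    /* "more fragments" on all but last */

                printf("fragment: offset=%u len=%u MF=%d\n", offset, len, mf);
                offset += len;
                left -= len;
        }
        return 0;
}
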
@@ -861,6 +882,41 @@ static int ip6_dst_lookup_tail(struct sock *sk,
                        goto out_err_release;
        }
 
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+               /*
+                * Here if the dst entry we've looked up
+                * has a neighbour entry that is in the INCOMPLETE
+                * state and the src address from the flow is
+                * marked as OPTIMISTIC, we release the found
+                * dst entry and replace it with the
+                * dst entry of the nexthop router
+                */
+               if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
+                       struct inet6_ifaddr *ifp;
+                       struct flowi fl_gw;
+                       int redirect;
+
+                       ifp = ipv6_get_ifaddr(&fl->fl6_src, (*dst)->dev, 1);
+
+                       redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
+                       if (ifp)
+                               in6_ifa_put(ifp);
+
+                       if (redirect) {
+                               /*
+                                * We need to get the dst entry for the
+                                * default router instead
+                                */
+                               dst_release(*dst);
+                               memcpy(&fl_gw, fl, sizeof(struct flowi));
+                               memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
+                               *dst = ip6_route_output(sk, &fl_gw);
+                               if ((err = (*dst)->error))
+                                       goto out_err_release;
+                       }
+               }
+#endif
+
        return 0;
 
 out_err_release:
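
The CONFIG_IPV6_OPTIMISTIC_DAD block above redoes the route lookup toward an unspecified destination when the next hop is not yet NUD_VALID and the selected source address is still optimistic, so traffic flows via the default router until DAD completes. A hedged sketch of just that decision, with invented flags and types:

#include <stdbool.h>
#include <stdio.h>

struct lookup {
        bool neigh_valid;       /* NUD_VALID-like state for the next hop */
        bool saddr_optimistic;  /* IFA_F_OPTIMISTIC-like flag            */
};

static bool should_use_default_router(const struct lookup *l)
{
        return !l->neigh_valid && l->saddr_optimistic;
}

int main(void)
{
        struct lookup l = { .neigh_valid = false, .saddr_optimistic = true };

        if (should_use_default_router(&l))
                printf("re-resolve via the default router\n");
        return 0;
}
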
@@ -939,10 +995,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                skb_put(skb,fragheaderlen + transhdrlen);
 
                /* initialize network header pointer */
-               skb->nh.raw = skb->data;
+               skb_reset_network_header(skb);
 
                /* initialize protocol header pointer */
-               skb->h.raw = skb->data + fragheaderlen;
+               skb->transport_header = skb->network_header + fragheaderlen;
 
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
@@ -1015,7 +1071,8 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                inet->cork.fl = *fl;
                np->cork.hop_limit = hlimit;
                np->cork.tclass = tclass;
-               mtu = dst_mtu(rt->u.dst.path);
+               mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+                     rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path);
                if (np->frag_size < mtu) {
                        if (np->frag_size)
                                mtu = np->frag_size;
@@ -1162,10 +1219,10 @@ alloc_new_skb:
                         *      Find where to start putting bytes
                         */
                        data = skb_put(skb, fraglen);
-                       skb->nh.raw = data + exthdrlen;
+                       skb_set_network_header(skb, exthdrlen);
                        data += fragheaderlen;
-                       skb->h.raw = data + exthdrlen;
-
+                       skb->transport_header = (skb->network_header +
+                                                fragheaderlen);
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
@@ -1288,10 +1345,10 @@ int ip6_push_pending_frames(struct sock *sk)
        tail_skb = &(skb_shinfo(skb)->frag_list);
 
        /* move skb->data to ip header from ext header */
-       if (skb->data < skb->nh.raw)
-               __skb_pull(skb, skb->nh.raw - skb->data);
+       if (skb->data < skb_network_header(skb))
+               __skb_pull(skb, skb_network_offset(skb));
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
-               __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
+               __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
@@ -1303,13 +1360,15 @@ int ip6_push_pending_frames(struct sock *sk)
        }
 
        ipv6_addr_copy(final_dst, &fl->fl6_dst);
-       __skb_pull(skb, skb->h.raw - skb->nh.raw);
+       __skb_pull(skb, skb_network_header_len(skb));
        if (opt && opt->opt_flen)
                ipv6_push_frag_opts(skb, opt, &proto);
        if (opt && opt->opt_nflen)
                ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
 
-       skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr));
+       skb_push(skb, sizeof(struct ipv6hdr));
+       skb_reset_network_header(skb);
+       hdr = ipv6_hdr(skb);
 
        *(__be32*)hdr = fl->fl6_flowlabel |
                     htonl(0x60000000 | ((int)np->cork.tclass << 20));
index 08d9442..a0902fb 100644 (file)
@@ -1,14 +1,15 @@
 /*
- *     IPv6 over IPv6 tunnel device
+ *     IPv6 tunneling device
  *     Linux INET6 implementation
  *
  *     Authors:
  *     Ville Nuorvala          <vnuorval@tcs.hut.fi>
+ *     Yasuyuki Kozakai        <kozakai@linux-ipv6.org>
  *
  *     $Id$
  *
  *      Based on:
- *      linux/net/ipv6/sit.c
+ *      linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
  *
  *      RFC 2473
  *
@@ -24,6 +25,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/sockios.h>
+#include <linux/icmp.h>
 #include <linux/if.h>
 #include <linux/in.h>
 #include <linux/ip.h>
@@ -41,6 +43,7 @@
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
 
+#include <net/icmp.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
@@ -51,7 +54,7 @@
 #include <net/inet_ecn.h>
 
 MODULE_AUTHOR("Ville Nuorvala");
-MODULE_DESCRIPTION("IPv6-in-IPv6 tunnel");
+MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
 
 #define IPV6_TLV_TEL_DST_SIZE 8
@@ -63,6 +66,7 @@ MODULE_LICENSE("GPL");
 #endif
 
 #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
+#define IPV6_TCLASS_SHIFT 20
 
 #define HASH_SIZE  32
 
@@ -70,12 +74,12 @@ MODULE_LICENSE("GPL");
                     (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
                    (HASH_SIZE - 1))
 
-static int ip6ip6_fb_tnl_dev_init(struct net_device *dev);
-static int ip6ip6_tnl_dev_init(struct net_device *dev);
-static void ip6ip6_tnl_dev_setup(struct net_device *dev);
+static int ip6_fb_tnl_dev_init(struct net_device *dev);
+static int ip6_tnl_dev_init(struct net_device *dev);
+static void ip6_tnl_dev_setup(struct net_device *dev);
 
 /* the IPv6 tunnel fallback device */
-static struct net_device *ip6ip6_fb_tnl_dev;
+static struct net_device *ip6_fb_tnl_dev;
 
 
 /* lists for storing tunnels in use */
@@ -84,7 +88,7 @@ static struct ip6_tnl *tnls_wc[1];
 static struct ip6_tnl **tnls[2] = { tnls_wc, tnls_r_l };
 
 /* lock for the tunnel lists */
-static DEFINE_RWLOCK(ip6ip6_lock);
+static DEFINE_RWLOCK(ip6_tnl_lock);
 
 static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 {
@@ -115,7 +119,7 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
 }
 
 /**
- * ip6ip6_tnl_lookup - fetch tunnel matching the end-point addresses
+ * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
  *   @remote: the address of the tunnel exit-point
  *   @local: the address of the tunnel entry-point
  *
@@ -126,7 +130,7 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
  **/
 
 static struct ip6_tnl *
-ip6ip6_tnl_lookup(struct in6_addr *remote, struct in6_addr *local)
+ip6_tnl_lookup(struct in6_addr *remote, struct in6_addr *local)
 {
        unsigned h0 = HASH(remote);
        unsigned h1 = HASH(local);
@@ -145,18 +149,18 @@ ip6ip6_tnl_lookup(struct in6_addr *remote, struct in6_addr *local)
 }
 
 /**
- * ip6ip6_bucket - get head of list matching given tunnel parameters
+ * ip6_tnl_bucket - get head of list matching given tunnel parameters
  *   @p: parameters containing tunnel end-points
  *
  * Description:
- *   ip6ip6_bucket() returns the head of the list matching the
+ *   ip6_tnl_bucket() returns the head of the list matching the
  *   &struct in6_addr entries laddr and raddr in @p.
  *
  * Return: head of IPv6 tunnel list
  **/
 
 static struct ip6_tnl **
-ip6ip6_bucket(struct ip6_tnl_parm *p)
+ip6_tnl_bucket(struct ip6_tnl_parm *p)
 {
        struct in6_addr *remote = &p->raddr;
        struct in6_addr *local = &p->laddr;
@@ -171,36 +175,36 @@ ip6ip6_bucket(struct ip6_tnl_parm *p)
 }
 
 /**
- * ip6ip6_tnl_link - add tunnel to hash table
+ * ip6_tnl_link - add tunnel to hash table
  *   @t: tunnel to be added
  **/
 
 static void
-ip6ip6_tnl_link(struct ip6_tnl *t)
+ip6_tnl_link(struct ip6_tnl *t)
 {
-       struct ip6_tnl **tp = ip6ip6_bucket(&t->parms);
+       struct ip6_tnl **tp = ip6_tnl_bucket(&t->parms);
 
        t->next = *tp;
-       write_lock_bh(&ip6ip6_lock);
+       write_lock_bh(&ip6_tnl_lock);
        *tp = t;
-       write_unlock_bh(&ip6ip6_lock);
+       write_unlock_bh(&ip6_tnl_lock);
 }
 
 /**
- * ip6ip6_tnl_unlink - remove tunnel from hash table
+ * ip6_tnl_unlink - remove tunnel from hash table
  *   @t: tunnel to be removed
  **/
 
 static void
-ip6ip6_tnl_unlink(struct ip6_tnl *t)
+ip6_tnl_unlink(struct ip6_tnl *t)
 {
        struct ip6_tnl **tp;
 
-       for (tp = ip6ip6_bucket(&t->parms); *tp; tp = &(*tp)->next) {
+       for (tp = ip6_tnl_bucket(&t->parms); *tp; tp = &(*tp)->next) {
                if (t == *tp) {
-                       write_lock_bh(&ip6ip6_lock);
+                       write_lock_bh(&ip6_tnl_lock);
                        *tp = t->next;
-                       write_unlock_bh(&ip6ip6_lock);
+                       write_unlock_bh(&ip6_tnl_lock);
                        break;
                }
        }
@@ -237,12 +241,12 @@ static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p)
                if (i == IP6_TNL_MAX)
                        goto failed;
        }
-       dev = alloc_netdev(sizeof (*t), name, ip6ip6_tnl_dev_setup);
+       dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup);
        if (dev == NULL)
                goto failed;
 
        t = netdev_priv(dev);
-       dev->init = ip6ip6_tnl_dev_init;
+       dev->init = ip6_tnl_dev_init;
        t->parms = *p;
 
        if ((err = register_netdevice(dev)) < 0) {
@@ -250,19 +254,19 @@ static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p)
                goto failed;
        }
        dev_hold(dev);
-       ip6ip6_tnl_link(t);
+       ip6_tnl_link(t);
        return t;
 failed:
        return NULL;
 }
 
 /**
- * ip6ip6_tnl_locate - find or create tunnel matching given parameters
+ * ip6_tnl_locate - find or create tunnel matching given parameters
  *   @p: tunnel parameters
  *   @create: != 0 if allowed to create new tunnel if no match found
  *
  * Description:
- *   ip6ip6_tnl_locate() first tries to locate an existing tunnel
+ *   ip6_tnl_locate() first tries to locate an existing tunnel
  *   based on @parms. If this is unsuccessful, but @create is set, a new
  *   tunnel device is created and registered for use.
  *
@@ -270,13 +274,13 @@ failed:
  *   matching tunnel or NULL
  **/
 
-static struct ip6_tnl *ip6ip6_tnl_locate(struct ip6_tnl_parm *p, int create)
+static struct ip6_tnl *ip6_tnl_locate(struct ip6_tnl_parm *p, int create)
 {
        struct in6_addr *remote = &p->raddr;
        struct in6_addr *local = &p->laddr;
        struct ip6_tnl *t;
 
-       for (t = *ip6ip6_bucket(p); t; t = t->next) {
+       for (t = *ip6_tnl_bucket(p); t; t = t->next) {
                if (ipv6_addr_equal(local, &t->parms.laddr) &&
                    ipv6_addr_equal(remote, &t->parms.raddr))
                        return t;
@@ -287,24 +291,24 @@ static struct ip6_tnl *ip6ip6_tnl_locate(struct ip6_tnl_parm *p, int create)
 }
 
 /**
- * ip6ip6_tnl_dev_uninit - tunnel device uninitializer
+ * ip6_tnl_dev_uninit - tunnel device uninitializer
  *   @dev: the device to be destroyed
  *
  * Description:
- *   ip6ip6_tnl_dev_uninit() removes tunnel from its list
+ *   ip6_tnl_dev_uninit() removes tunnel from its list
  **/
 
 static void
-ip6ip6_tnl_dev_uninit(struct net_device *dev)
+ip6_tnl_dev_uninit(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
 
-       if (dev == ip6ip6_fb_tnl_dev) {
-               write_lock_bh(&ip6ip6_lock);
+       if (dev == ip6_fb_tnl_dev) {
+               write_lock_bh(&ip6_tnl_lock);
                tnls_wc[0] = NULL;
-               write_unlock_bh(&ip6ip6_lock);
+               write_unlock_bh(&ip6_tnl_lock);
        } else {
-               ip6ip6_tnl_unlink(t);
+               ip6_tnl_unlink(t);
        }
        ip6_tnl_dst_reset(t);
        dev_put(dev);
@@ -372,16 +376,16 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
 }
 
 /**
- * ip6ip6_err - tunnel error handler
+ * ip6_tnl_err - tunnel error handler
  *
  * Description:
- *   ip6ip6_err() should handle errors in the tunnel according
+ *   ip6_tnl_err() should handle errors in the tunnel according
  *   to the specifications in RFC 2473.
  **/
 
 static int
-ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-          int type, int code, int offset, __be32 info)
+ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
+           int *type, int *code, int *msg, __be32 *info, int offset)
 {
        struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
        struct ip6_tnl *t;
@@ -396,13 +400,16 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
           in trouble since we might need the source address for further
           processing of the error. */
 
-       read_lock(&ip6ip6_lock);
-       if ((t = ip6ip6_tnl_lookup(&ipv6h->daddr, &ipv6h->saddr)) == NULL)
+       read_lock(&ip6_tnl_lock);
+       if ((t = ip6_tnl_lookup(&ipv6h->daddr, &ipv6h->saddr)) == NULL)
+               goto out;
+
+       if (t->parms.proto != ipproto && t->parms.proto != 0)
                goto out;
 
        err = 0;
 
-       switch (type) {
+       switch (*type) {
                __u32 teli;
                struct ipv6_tlv_tnl_enc_lim *tel;
                __u32 mtu;
@@ -414,7 +421,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                rel_msg = 1;
                break;
        case ICMPV6_TIME_EXCEED:
-               if (code == ICMPV6_EXC_HOPLIMIT) {
+               if ((*code) == ICMPV6_EXC_HOPLIMIT) {
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "%s: Too small hop limit or "
@@ -425,10 +432,10 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                break;
        case ICMPV6_PARAMPROB:
                teli = 0;
-               if (code == ICMPV6_HDR_FIELD)
+               if ((*code) == ICMPV6_HDR_FIELD)
                        teli = parse_tlv_tnl_enc_lim(skb, skb->data);
 
-               if (teli && teli == ntohl(info) - 2) {
+               if (teli && teli == ntohl(*info) - 2) {
                        tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
                        if (tel->encap_limit == 0) {
                                if (net_ratelimit())
@@ -445,7 +452,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                }
                break;
        case ICMPV6_PKT_TOOBIG:
-               mtu = ntohl(info) - offset;
+               mtu = ntohl(*info) - offset;
                if (mtu < IPV6_MIN_MTU)
                        mtu = IPV6_MIN_MTU;
                t->dev->mtu = mtu;
@@ -458,20 +465,144 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                }
                break;
        }
-       if (rel_msg &&  pskb_may_pull(skb, offset + sizeof (*ipv6h))) {
+
+       *type = rel_type;
+       *code = rel_code;
+       *info = rel_info;
+       *msg = rel_msg;
+
+out:
+       read_unlock(&ip6_tnl_lock);
+       return err;
+}
+
+static int
+ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+          int type, int code, int offset, __u32 info)
+{
+       int rel_msg = 0;
+       int rel_type = type;
+       int rel_code = code;
+       __u32 rel_info = info;
+       int err;
+       struct sk_buff *skb2;
+       struct iphdr *eiph;
+       struct flowi fl;
+       struct rtable *rt;
+
+       err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
+                         &rel_msg, &rel_info, offset);
+       if (err < 0)
+               return err;
+
+       if (rel_msg == 0)
+               return 0;
+
+       switch (rel_type) {
+       case ICMPV6_DEST_UNREACH:
+               if (rel_code != ICMPV6_ADDR_UNREACH)
+                       return 0;
+               rel_type = ICMP_DEST_UNREACH;
+               rel_code = ICMP_HOST_UNREACH;
+               break;
+       case ICMPV6_PKT_TOOBIG:
+               if (rel_code != 0)
+                       return 0;
+               rel_type = ICMP_DEST_UNREACH;
+               rel_code = ICMP_FRAG_NEEDED;
+               break;
+       default:
+               return 0;
+       }
+
+       if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
+               return 0;
+
+       skb2 = skb_clone(skb, GFP_ATOMIC);
+       if (!skb2)
+               return 0;
+
+       dst_release(skb2->dst);
+       skb2->dst = NULL;
+       skb_pull(skb2, offset);
+       skb_reset_network_header(skb2);
+       eiph = ip_hdr(skb2);
+
+       /* Try to guess incoming interface */
+       memset(&fl, 0, sizeof(fl));
+       fl.fl4_dst = eiph->saddr;
+       fl.fl4_tos = RT_TOS(eiph->tos);
+       fl.proto = IPPROTO_IPIP;
+       if (ip_route_output_key(&rt, &fl))
+               goto out;
+
+       skb2->dev = rt->u.dst.dev;
+
+       /* route "incoming" packet */
+       if (rt->rt_flags & RTCF_LOCAL) {
+               ip_rt_put(rt);
+               rt = NULL;
+               fl.fl4_dst = eiph->daddr;
+               fl.fl4_src = eiph->saddr;
+               fl.fl4_tos = eiph->tos;
+               if (ip_route_output_key(&rt, &fl) ||
+                   rt->u.dst.dev->type != ARPHRD_TUNNEL) {
+                       ip_rt_put(rt);
+                       goto out;
+               }
+       } else {
+               ip_rt_put(rt);
+               if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
+                                  skb2->dev) ||
+                   skb2->dst->dev->type != ARPHRD_TUNNEL)
+                       goto out;
+       }
+
+       /* change mtu on this route */
+       if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
+               if (rel_info > dst_mtu(skb2->dst))
+                       goto out;
+
+               skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
+               rel_info = htonl(rel_info);
+       }
+
+       icmp_send(skb2, rel_type, rel_code, rel_info);
+
+out:
+       kfree_skb(skb2);
+       return 0;
+}
+
+static int
+ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+          int type, int code, int offset, __u32 info)
+{
+       int rel_msg = 0;
+       int rel_type = type;
+       int rel_code = code;
+       __u32 rel_info = info;
+       int err;
+
+       err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
+                         &rel_msg, &rel_info, offset);
+       if (err < 0)
+               return err;
+
+       if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
                struct rt6_info *rt;
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
                if (!skb2)
-                       goto out;
+                       return 0;
 
                dst_release(skb2->dst);
                skb2->dst = NULL;
                skb_pull(skb2, offset);
-               skb2->nh.raw = skb2->data;
+               skb_reset_network_header(skb2);
 
                /* Try to guess incoming interface */
-               rt = rt6_lookup(&skb2->nh.ipv6h->saddr, NULL, 0, 0);
+               rt = rt6_lookup(&ipv6_hdr(skb2)->saddr, NULL, 0, 0);
 
                if (rt && rt->rt6i_dev)
                        skb2->dev = rt->rt6i_dev;
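
ip4ip6_err() above relays outer ICMPv6 errors to the inner IPv4 sender, translating ICMPV6_DEST_UNREACH/ADDR_UNREACH into host-unreachable and ICMPV6_PKT_TOOBIG into fragmentation-needed. The sketch below mirrors that switch with locally defined toy constants (the numeric values follow the common ICMP assignments but are not taken from kernel headers):

#include <stdio.h>

enum { V6_DEST_UNREACH = 1, V6_PKT_TOOBIG = 2, V6_CODE_ADDR_UNREACH = 3 };
enum { V4_DEST_UNREACH = 3, V4_CODE_HOST_UNREACH = 1, V4_CODE_FRAG_NEEDED = 4 };

/* Returns 0 and fills *v4_type/*v4_code when the outer ICMPv6 error has an
 * ICMPv4 equivalent worth relaying to the inner sender, -1 otherwise. */
static int translate_err(int v6_type, int v6_code, int *v4_type, int *v4_code)
{
        switch (v6_type) {
        case V6_DEST_UNREACH:
                if (v6_code != V6_CODE_ADDR_UNREACH)
                        return -1;
                *v4_type = V4_DEST_UNREACH;
                *v4_code = V4_CODE_HOST_UNREACH;
                return 0;
        case V6_PKT_TOOBIG:
                if (v6_code != 0)
                        return -1;
                *v4_type = V4_DEST_UNREACH;
                *v4_code = V4_CODE_FRAG_NEEDED;
                return 0;
        default:
                return -1;
        }
}

int main(void)
{
        int t, c;

        if (!translate_err(V6_PKT_TOOBIG, 0, &t, &c))
                printf("relay ICMPv4 type %d code %d\n", t, c);
        return 0;
}
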
@@ -483,19 +614,34 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
                kfree_skb(skb2);
        }
-out:
-       read_unlock(&ip6ip6_lock);
-       return err;
+
+       return 0;
 }
 
-static inline void ip6ip6_ecn_decapsulate(struct ipv6hdr *outer_iph,
-                                         struct sk_buff *skb)
+static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
+                                       struct ipv6hdr *ipv6h,
+                                       struct sk_buff *skb)
 {
-       struct ipv6hdr *inner_iph = skb->nh.ipv6h;
+       __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
 
-       if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
-               IP6_ECN_set_ce(inner_iph);
+       if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
+               ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
+
+       if (INET_ECN_is_ce(dsfield))
+               IP_ECN_set_ce(ip_hdr(skb));
+}
+
+static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
+                                       struct ipv6hdr *ipv6h,
+                                       struct sk_buff *skb)
+{
+       if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
+               ipv6_copy_dscp(ipv6h, ipv6_hdr(skb));
+
+       if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
+               IP6_ECN_set_ce(ipv6_hdr(skb));
 }
+
 static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
 {
        struct ip6_tnl_parm *p = &t->parms;
@@ -519,53 +665,61 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
 }
 
 /**
- * ip6ip6_rcv - decapsulate IPv6 packet and retransmit it locally
+ * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
  *   @skb: received socket buffer
+ *   @protocol: ethernet protocol ID
+ *   @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
  *
  * Return: 0
  **/
 
-static int
-ip6ip6_rcv(struct sk_buff *skb)
+static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
+                      __u8 ipproto,
+                      void (*dscp_ecn_decapsulate)(struct ip6_tnl *t,
+                                                   struct ipv6hdr *ipv6h,
+                                                   struct sk_buff *skb))
 {
-       struct ipv6hdr *ipv6h;
        struct ip6_tnl *t;
+       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 
-       ipv6h = skb->nh.ipv6h;
+       read_lock(&ip6_tnl_lock);
 
-       read_lock(&ip6ip6_lock);
+       if ((t = ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) {
+               if (t->parms.proto != ipproto && t->parms.proto != 0) {
+                       read_unlock(&ip6_tnl_lock);
+                       goto discard;
+               }
 
-       if ((t = ip6ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) {
                if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-                       read_unlock(&ip6ip6_lock);
+                       read_unlock(&ip6_tnl_lock);
                        goto discard;
                }
 
                if (!ip6_tnl_rcv_ctl(t)) {
                        t->stat.rx_dropped++;
-                       read_unlock(&ip6ip6_lock);
+                       read_unlock(&ip6_tnl_lock);
                        goto discard;
                }
                secpath_reset(skb);
-               skb->mac.raw = skb->nh.raw;
-               skb->nh.raw = skb->data;
-               skb->protocol = htons(ETH_P_IPV6);
+               skb->mac_header = skb->network_header;
+               skb_reset_network_header(skb);
+               skb->protocol = htons(protocol);
                skb->pkt_type = PACKET_HOST;
                memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
                skb->dev = t->dev;
                dst_release(skb->dst);
                skb->dst = NULL;
                nf_reset(skb);
-               if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
-                       ipv6_copy_dscp(ipv6h, skb->nh.ipv6h);
-               ip6ip6_ecn_decapsulate(ipv6h, skb);
+
+               dscp_ecn_decapsulate(t, ipv6h, skb);
+
                t->stat.rx_packets++;
                t->stat.rx_bytes += skb->len;
                netif_rx(skb);
-               read_unlock(&ip6ip6_lock);
+               read_unlock(&ip6_tnl_lock);
                return 0;
        }
-       read_unlock(&ip6ip6_lock);
+       read_unlock(&ip6_tnl_lock);
        return 1;
 
 discard:
@@ -573,6 +727,18 @@ discard:
        return 0;
 }
 
+static int ip4ip6_rcv(struct sk_buff *skb)
+{
+       return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
+                          ip4ip6_dscp_ecn_decapsulate);
+}
+
+static int ip6ip6_rcv(struct sk_buff *skb)
+{
+       return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
+                          ip6ip6_dscp_ecn_decapsulate);
+}
+
 struct ipv6_tel_txoption {
        struct ipv6_txoptions ops;
        __u8 dst_opt[8];
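
ip6_tnl_rcv() above factors the receive path so that only the DSCP/ECN decapsulation step differs between IPv4-in-IPv6 and IPv6-in-IPv6; it is passed in as a function pointer and wrapped by ip4ip6_rcv()/ip6ip6_rcv(). The minimal sketch below shows the same parameterization with invented types; the handlers just announce themselves instead of rewriting real headers.

#include <stdio.h>

struct tunnel { const char *name; };

typedef void (*decap_fn)(struct tunnel *t, unsigned char outer_tclass);

static void decap_inner_ipv4(struct tunnel *t, unsigned char outer_tclass)
{
        printf("%s: IPv4 inner header, outer traffic class 0x%02x\n",
               t->name, outer_tclass);
}

static void decap_inner_ipv6(struct tunnel *t, unsigned char outer_tclass)
{
        printf("%s: IPv6 inner header, outer traffic class 0x%02x\n",
               t->name, outer_tclass);
}

/* Generic receive: common bookkeeping here, protocol-specific work via cb. */
static int tunnel_rcv(struct tunnel *t, unsigned char outer_tclass, decap_fn cb)
{
        cb(t, outer_tclass);
        return 0;
}

/* Thin wrappers, analogous to ip4ip6_rcv()/ip6ip6_rcv() in the diff. */
static int rcv_ip4(struct tunnel *t) { return tunnel_rcv(t, 0x00, decap_inner_ipv4); }
static int rcv_ip6(struct tunnel *t) { return tunnel_rcv(t, 0x00, decap_inner_ipv6); }

int main(void)
{
        struct tunnel t = { .name = "tnl0" };

        return rcv_ip4(&t) | rcv_ip6(&t);
}
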
@@ -593,7 +759,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
 }
 
 /**
- * ip6ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
+ * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
  *   @t: the outgoing tunnel device
  *   @hdr: IPv6 header from the incoming packet
  *
@@ -607,7 +773,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
  **/
 
 static inline int
-ip6ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr)
+ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr)
 {
        return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
@@ -641,72 +807,49 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
        return ret;
 }
 /**
- * ip6ip6_tnl_xmit - encapsulate packet and send
+ * ip6_tnl_xmit2 - encapsulate packet and send
  *   @skb: the outgoing socket buffer
  *   @dev: the outgoing tunnel device
+ *   @dsfield: dscp code for outer header
+ *   @fl: flow of tunneled packet
+ *   @encap_limit: encapsulation limit
+ *   @pmtu: Path MTU is stored if packet is too big
  *
  * Description:
  *   Build new header and do some sanity checks on the packet before sending
  *   it.
  *
  * Return:
- *   0
+ *   0 on success
+ *   -1 on failure
+ *   %-EMSGSIZE if the message is too big; the MTU is returned via @pmtu
  **/
 
-static int
-ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+static int ip6_tnl_xmit2(struct sk_buff *skb,
+                        struct net_device *dev,
+                        __u8 dsfield,
+                        struct flowi *fl,
+                        int encap_limit,
+                        __u32 *pmtu)
 {
        struct ip6_tnl *t = netdev_priv(dev);
        struct net_device_stats *stats = &t->stat;
-       struct ipv6hdr *ipv6h = skb->nh.ipv6h;
-       int encap_limit = -1;
+       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        struct ipv6_tel_txoption opt;
-       __u16 offset;
-       struct flowi fl;
        struct dst_entry *dst;
        struct net_device *tdev;
        int mtu;
        int max_headroom = sizeof(struct ipv6hdr);
        u8 proto;
-       int err;
+       int err = -1;
        int pkt_len;
-       int dsfield;
-
-       if (t->recursion++) {
-               stats->collisions++;
-               goto tx_err;
-       }
-       if (skb->protocol != htons(ETH_P_IPV6) ||
-           !ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h))
-               goto tx_err;
-
-       if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
-               struct ipv6_tlv_tnl_enc_lim *tel;
-               tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
-               if (tel->encap_limit == 0) {
-                       icmpv6_send(skb, ICMPV6_PARAMPROB,
-                                   ICMPV6_HDR_FIELD, offset + 2, skb->dev);
-                       goto tx_err;
-               }
-               encap_limit = tel->encap_limit - 1;
-       } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-               encap_limit = t->parms.encap_limit;
-
-       memcpy(&fl, &t->fl, sizeof (fl));
-       proto = fl.proto;
-
-       dsfield = ipv6_get_dsfield(ipv6h);
-       if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
-               fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
-       if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
-               fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
 
        if ((dst = ip6_tnl_dst_check(t)) != NULL)
                dst_hold(dst);
        else {
-               dst = ip6_route_output(NULL, &fl);
+               dst = ip6_route_output(NULL, fl);
 
-               if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0) < 0)
+               if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0)
                        goto tx_err_link_failure;
        }
 
@@ -730,7 +873,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (skb->dst)
                skb->dst->ops->update_pmtu(skb->dst, mtu);
        if (skb->len > mtu) {
-               icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+               *pmtu = mtu;
+               err = -EMSGSIZE;
                goto tx_err_dst_release;
        }
 
@@ -754,22 +898,24 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        dst_release(skb->dst);
        skb->dst = dst_clone(dst);
 
-       skb->h.raw = skb->nh.raw;
+       skb->transport_header = skb->network_header;
 
+       proto = fl->proto;
        if (encap_limit >= 0) {
                init_tel_txopt(&opt, encap_limit);
                ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
        }
-       skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr));
-       ipv6h = skb->nh.ipv6h;
-       *(__be32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000);
+       skb_push(skb, sizeof(struct ipv6hdr));
+       skb_reset_network_header(skb);
+       ipv6h = ipv6_hdr(skb);
+       *(__be32*)ipv6h = fl->fl6_flowlabel | htonl(0x60000000);
        dsfield = INET_ECN_encapsulate(0, dsfield);
        ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
        ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = proto;
-       ipv6_addr_copy(&ipv6h->saddr, &fl.fl6_src);
-       ipv6_addr_copy(&ipv6h->daddr, &fl.fl6_dst);
+       ipv6_addr_copy(&ipv6h->saddr, &fl->fl6_src);
+       ipv6_addr_copy(&ipv6h->daddr, &fl->fl6_dst);
        nf_reset(skb);
        pkt_len = skb->len;
        err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL,
@@ -783,13 +929,131 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                stats->tx_aborted_errors++;
        }
        ip6_tnl_dst_store(t, dst);
-       t->recursion--;
        return 0;
 tx_err_link_failure:
        stats->tx_carrier_errors++;
        dst_link_failure(skb);
 tx_err_dst_release:
        dst_release(dst);
+       return err;
+}
+
+static inline int
+ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct iphdr  *iph = ip_hdr(skb);
+       int encap_limit = -1;
+       struct flowi fl;
+       __u8 dsfield;
+       __u32 mtu;
+       int err;
+
+       if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) ||
+           !ip6_tnl_xmit_ctl(t))
+               return -1;
+
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl, &t->fl, sizeof (fl));
+       fl.proto = IPPROTO_IPIP;
+
+       dsfield = ipv4_get_dsfield(iph);
+
+       if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+               fl.fl6_flowlabel |= ntohl(((__u32)iph->tos << IPV6_TCLASS_SHIFT)
+                                         & IPV6_TCLASS_MASK);
+
+       err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
+       if (err != 0) {
+               /* XXX: send ICMP error even if DF is not set. */
+               if (err == -EMSGSIZE)
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                 htonl(mtu));
+               return -1;
+       }
+
+       return 0;
+}
+
+static inline int
+ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       int encap_limit = -1;
+       __u16 offset;
+       struct flowi fl;
+       __u8 dsfield;
+       __u32 mtu;
+       int err;
+
+       if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
+           !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
+               return -1;
+
+       offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
+       if (offset > 0) {
+               struct ipv6_tlv_tnl_enc_lim *tel;
+               tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+               if (tel->encap_limit == 0) {
+                       icmpv6_send(skb, ICMPV6_PARAMPROB,
+                                   ICMPV6_HDR_FIELD, offset + 2, skb->dev);
+                       return -1;
+               }
+               encap_limit = tel->encap_limit - 1;
+       } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl, &t->fl, sizeof (fl));
+       fl.proto = IPPROTO_IPV6;
+
+       dsfield = ipv6_get_dsfield(ipv6h);
+       if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+               fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+       if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
+               fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+
+       err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
+       if (err != 0) {
+               if (err == -EMSGSIZE)
+                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+               return -1;
+       }
+
+       return 0;
+}
+
+static int
+ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net_device_stats *stats = &t->stat;
+       int ret;
+
+       if (t->recursion++) {
+               t->stat.collisions++;
+               goto tx_err;
+       }
+
+       switch (skb->protocol) {
+       case __constant_htons(ETH_P_IP):
+               ret = ip4ip6_tnl_xmit(skb, dev);
+               break;
+       case __constant_htons(ETH_P_IPV6):
+               ret = ip6ip6_tnl_xmit(skb, dev);
+               break;
+       default:
+               goto tx_err;
+       }
+
+       if (ret < 0)
+               goto tx_err;
+
+       t->recursion--;
+       return 0;
+
 tx_err:
        stats->tx_errors++;
        stats->tx_dropped++;
@@ -817,7 +1081,7 @@ static void ip6_tnl_set_cap(struct ip6_tnl *t)
        }
 }
 
-static void ip6ip6_tnl_link_config(struct ip6_tnl *t)
+static void ip6_tnl_link_config(struct ip6_tnl *t)
 {
        struct net_device *dev = t->dev;
        struct ip6_tnl_parm *p = &t->parms;
@@ -870,17 +1134,17 @@ static void ip6ip6_tnl_link_config(struct ip6_tnl *t)
 }
 
 /**
- * ip6ip6_tnl_change - update the tunnel parameters
+ * ip6_tnl_change - update the tunnel parameters
  *   @t: tunnel to be changed
  *   @p: tunnel configuration parameters
  *   @active: != 0 if tunnel is ready for use
  *
  * Description:
- *   ip6ip6_tnl_change() updates the tunnel parameters
+ *   ip6_tnl_change() updates the tunnel parameters
  **/
 
 static int
-ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
+ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
 {
        ipv6_addr_copy(&t->parms.laddr, &p->laddr);
        ipv6_addr_copy(&t->parms.raddr, &p->raddr);
@@ -889,19 +1153,20 @@ ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
        t->parms.encap_limit = p->encap_limit;
        t->parms.flowinfo = p->flowinfo;
        t->parms.link = p->link;
+       t->parms.proto = p->proto;
        ip6_tnl_dst_reset(t);
-       ip6ip6_tnl_link_config(t);
+       ip6_tnl_link_config(t);
        return 0;
 }
 
 /**
- * ip6ip6_tnl_ioctl - configure ipv6 tunnels from userspace
+ * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
  *   @dev: virtual device associated with tunnel
  *   @ifr: parameters passed from userspace
  *   @cmd: command to be performed
  *
  * Description:
- *   ip6ip6_tnl_ioctl() is used for managing IPv6 tunnels
+ *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
  *   from userspace.
  *
  *   The possible commands are the following:
@@ -923,7 +1188,7 @@ ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
  **/
 
 static int
-ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        int err = 0;
        struct ip6_tnl_parm p;
@@ -931,12 +1196,12 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
        switch (cmd) {
        case SIOCGETTUNNEL:
-               if (dev == ip6ip6_fb_tnl_dev) {
+               if (dev == ip6_fb_tnl_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
                                err = -EFAULT;
                                break;
                        }
-                       t = ip6ip6_tnl_locate(&p, 0);
+                       t = ip6_tnl_locate(&p, 0);
                }
                if (t == NULL)
                        t = netdev_priv(dev);
@@ -954,10 +1219,11 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
                        break;
                err = -EINVAL;
-               if (p.proto != IPPROTO_IPV6)
+               if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
+                   p.proto != 0)
                        break;
-               t = ip6ip6_tnl_locate(&p, cmd == SIOCADDTUNNEL);
-               if (dev != ip6ip6_fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
+               t = ip6_tnl_locate(&p, cmd == SIOCADDTUNNEL);
+               if (dev != ip6_fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
@@ -966,9 +1232,9 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        } else
                                t = netdev_priv(dev);
 
-                       ip6ip6_tnl_unlink(t);
-                       err = ip6ip6_tnl_change(t, &p);
-                       ip6ip6_tnl_link(t);
+                       ip6_tnl_unlink(t);
+                       err = ip6_tnl_change(t, &p);
+                       ip6_tnl_link(t);
                        netdev_state_change(dev);
                }
                if (t) {
@@ -984,15 +1250,15 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (!capable(CAP_NET_ADMIN))
                        break;
 
-               if (dev == ip6ip6_fb_tnl_dev) {
+               if (dev == ip6_fb_tnl_dev) {
                        err = -EFAULT;
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
                                break;
                        err = -ENOENT;
-                       if ((t = ip6ip6_tnl_locate(&p, 0)) == NULL)
+                       if ((t = ip6_tnl_locate(&p, 0)) == NULL)
                                break;
                        err = -EPERM;
-                       if (t->dev == ip6ip6_fb_tnl_dev)
+                       if (t->dev == ip6_fb_tnl_dev)
                                break;
                        dev = t->dev;
                }
@@ -1006,20 +1272,20 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 }
 
 /**
- * ip6ip6_tnl_get_stats - return the stats for tunnel device
+ * ip6_tnl_get_stats - return the stats for tunnel device
  *   @dev: virtual device associated with tunnel
  *
  * Return: stats for device
  **/
 
 static struct net_device_stats *
-ip6ip6_tnl_get_stats(struct net_device *dev)
+ip6_tnl_get_stats(struct net_device *dev)
 {
        return &(((struct ip6_tnl *)netdev_priv(dev))->stat);
 }
 
 /**
- * ip6ip6_tnl_change_mtu - change mtu manually for tunnel device
+ * ip6_tnl_change_mtu - change mtu manually for tunnel device
  *   @dev: virtual device associated with tunnel
  *   @new_mtu: the new mtu
  *
@@ -1029,7 +1295,7 @@ ip6ip6_tnl_get_stats(struct net_device *dev)
  **/
 
 static int
-ip6ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 {
        if (new_mtu < IPV6_MIN_MTU) {
                return -EINVAL;
@@ -1039,22 +1305,22 @@ ip6ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 /**
- * ip6ip6_tnl_dev_setup - setup virtual tunnel device
+ * ip6_tnl_dev_setup - setup virtual tunnel device
  *   @dev: virtual device associated with tunnel
  *
  * Description:
  *   Initialize function pointers and device parameters
  **/
 
-static void ip6ip6_tnl_dev_setup(struct net_device *dev)
+static void ip6_tnl_dev_setup(struct net_device *dev)
 {
        SET_MODULE_OWNER(dev);
-       dev->uninit = ip6ip6_tnl_dev_uninit;
+       dev->uninit = ip6_tnl_dev_uninit;
        dev->destructor = free_netdev;
-       dev->hard_start_xmit = ip6ip6_tnl_xmit;
-       dev->get_stats = ip6ip6_tnl_get_stats;
-       dev->do_ioctl = ip6ip6_tnl_ioctl;
-       dev->change_mtu = ip6ip6_tnl_change_mtu;
+       dev->hard_start_xmit = ip6_tnl_xmit;
+       dev->get_stats = ip6_tnl_get_stats;
+       dev->do_ioctl = ip6_tnl_ioctl;
+       dev->change_mtu = ip6_tnl_change_mtu;
 
        dev->type = ARPHRD_TUNNEL6;
        dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
@@ -1065,50 +1331,56 @@ static void ip6ip6_tnl_dev_setup(struct net_device *dev)
 
 
 /**
- * ip6ip6_tnl_dev_init_gen - general initializer for all tunnel devices
+ * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
  *   @dev: virtual device associated with tunnel
  **/
 
 static inline void
-ip6ip6_tnl_dev_init_gen(struct net_device *dev)
+ip6_tnl_dev_init_gen(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       t->fl.proto = IPPROTO_IPV6;
        t->dev = dev;
        strcpy(t->parms.name, dev->name);
 }
 
 /**
- * ip6ip6_tnl_dev_init - initializer for all non fallback tunnel devices
+ * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
  *   @dev: virtual device associated with tunnel
  **/
 
 static int
-ip6ip6_tnl_dev_init(struct net_device *dev)
+ip6_tnl_dev_init(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       ip6ip6_tnl_dev_init_gen(dev);
-       ip6ip6_tnl_link_config(t);
+       ip6_tnl_dev_init_gen(dev);
+       ip6_tnl_link_config(t);
        return 0;
 }
 
 /**
- * ip6ip6_fb_tnl_dev_init - initializer for fallback tunnel device
+ * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
  *   @dev: fallback device
  *
  * Return: 0
  **/
 
 static int
-ip6ip6_fb_tnl_dev_init(struct net_device *dev)
+ip6_fb_tnl_dev_init(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       ip6ip6_tnl_dev_init_gen(dev);
+       ip6_tnl_dev_init_gen(dev);
+       t->parms.proto = IPPROTO_IPV6;
        dev_hold(dev);
        tnls_wc[0] = t;
        return 0;
 }
 
+static struct xfrm6_tunnel ip4ip6_handler = {
+       .handler        = ip4ip6_rcv,
+       .err_handler    = ip4ip6_err,
+       .priority       =       1,
+};
+
 static struct xfrm6_tunnel ip6ip6_handler = {
        .handler        = ip6ip6_rcv,
        .err_handler    = ip6ip6_err,
@@ -1125,30 +1397,40 @@ static int __init ip6_tunnel_init(void)
 {
        int  err;
 
+       if (xfrm6_tunnel_register(&ip4ip6_handler, AF_INET)) {
+               printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
+               err = -EAGAIN;
+               goto out;
+       }
+
        if (xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6)) {
-               printk(KERN_ERR "ip6ip6 init: can't register tunnel\n");
-               return -EAGAIN;
+               printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
+               err = -EAGAIN;
+               goto unreg_ip4ip6;
        }
-       ip6ip6_fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
-                                        ip6ip6_tnl_dev_setup);
+       ip6_fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
+                                     ip6_tnl_dev_setup);
 
-       if (!ip6ip6_fb_tnl_dev) {
+       if (!ip6_fb_tnl_dev) {
                err = -ENOMEM;
                goto fail;
        }
-       ip6ip6_fb_tnl_dev->init = ip6ip6_fb_tnl_dev_init;
+       ip6_fb_tnl_dev->init = ip6_fb_tnl_dev_init;
 
-       if ((err = register_netdev(ip6ip6_fb_tnl_dev))) {
-               free_netdev(ip6ip6_fb_tnl_dev);
+       if ((err = register_netdev(ip6_fb_tnl_dev))) {
+               free_netdev(ip6_fb_tnl_dev);
                goto fail;
        }
        return 0;
 fail:
        xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
+unreg_ip4ip6:
+       xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
+out:
        return err;
 }
 
-static void __exit ip6ip6_destroy_tunnels(void)
+static void __exit ip6_tnl_destroy_tunnels(void)
 {
        int h;
        struct ip6_tnl *t;
@@ -1168,11 +1450,14 @@ static void __exit ip6ip6_destroy_tunnels(void)
 
 static void __exit ip6_tunnel_cleanup(void)
 {
+       if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
+               printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n");
+
        if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
-               printk(KERN_INFO "ip6ip6 close: can't deregister tunnel\n");
+               printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n");
 
        rtnl_lock();
-       ip6ip6_destroy_tunnels();
+       ip6_tnl_destroy_tunnels();
        rtnl_unlock();
 }
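
The relaxed proto check in the SIOCADDTUNNEL/SIOCCHGTUNNEL path above (IPPROTO_IPV6, IPPROTO_IPIP or 0 are now accepted) is what lets userspace create an IPv4-over-IPv6 tunnel through the ip6tnl0 fallback device. The sketch below is not part of the patch; it mirrors struct ip6_tnl_parm locally rather than depending on the exported kernel header, and the tunnel name and addresses are invented for illustration.

/*
 * Hedged userspace sketch: add an ip4ip6 tunnel through ip6tnl0.
 * struct ip6_tnl_parm is mirrored from include/linux/ip6_tunnel.h.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <linux/types.h>
#include <linux/sockios.h>			/* SIOCDEVPRIVATE */

#define SIOCADDTUNNEL	(SIOCDEVPRIVATE + 1)	/* as in <linux/if_tunnel.h> */

struct ip6_tnl_parm {				/* local mirror of the uapi struct */
	char name[IFNAMSIZ];
	int link;
	__u8 proto;
	__u8 encap_limit;
	__u8 hop_limit;
	__u32 flowinfo;				/* __be32 in the kernel header */
	__u32 flags;
	struct in6_addr laddr;
	struct in6_addr raddr;
};

int main(void)
{
	struct ip6_tnl_parm p;
	struct ifreq ifr;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	memset(&p, 0, sizeof(p));
	strncpy(p.name, "mytnl0", IFNAMSIZ - 1);
	p.proto = IPPROTO_IPIP;		/* IPv4 payload; 0 would accept either */
	p.hop_limit = 64;
	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "ip6tnl0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&p;

	if (fd < 0 || ioctl(fd, SIOCADDTUNNEL, &ifr) < 0)
		perror("SIOCADDTUNNEL");
	return 0;
}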
 
index 5724ba9..1ee50b5 100644 (file)
@@ -79,9 +79,9 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Remove ipcomp header and decompress original payload */
-       iph = skb->nh.ipv6h;
+       iph = ipv6_hdr(skb);
        ipch = (void *)skb->data;
-       skb->h.raw = skb->nh.raw + sizeof(*ipch);
+       skb->transport_header = skb->network_header + sizeof(*ipch);
        __skb_pull(skb, sizeof(*ipch));
 
        /* decompression */
@@ -111,7 +111,7 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
 
        skb->truesize += dlen - plen;
        __skb_put(skb, dlen - plen);
-       memcpy(skb->data, scratch, dlen);
+       skb_copy_to_linear_data(skb, scratch, dlen);
        err = ipch->nexthdr;
 
 out_put_cpu:
@@ -124,15 +124,13 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
        struct ipv6hdr *top_iph;
-       int hdr_len;
        struct ipv6_comp_hdr *ipch;
        struct ipcomp_data *ipcd = x->data;
        int plen, dlen;
        u8 *start, *scratch;
        struct crypto_comp *tfm;
        int cpu;
-
-       hdr_len = skb->h.raw - skb->data;
+       int hdr_len = skb_transport_offset(skb);
 
        /* check whether datagram len is larger than threshold */
        if ((skb->len - hdr_len) < ipcd->threshold) {
@@ -145,7 +143,7 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb)
        /* compression */
        plen = skb->len - hdr_len;
        dlen = IPCOMP_SCRATCH_SIZE;
-       start = skb->h.raw;
+       start = skb_transport_header(skb);
 
        cpu = get_cpu();
        scratch = *per_cpu_ptr(ipcomp6_scratches, cpu);
@@ -166,10 +164,10 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb)
        top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
 
        ipch = (struct ipv6_comp_hdr *)start;
-       ipch->nexthdr = *skb->nh.raw;
+       ipch->nexthdr = *skb_network_header(skb);
        ipch->flags = 0;
        ipch->cpi = htons((u16 )ntohl(x->id.spi));
-       *skb->nh.raw = IPPROTO_COMP;
+       *skb_network_header(skb) = IPPROTO_COMP;
 
 out_ok:
        return 0;
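
The ipcomp6 hunks above, like much of this commit, are a mechanical conversion from the old skb->nh / skb->h header unions to the typed sk_buff helpers. Purely as a reading aid (this function is not in the patch), the correspondence used throughout the diff is roughly:

/* Reading aid only: old union-field accesses and the helpers replacing them. */
#include <linux/skbuff.h>
#include <linux/ipv6.h>

static void skb_accessor_examples(struct sk_buff *skb)
{
	struct ipv6hdr *ip6 = ipv6_hdr(skb);		/* was skb->nh.ipv6h */
	unsigned char *nh = skb_network_header(skb);	/* was skb->nh.raw */
	unsigned char *th = skb_transport_header(skb);	/* was skb->h.raw */
	int th_off = skb_transport_offset(skb);		/* was skb->h.raw - skb->data */

	skb_reset_network_header(skb);		/* was skb->nh.raw = skb->data */
	skb_reset_transport_header(skb);	/* was skb->h.raw  = skb->data */
	skb_copy_to_linear_data(skb, nh, 0);	/* was memcpy(skb->data, ...) */

	(void)ip6; (void)th; (void)th_off;
}
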
index f5f9582..aa3d07c 100644 (file)
@@ -101,14 +101,14 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
        if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
                goto out;
 
-       ipv6h = skb->nh.ipv6h;
+       ipv6h = ipv6_hdr(skb);
        __skb_pull(skb, sizeof(*ipv6h));
        err = -EPROTONOSUPPORT;
 
        rcu_read_lock();
        ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
        if (likely(ops && ops->gso_send_check)) {
-               skb->h.raw = skb->data;
+               skb_reset_transport_header(skb);
                err = ops->gso_send_check(skb);
        }
        rcu_read_unlock();
@@ -137,14 +137,14 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
        if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
                goto out;
 
-       ipv6h = skb->nh.ipv6h;
+       ipv6h = ipv6_hdr(skb);
        __skb_pull(skb, sizeof(*ipv6h));
        segs = ERR_PTR(-EPROTONOSUPPORT);
 
        rcu_read_lock();
        ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
        if (likely(ops && ops->gso_segment)) {
-               skb->h.raw = skb->data;
+               skb_reset_transport_header(skb);
                segs = ops->gso_segment(skb, features);
        }
        rcu_read_unlock();
@@ -153,7 +153,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
                goto out;
 
        for (skb = segs; skb; skb = skb->next) {
-               ipv6h = skb->nh.ipv6h;
+               ipv6h = ipv6_hdr(skb);
                ipv6h->payload_len = htons(skb->len - skb->mac_len -
                                           sizeof(*ipv6h));
        }
@@ -694,7 +694,7 @@ done:
                retv = ip6_ra_control(sk, val, NULL);
                break;
        case IPV6_MTU_DISCOVER:
-               if (val<0 || val>2)
+               if (val<0 || val>3)
                        goto e_inval;
                np->pmtudisc = val;
                retv = 0;
@@ -761,6 +761,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
        return err;
 }
 
+EXPORT_SYMBOL(ipv6_setsockopt);
 
 #ifdef CONFIG_COMPAT
 int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
@@ -796,18 +797,37 @@ EXPORT_SYMBOL(compat_ipv6_setsockopt);
 #endif
 
 static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
-                                 char __user *optval, int len)
+                                 int optname, char __user *optval, int len)
 {
        struct ipv6_opt_hdr *hdr;
 
-       if (!opt || !opt->hopopt)
+       if (!opt)
+               return 0;
+
+       switch(optname) {
+       case IPV6_HOPOPTS:
+               hdr = opt->hopopt;
+               break;
+       case IPV6_RTHDRDSTOPTS:
+               hdr = opt->dst0opt;
+               break;
+       case IPV6_RTHDR:
+               hdr = (struct ipv6_opt_hdr *)opt->srcrt;
+               break;
+       case IPV6_DSTOPTS:
+               hdr = opt->dst1opt;
+               break;
+       default:
+               return -EINVAL; /* should not happen */
+       }
+
+       if (!hdr)
                return 0;
-       hdr = opt->hopopt;
 
        len = min_t(unsigned int, len, ipv6_optlen(hdr));
-       if (copy_to_user(optval, hdr, ipv6_optlen(hdr)))
+       if (copy_to_user(optval, hdr, len))
                return -EFAULT;
-       return len;
+       return ipv6_optlen(hdr);
 }
 
 static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
@@ -945,7 +965,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 
                lock_sock(sk);
                len = ipv6_getsockopt_sticky(sk, np->opt,
-                                            optval, len);
+                                            optname, optval, len);
                release_sock(sk);
                return put_user(len, optlen);
        }
@@ -1066,6 +1086,8 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
        return err;
 }
 
+EXPORT_SYMBOL(ipv6_getsockopt);
+
 #ifdef CONFIG_COMPAT
 int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
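
Two of the socket-option hunks above are user visible: IPV6_MTU_DISCOVER now accepts a fourth value, IPV6_PMTUDISC_PROBE, and ipv6_getsockopt_sticky() is passed the option name so sticky routing and destination options can be read back rather than only hop-by-hop options. A hedged userspace sketch of both; the fallback #define is an assumption for libc headers that predate the new value:

/* Illustrative only; option constants come from the libc/kernel headers. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPV6_PMTUDISC_PROBE
#define IPV6_PMTUDISC_PROBE 3	/* newly accepted by the val > 3 check above */
#endif

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int probe = IPV6_PMTUDISC_PROBE;
	unsigned char buf[256];
	socklen_t len = sizeof(buf);

	/* Path-MTU probing: this value was rejected (-EINVAL) before. */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER,
		       &probe, sizeof(probe)) < 0)
		perror("IPV6_MTU_DISCOVER");

	/* Reading a sticky routing header should now reach the IPV6_RTHDR
	 * branch of ipv6_getsockopt_sticky() instead of only hopopts. */
	if (getsockopt(fd, IPPROTO_IPV6, IPV6_RTHDR, buf, &len) < 0)
		perror("IPV6_RTHDR");
	else
		printf("sticky rthdr length: %u\n", (unsigned)len);
	return 0;
}
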
diff --git a/net/ipv6/ipv6_syms.c b/net/ipv6/ipv6_syms.c
deleted file mode 100644 (file)
index e12e3d4..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-
-#include <linux/module.h>
-#include <net/protocol.h>
-#include <net/ipv6.h>
-#include <net/addrconf.h>
-#include <net/ip6_route.h>
-#include <net/xfrm.h>
-
-EXPORT_SYMBOL(icmpv6_send);
-EXPORT_SYMBOL(icmpv6_statistics);
-EXPORT_SYMBOL(icmpv6_err_convert);
-EXPORT_SYMBOL(ndisc_mc_map);
-EXPORT_SYMBOL(register_inet6addr_notifier);
-EXPORT_SYMBOL(unregister_inet6addr_notifier);
-EXPORT_SYMBOL(ip6_route_output);
-EXPORT_SYMBOL(ipv6_setsockopt);
-EXPORT_SYMBOL(ipv6_getsockopt);
-EXPORT_SYMBOL(inet6_register_protosw);
-EXPORT_SYMBOL(inet6_unregister_protosw);
-EXPORT_SYMBOL(inet6_add_protocol);
-EXPORT_SYMBOL(inet6_del_protocol);
-EXPORT_SYMBOL(ip6_xmit);
-EXPORT_SYMBOL(inet6_release);
-EXPORT_SYMBOL(inet6_bind);
-EXPORT_SYMBOL(inet6_getname);
-EXPORT_SYMBOL(inet6_ioctl);
-EXPORT_SYMBOL(ipv6_get_saddr);
-EXPORT_SYMBOL(ipv6_chk_addr);
-EXPORT_SYMBOL(in6_dev_finish_destroy);
-#ifdef CONFIG_XFRM
-EXPORT_SYMBOL(xfrm6_rcv);
-EXPORT_SYMBOL(xfrm6_input_addr);
-EXPORT_SYMBOL(xfrm6_find_1stfragopt);
-#endif
-EXPORT_SYMBOL(rt6_lookup);
-EXPORT_SYMBOL(ipv6_push_nfrag_opts);
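
net/ipv6/ipv6_syms.c goes away in favour of placing each EXPORT_SYMBOL next to the function it exports, as the ipv6_setsockopt/ipv6_getsockopt and ndisc_mc_map hunks elsewhere in this commit do. The convention, shown on a made-up helper:

/* Illustration only; ipv6_example_helper() is hypothetical. */
#include <linux/module.h>

int ipv6_example_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL(ipv6_example_helper);	/* export sits next to the definition */
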
index a8d6625..6c27589 100644 (file)
@@ -988,7 +988,7 @@ int ipv6_is_mld(struct sk_buff *skb, int nexthdr)
        if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
                return 0;
 
-       pic = (struct icmp6hdr *)skb->h.raw;
+       pic = icmp6_hdr(skb);
 
        switch (pic->icmp6_type) {
        case ICMPV6_MGM_QUERY:
@@ -1167,11 +1167,11 @@ int igmp6_event_query(struct sk_buff *skb)
                return -EINVAL;
 
        /* compute payload length excluding extension headers */
-       len = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr);
-       len -= (char *)skb->h.raw - (char *)skb->nh.ipv6h;
+       len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
+       len -= skb_network_header_len(skb);
 
        /* Drop queries with not link local source */
-       if (!(ipv6_addr_type(&skb->nh.ipv6h->saddr)&IPV6_ADDR_LINKLOCAL))
+       if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
                return -EINVAL;
 
        idev = in6_dev_get(skb->dev);
@@ -1179,7 +1179,7 @@ int igmp6_event_query(struct sk_buff *skb)
        if (idev == NULL)
                return 0;
 
-       hdr = (struct icmp6hdr *) skb->h.raw;
+       hdr = icmp6_hdr(skb);
        group = (struct in6_addr *) (hdr + 1);
        group_type = ipv6_addr_type(group);
 
@@ -1212,7 +1212,7 @@ int igmp6_event_query(struct sk_buff *skb)
                        in6_dev_put(idev);
                        return -EINVAL;
                }
-               mlh2 = (struct mld2_query *) skb->h.raw;
+               mlh2 = (struct mld2_query *)skb_transport_header(skb);
                max_delay = (MLDV2_MRC(ntohs(mlh2->mrc))*HZ)/1000;
                if (!max_delay)
                        max_delay = 1;
@@ -1235,7 +1235,7 @@ int igmp6_event_query(struct sk_buff *skb)
                                in6_dev_put(idev);
                                return -EINVAL;
                        }
-                       mlh2 = (struct mld2_query *) skb->h.raw;
+                       mlh2 = (struct mld2_query *)skb_transport_header(skb);
                        mark = 1;
                }
        } else {
@@ -1300,10 +1300,10 @@ int igmp6_event_report(struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
                return -EINVAL;
 
-       hdr = (struct icmp6hdr*) skb->h.raw;
+       hdr = icmp6_hdr(skb);
 
        /* Drop reports with not link local source */
-       addr_type = ipv6_addr_type(&skb->nh.ipv6h->saddr);
+       addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
        if (addr_type != IPV6_ADDR_ANY &&
            !(addr_type&IPV6_ADDR_LINKLOCAL))
                return -EINVAL;
@@ -1411,7 +1411,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
 
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
-       if (ipv6_get_lladdr(dev, &addr_buf)) {
+       if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
                /* <draft-ietf-magma-mld-source-05.txt>:
                 * use unspecified address as the source address
                 * when a valid link-local address is not available.
@@ -1423,8 +1423,9 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
 
        memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
 
-       pmr =(struct mld2_report *)skb_put(skb, sizeof(*pmr));
-       skb->h.raw = (unsigned char *)pmr;
+       skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
+       skb_put(skb, sizeof(*pmr));
+       pmr = (struct mld2_report *)skb_transport_header(skb);
        pmr->type = ICMPV6_MLD2_REPORT;
        pmr->resv1 = 0;
        pmr->csum = 0;
@@ -1441,7 +1442,7 @@ static inline int mld_dev_queue_xmit2(struct sk_buff *skb)
                unsigned char ha[MAX_ADDR_LEN];
                int err;
 
-               ndisc_mc_map(&skb->nh.ipv6h->daddr, ha, dev, 1);
+               ndisc_mc_map(&ipv6_hdr(skb)->daddr, ha, dev, 1);
                err = dev->hard_header(skb, dev, ETH_P_IPV6, ha, NULL, skb->len);
                if (err < 0) {
                        kfree_skb(skb);
@@ -1459,20 +1460,21 @@ static inline int mld_dev_queue_xmit(struct sk_buff *skb)
 
 static void mld_sendpack(struct sk_buff *skb)
 {
-       struct ipv6hdr *pip6 = skb->nh.ipv6h;
-       struct mld2_report *pmr = (struct mld2_report *)skb->h.raw;
+       struct ipv6hdr *pip6 = ipv6_hdr(skb);
+       struct mld2_report *pmr =
+                             (struct mld2_report *)skb_transport_header(skb);
        int payload_len, mldlen;
        struct inet6_dev *idev = in6_dev_get(skb->dev);
        int err;
 
        IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
-       payload_len = skb->tail - (unsigned char *)skb->nh.ipv6h -
-               sizeof(struct ipv6hdr);
-       mldlen = skb->tail - skb->h.raw;
+       payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
+       mldlen = skb->tail - skb->transport_header;
        pip6->payload_len = htons(payload_len);
 
        pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
-               IPPROTO_ICMPV6, csum_partial(skb->h.raw, mldlen, 0));
+               IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb),
+                                            mldlen, 0));
        err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dev,
                mld_dev_queue_xmit);
        if (!err) {
@@ -1506,7 +1508,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
        pgr->grec_auxwords = 0;
        pgr->grec_nsrcs = 0;
        pgr->grec_mca = pmc->mca_addr;  /* structure copy */
-       pmr = (struct mld2_report *)skb->h.raw;
+       pmr = (struct mld2_report *)skb_transport_header(skb);
        pmr->ngrec = htons(ntohs(pmr->ngrec)+1);
        *ppgr = pgr;
        return skb;
@@ -1539,7 +1541,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
        if (!*psf_list)
                goto empty_source;
 
-       pmr = skb ? (struct mld2_report *)skb->h.raw : NULL;
+       pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
 
        /* EX and TO_EX get a fresh packet, if needed */
        if (truncate) {
@@ -1791,7 +1793,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
 
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
-       if (ipv6_get_lladdr(dev, &addr_buf)) {
+       if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
                /* <draft-ietf-magma-mld-source-05.txt>:
                 * use unspecified address as the source address
                 * when a valid link-local address is not available.
index 0afcabd..13b7160 100644 (file)
@@ -90,23 +90,26 @@ int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
 {
        struct ip6_mh *mh;
 
-       if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + 8) ||
-           !pskb_may_pull(skb, (skb->h.raw - skb->data) + ((skb->h.raw[1] + 1) << 3)))
+       if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) ||
+           !pskb_may_pull(skb, (skb_transport_offset(skb) +
+                                ((skb_transport_header(skb)[1] + 1) << 3))))
                return -1;
 
-       mh = (struct ip6_mh *)skb->h.raw;
+       mh = (struct ip6_mh *)skb_transport_header(skb);
 
        if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
                LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
                               mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
-               mip6_param_prob(skb, 0, (&mh->ip6mh_hdrlen) - skb->nh.raw);
+               mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) -
+                                        skb_network_header(skb)));
                return -1;
        }
 
        if (mh->ip6mh_proto != IPPROTO_NONE) {
                LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
                               mh->ip6mh_proto);
-               mip6_param_prob(skb, 0, (&mh->ip6mh_proto) - skb->nh.raw);
+               mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) -
+                                        skb_network_header(skb)));
                return -1;
        }
 
@@ -122,12 +125,12 @@ struct mip6_report_rate_limiter {
 };
 
 static struct mip6_report_rate_limiter mip6_report_rl = {
-       .lock = SPIN_LOCK_UNLOCKED
+       .lock = __SPIN_LOCK_UNLOCKED(mip6_report_rl.lock)
 };
 
 static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct ipv6hdr *iph = skb->nh.ipv6h;
+       struct ipv6hdr *iph = ipv6_hdr(skb);
        struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data;
 
        if (!ipv6_addr_equal(&iph->saddr, (struct in6_addr *)x->coaddr) &&
@@ -152,10 +155,10 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
        iph = (struct ipv6hdr *)skb->data;
        iph->payload_len = htons(skb->len - sizeof(*iph));
 
-       nexthdr = *skb->nh.raw;
-       *skb->nh.raw = IPPROTO_DSTOPTS;
+       nexthdr = *skb_network_header(skb);
+       *skb_network_header(skb) = IPPROTO_DSTOPTS;
 
-       dstopt = (struct ipv6_destopt_hdr *)skb->h.raw;
+       dstopt = (struct ipv6_destopt_hdr *)skb_transport_header(skb);
        dstopt->nexthdr = nexthdr;
 
        hao = mip6_padn((char *)(dstopt + 1),
@@ -215,21 +218,22 @@ static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb, struct
        if (likely(opt->dsthao)) {
                offset = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
                if (likely(offset >= 0))
-                       hao = (struct ipv6_destopt_hao *)(skb->nh.raw + offset);
+                       hao = (struct ipv6_destopt_hao *)
+                                       (skb_network_header(skb) + offset);
        }
 
        skb_get_timestamp(skb, &stamp);
 
-       if (!mip6_report_rl_allow(&stamp, &skb->nh.ipv6h->daddr,
-                                 hao ? &hao->addr : &skb->nh.ipv6h->saddr,
+       if (!mip6_report_rl_allow(&stamp, &ipv6_hdr(skb)->daddr,
+                                 hao ? &hao->addr : &ipv6_hdr(skb)->saddr,
                                  opt->iif))
                goto out;
 
        memset(&sel, 0, sizeof(sel));
-       memcpy(&sel.daddr, (xfrm_address_t *)&skb->nh.ipv6h->daddr,
+       memcpy(&sel.daddr, (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
               sizeof(sel.daddr));
        sel.prefixlen_d = 128;
-       memcpy(&sel.saddr, (xfrm_address_t *)&skb->nh.ipv6h->saddr,
+       memcpy(&sel.saddr, (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
               sizeof(sel.saddr));
        sel.prefixlen_s = 128;
        sel.family = AF_INET6;
@@ -253,11 +257,13 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
                               u8 **nexthdr)
 {
        u16 offset = sizeof(struct ipv6hdr);
-       struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1);
-       unsigned int packet_len = skb->tail - skb->nh.raw;
+       struct ipv6_opt_hdr *exthdr =
+                                  (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
+       const unsigned char *nh = skb_network_header(skb);
+       unsigned int packet_len = skb->tail - skb->network_header;
        int found_rhdr = 0;
 
-       *nexthdr = &skb->nh.ipv6h->nexthdr;
+       *nexthdr = &ipv6_hdr(skb)->nexthdr;
 
        while (offset + 1 <= packet_len) {
 
@@ -288,7 +294,7 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
 
                offset += ipv6_optlen(exthdr);
                *nexthdr = &exthdr->nexthdr;
-               exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+               exthdr = (struct ipv6_opt_hdr *)(nh + offset);
        }
 
        return offset;
@@ -361,10 +367,10 @@ static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb)
        iph = (struct ipv6hdr *)skb->data;
        iph->payload_len = htons(skb->len - sizeof(*iph));
 
-       nexthdr = *skb->nh.raw;
-       *skb->nh.raw = IPPROTO_ROUTING;
+       nexthdr = *skb_network_header(skb);
+       *skb_network_header(skb) = IPPROTO_ROUTING;
 
-       rt2 = (struct rt2_hdr *)skb->h.raw;
+       rt2 = (struct rt2_hdr *)skb_transport_header(skb);
        rt2->rt_hdr.nexthdr = nexthdr;
        rt2->rt_hdr.hdrlen = (x->props.header_len >> 3) - 1;
        rt2->rt_hdr.type = IPV6_SRCRT_TYPE_2;
@@ -383,11 +389,13 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
                             u8 **nexthdr)
 {
        u16 offset = sizeof(struct ipv6hdr);
-       struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1);
-       unsigned int packet_len = skb->tail - skb->nh.raw;
+       struct ipv6_opt_hdr *exthdr =
+                                  (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
+       const unsigned char *nh = skb_network_header(skb);
+       unsigned int packet_len = skb->tail - skb->network_header;
        int found_rhdr = 0;
 
-       *nexthdr = &skb->nh.ipv6h->nexthdr;
+       *nexthdr = &ipv6_hdr(skb)->nexthdr;
 
        while (offset + 1 <= packet_len) {
 
@@ -397,7 +405,7 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
                case NEXTHDR_ROUTING:
                        if (offset + 3 <= packet_len) {
                                struct ipv6_rt_hdr *rt;
-                               rt = (struct ipv6_rt_hdr *)(skb->nh.raw + offset);
+                               rt = (struct ipv6_rt_hdr *)(nh + offset);
                                if (rt->type != 0)
                                        return offset;
                        }
@@ -417,7 +425,7 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
 
                offset += ipv6_optlen(exthdr);
                *nexthdr = &exthdr->nexthdr;
-               exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+               exthdr = (struct ipv6_opt_hdr *)(nh + offset);
        }
 
        return offset;
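
Besides the accessor conversion, the mobility-header hunk above replaces the deprecated SPIN_LOCK_UNLOCKED initializer with __SPIN_LOCK_UNLOCKED(), which gives the lock a name for lockdep. A minimal sketch of the two idiomatic static initializations (the rate-limiter structure here is hypothetical):

#include <linux/spinlock.h>

/* Standalone lock: DEFINE_SPINLOCK is the usual shorthand. */
static DEFINE_SPINLOCK(example_lock);

/* Lock embedded in a structure, as in mip6_report_rl above: the
 * initializer is told the lock's name for lockdep/debugging. */
struct example_rate_limiter {
	spinlock_t lock;
	unsigned long stamp;
};

static struct example_rate_limiter example_rl = {
	.lock = __SPIN_LOCK_UNLOCKED(example_rl.lock),
};
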
index 121f31c..d8b3645 100644 (file)
@@ -319,6 +319,8 @@ int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int d
        return -EINVAL;
 }
 
+EXPORT_SYMBOL(ndisc_mc_map);
+
 static u32 ndisc_hash(const void *pkey, const struct net_device *dev)
 {
        const u32 *p32 = pkey;
@@ -425,36 +427,23 @@ static inline void ndisc_flow_init(struct flowi *fl, u8 type,
        security_sk_classify_flow(ndisc_socket->sk, fl);
 }
 
-static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
-                  struct in6_addr *daddr, struct in6_addr *solicited_addr,
-                  int router, int solicited, int override, int inc_opt)
+static void __ndisc_send(struct net_device *dev,
+                        struct neighbour *neigh,
+                        struct in6_addr *daddr, struct in6_addr *saddr,
+                        struct icmp6hdr *icmp6h, struct in6_addr *target,
+                        int llinfo, int icmp6_mib_outnd)
 {
-       struct in6_addr tmpaddr;
-       struct inet6_ifaddr *ifp;
-       struct inet6_dev *idev;
        struct flowi fl;
-       struct dst_entry* dst;
+       struct dst_entry *dst;
        struct sock *sk = ndisc_socket->sk;
-       struct in6_addr *src_addr;
-       struct nd_msg *msg;
-       int len;
        struct sk_buff *skb;
+       struct icmp6hdr *hdr;
+       struct inet6_dev *idev;
+       int len;
        int err;
+       u8 *opt;
 
-       len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
-
-       /* for anycast or proxy, solicited_addr != src_addr */
-       ifp = ipv6_get_ifaddr(solicited_addr, dev, 1);
-       if (ifp) {
-               src_addr = solicited_addr;
-               in6_ifa_put(ifp);
-       } else {
-               if (ipv6_dev_get_saddr(dev, daddr, &tmpaddr))
-                       return;
-               src_addr = &tmpaddr;
-       }
-
-       ndisc_flow_init(&fl, NDISC_NEIGHBOUR_ADVERTISEMENT, src_addr, daddr,
+       ndisc_flow_init(&fl, icmp6h->icmp6_type, saddr, daddr,
                        dev->ifindex);
 
        dst = ndisc_dst_alloc(dev, neigh, daddr, ip6_output);
@@ -465,60 +454,57 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
        if (err < 0)
                return;
 
-       if (inc_opt) {
-               if (dev->addr_len)
-                       len += ndisc_opt_addr_space(dev);
-               else
-                       inc_opt = 0;
-       }
+       if (!dev->addr_len)
+               llinfo = 0;
+
+       len = sizeof(struct icmp6hdr) + (target ? sizeof(*target) : 0);
+       if (llinfo)
+               len += ndisc_opt_addr_space(dev);
 
        skb = sock_alloc_send_skb(sk,
                                  (MAX_HEADER + sizeof(struct ipv6hdr) +
                                   len + LL_RESERVED_SPACE(dev)),
                                  1, &err);
-
-       if (skb == NULL) {
+       if (!skb) {
                ND_PRINTK0(KERN_ERR
-                          "ICMPv6 NA: %s() failed to allocate an skb.\n",
+                          "ICMPv6 ND: %s() failed to allocate an skb.\n",
                           __FUNCTION__);
                dst_release(dst);
                return;
        }
 
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
-       ip6_nd_hdr(sk, skb, dev, src_addr, daddr, IPPROTO_ICMPV6, len);
-
-       msg = (struct nd_msg *)skb_put(skb, len);
-       skb->h.raw = (unsigned char*)msg;
+       ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
 
-       msg->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
-       msg->icmph.icmp6_code = 0;
-       msg->icmph.icmp6_cksum = 0;
+       skb->transport_header = skb->tail;
+       skb_put(skb, len);
 
-       msg->icmph.icmp6_unused = 0;
-       msg->icmph.icmp6_router    = router;
-       msg->icmph.icmp6_solicited = solicited;
-       msg->icmph.icmp6_override  = override;
+       hdr = (struct icmp6hdr *)skb_transport_header(skb);
+       memcpy(hdr, icmp6h, sizeof(*hdr));
 
-       /* Set the target address. */
-       ipv6_addr_copy(&msg->target, solicited_addr);
+       opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
+       if (target) {
+               ipv6_addr_copy((struct in6_addr *)opt, target);
+               opt += sizeof(*target);
+       }
 
-       if (inc_opt)
-               ndisc_fill_addr_option(msg->opt, ND_OPT_TARGET_LL_ADDR, dev->dev_addr,
+       if (llinfo)
+               ndisc_fill_addr_option(opt, llinfo, dev->dev_addr,
                                       dev->addr_len, dev->type);
 
-       /* checksum */
-       msg->icmph.icmp6_cksum = csum_ipv6_magic(src_addr, daddr, len,
-                                                IPPROTO_ICMPV6,
-                                                csum_partial((__u8 *) msg,
-                                                             len, 0));
+       hdr->icmp6_cksum = csum_ipv6_magic(saddr, daddr, len,
+                                          IPPROTO_ICMPV6,
+                                          csum_partial((__u8 *) hdr,
+                                                       len, 0));
 
        skb->dst = dst;
+
        idev = in6_dev_get(dst->dev);
        IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
+
        err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
        if (!err) {
-               ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS);
+               ICMP6_INC_STATS(idev, icmp6_mib_outnd);
                ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
        }
 
@@ -526,165 +512,95 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
                in6_dev_put(idev);
 }
 
+static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+                  struct in6_addr *daddr, struct in6_addr *solicited_addr,
+                  int router, int solicited, int override, int inc_opt)
+{
+       struct in6_addr tmpaddr;
+       struct inet6_ifaddr *ifp;
+       struct in6_addr *src_addr;
+       struct icmp6hdr icmp6h = {
+               .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
+       };
+
+       /* for anycast or proxy, solicited_addr != src_addr */
+       ifp = ipv6_get_ifaddr(solicited_addr, dev, 1);
+       if (ifp) {
+               src_addr = solicited_addr;
+               if (ifp->flags & IFA_F_OPTIMISTIC)
+                       override = 0;
+               in6_ifa_put(ifp);
+       } else {
+               if (ipv6_dev_get_saddr(dev, daddr, &tmpaddr))
+                       return;
+               src_addr = &tmpaddr;
+       }
+
+       icmp6h.icmp6_router = router;
+       icmp6h.icmp6_solicited = solicited;
+       icmp6h.icmp6_override = override;
+
+       __ndisc_send(dev, neigh, daddr, src_addr,
+                    &icmp6h, solicited_addr,
+                    inc_opt ? ND_OPT_TARGET_LL_ADDR : 0,
+                    ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS);
+}
+
 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
                   struct in6_addr *solicit,
                   struct in6_addr *daddr, struct in6_addr *saddr)
 {
-       struct flowi fl;
-       struct dst_entry* dst;
-       struct inet6_dev *idev;
-       struct sock *sk = ndisc_socket->sk;
-       struct sk_buff *skb;
-       struct nd_msg *msg;
        struct in6_addr addr_buf;
-       int len;
-       int err;
-       int send_llinfo;
+       struct icmp6hdr icmp6h = {
+               .icmp6_type = NDISC_NEIGHBOUR_SOLICITATION,
+       };
 
        if (saddr == NULL) {
-               if (ipv6_get_lladdr(dev, &addr_buf))
+               if (ipv6_get_lladdr(dev, &addr_buf,
+                                  (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)))
                        return;
                saddr = &addr_buf;
        }
 
-       ndisc_flow_init(&fl, NDISC_NEIGHBOUR_SOLICITATION, saddr, daddr,
-                       dev->ifindex);
-
-       dst = ndisc_dst_alloc(dev, neigh, daddr, ip6_output);
-       if (!dst)
-               return;
-
-       err = xfrm_lookup(&dst, &fl, NULL, 0);
-       if (err < 0)
-               return;
-
-       len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
-       send_llinfo = dev->addr_len && !ipv6_addr_any(saddr);
-       if (send_llinfo)
-               len += ndisc_opt_addr_space(dev);
-
-       skb = sock_alloc_send_skb(sk,
-                                 (MAX_HEADER + sizeof(struct ipv6hdr) +
-                                  len + LL_RESERVED_SPACE(dev)),
-                                 1, &err);
-       if (skb == NULL) {
-               ND_PRINTK0(KERN_ERR
-                          "ICMPv6 NA: %s() failed to allocate an skb.\n",
-                          __FUNCTION__);
-               dst_release(dst);
-               return;
-       }
-
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
-       ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
-
-       msg = (struct nd_msg *)skb_put(skb, len);
-       skb->h.raw = (unsigned char*)msg;
-       msg->icmph.icmp6_type = NDISC_NEIGHBOUR_SOLICITATION;
-       msg->icmph.icmp6_code = 0;
-       msg->icmph.icmp6_cksum = 0;
-       msg->icmph.icmp6_unused = 0;
-
-       /* Set the target address. */
-       ipv6_addr_copy(&msg->target, solicit);
-
-       if (send_llinfo)
-               ndisc_fill_addr_option(msg->opt, ND_OPT_SOURCE_LL_ADDR, dev->dev_addr,
-                                      dev->addr_len, dev->type);
-
-       /* checksum */
-       msg->icmph.icmp6_cksum = csum_ipv6_magic(&skb->nh.ipv6h->saddr,
-                                                daddr, len,
-                                                IPPROTO_ICMPV6,
-                                                csum_partial((__u8 *) msg,
-                                                             len, 0));
-       /* send it! */
-       skb->dst = dst;
-       idev = in6_dev_get(dst->dev);
-       IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
-       err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
-       if (!err) {
-               ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORSOLICITS);
-               ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
-       }
-
-       if (likely(idev != NULL))
-               in6_dev_put(idev);
+       __ndisc_send(dev, neigh, daddr, saddr,
+                    &icmp6h, solicit,
+                    !ipv6_addr_any(saddr) ? ND_OPT_SOURCE_LL_ADDR : 0,
+                    ICMP6_MIB_OUTNEIGHBORSOLICITS);
 }
 
 void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
                   struct in6_addr *daddr)
 {
-       struct flowi fl;
-       struct dst_entry* dst;
-       struct inet6_dev *idev;
-       struct sock *sk = ndisc_socket->sk;
-       struct sk_buff *skb;
-       struct icmp6hdr *hdr;
-       __u8 * opt;
-       int len;
-       int err;
-
-       ndisc_flow_init(&fl, NDISC_ROUTER_SOLICITATION, saddr, daddr,
-                       dev->ifindex);
-
-       dst = ndisc_dst_alloc(dev, NULL, daddr, ip6_output);
-       if (!dst)
-               return;
-
-       err = xfrm_lookup(&dst, &fl, NULL, 0);
-       if (err < 0)
-               return;
-
-       len = sizeof(struct icmp6hdr);
-       if (dev->addr_len)
-               len += ndisc_opt_addr_space(dev);
-
-       skb = sock_alloc_send_skb(sk,
-                                 (MAX_HEADER + sizeof(struct ipv6hdr) +
-                                  len + LL_RESERVED_SPACE(dev)),
-                                 1, &err);
-       if (skb == NULL) {
-               ND_PRINTK0(KERN_ERR
-                          "ICMPv6 RS: %s() failed to allocate an skb.\n",
-                          __FUNCTION__);
-               dst_release(dst);
-               return;
-       }
-
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
-       ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
-
-       hdr = (struct icmp6hdr *)skb_put(skb, len);
-       skb->h.raw = (unsigned char*)hdr;
-       hdr->icmp6_type = NDISC_ROUTER_SOLICITATION;
-       hdr->icmp6_code = 0;
-       hdr->icmp6_cksum = 0;
-       hdr->icmp6_unused = 0;
-
-       opt = (u8*) (hdr + 1);
-
-       if (dev->addr_len)
-               ndisc_fill_addr_option(opt, ND_OPT_SOURCE_LL_ADDR, dev->dev_addr,
-                                      dev->addr_len, dev->type);
-
-       /* checksum */
-       hdr->icmp6_cksum = csum_ipv6_magic(&skb->nh.ipv6h->saddr, daddr, len,
-                                          IPPROTO_ICMPV6,
-                                          csum_partial((__u8 *) hdr, len, 0));
+       struct icmp6hdr icmp6h = {
+               .icmp6_type = NDISC_ROUTER_SOLICITATION,
+       };
+       int send_sllao = dev->addr_len;
 
-       /* send it! */
-       skb->dst = dst;
-       idev = in6_dev_get(dst->dev);
-       IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
-       err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
-       if (!err) {
-               ICMP6_INC_STATS(idev, ICMP6_MIB_OUTROUTERSOLICITS);
-               ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+       /*
+        * According to section 2.2 of RFC 4429, we must not
+        * send router solicitations with a sllao from
+        * optimistic addresses, but we may send the solicitation
+        * if we don't include the sllao.  So here we check
+        * if our address is optimistic, and if so, we
+        * suppress the inclusion of the sllao.
+        */
+       if (send_sllao) {
+               struct inet6_ifaddr *ifp = ipv6_get_ifaddr(saddr, dev, 1);
+               if (ifp) {
+                       if (ifp->flags & IFA_F_OPTIMISTIC)  {
+                               send_sllao = 0;
+                       }
+                       in6_ifa_put(ifp);
+               } else {
+                       send_sllao = 0;
+               }
        }
-
-       if (likely(idev != NULL))
-               in6_dev_put(idev);
+#endif
+       __ndisc_send(dev, NULL, daddr, saddr,
+                    &icmp6h, NULL,
+                    send_sllao ? ND_OPT_SOURCE_LL_ADDR : 0,
+                    ICMP6_MIB_OUTROUTERSOLICITS);
 }
 
 
@@ -708,8 +624,8 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
        struct in6_addr *target = (struct in6_addr *)&neigh->primary_key;
        int probes = atomic_read(&neigh->probes);
 
-       if (skb && ipv6_chk_addr(&skb->nh.ipv6h->saddr, dev, 1))
-               saddr = &skb->nh.ipv6h->saddr;
+       if (skb && ipv6_chk_addr(&ipv6_hdr(skb)->saddr, dev, 1))
+               saddr = &ipv6_hdr(skb)->saddr;
 
        if ((probes -= neigh->parms->ucast_probes) < 0) {
                if (!(neigh->nud_state & NUD_VALID)) {
@@ -732,11 +648,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
 
 static void ndisc_recv_ns(struct sk_buff *skb)
 {
-       struct nd_msg *msg = (struct nd_msg *)skb->h.raw;
-       struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
-       struct in6_addr *daddr = &skb->nh.ipv6h->daddr;
+       struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
+       struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+       struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
        u8 *lladdr = NULL;
-       u32 ndoptlen = skb->tail - msg->opt;
+       u32 ndoptlen = skb->tail - (skb->transport_header +
+                                   offsetof(struct nd_msg, opt));
        struct ndisc_options ndopts;
        struct net_device *dev = skb->dev;
        struct inet6_ifaddr *ifp;
@@ -796,28 +713,40 @@ static void ndisc_recv_ns(struct sk_buff *skb)
        inc = ipv6_addr_is_multicast(daddr);
 
        if ((ifp = ipv6_get_ifaddr(&msg->target, dev, 1)) != NULL) {
-               if (ifp->flags & IFA_F_TENTATIVE) {
-                       /* Address is tentative. If the source
-                          is unspecified address, it is someone
-                          does DAD, otherwise we ignore solicitations
-                          until DAD timer expires.
-                        */
-                       if (!dad)
+
+               if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) {
+                       if (dad) {
+                               if (dev->type == ARPHRD_IEEE802_TR) {
+                                       const unsigned char *sadr;
+                                       sadr = skb_mac_header(skb);
+                                       if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 &&
+                                           sadr[9] == dev->dev_addr[1] &&
+                                           sadr[10] == dev->dev_addr[2] &&
+                                           sadr[11] == dev->dev_addr[3] &&
+                                           sadr[12] == dev->dev_addr[4] &&
+                                           sadr[13] == dev->dev_addr[5]) {
+                                               /* looped-back to us */
+                                               goto out;
+                                       }
+                               }
+
+                               /*
+                                * We are colliding with another node
+                                * that is doing DAD, so fail our own
+                                * DAD process.
+                                */
+                               addrconf_dad_failure(ifp);
                                goto out;
-                       if (dev->type == ARPHRD_IEEE802_TR) {
-                               unsigned char *sadr = skb->mac.raw;
-                               if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 &&
-                                   sadr[9] == dev->dev_addr[1] &&
-                                   sadr[10] == dev->dev_addr[2] &&
-                                   sadr[11] == dev->dev_addr[3] &&
-                                   sadr[12] == dev->dev_addr[4] &&
-                                   sadr[13] == dev->dev_addr[5]) {
-                                       /* looped-back to us */
+                       } else {
+                               /*
+                                * This is not a DAD solicitation.
+                                * If we are an optimistic node,
+                                * we should respond.
+                                * Otherwise, we should ignore it.
+                                */
+                               if (!(ifp->flags & IFA_F_OPTIMISTIC))
                                        goto out;
-                               }
                        }
-                       addrconf_dad_failure(ifp);
-                       return;
                }
 
                idev = ifp->idev;
@@ -898,11 +827,12 @@ out:
 
 static void ndisc_recv_na(struct sk_buff *skb)
 {
-       struct nd_msg *msg = (struct nd_msg *)skb->h.raw;
-       struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
-       struct in6_addr *daddr = &skb->nh.ipv6h->daddr;
+       struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
+       struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+       struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
        u8 *lladdr = NULL;
-       u32 ndoptlen = skb->tail - msg->opt;
+       u32 ndoptlen = skb->tail - (skb->transport_header +
+                                   offsetof(struct nd_msg, opt));
        struct ndisc_options ndopts;
        struct net_device *dev = skb->dev;
        struct inet6_ifaddr *ifp;
@@ -1000,11 +930,11 @@ out:
 
 static void ndisc_recv_rs(struct sk_buff *skb)
 {
-       struct rs_msg *rs_msg = (struct rs_msg *) skb->h.raw;
+       struct rs_msg *rs_msg = (struct rs_msg *)skb_transport_header(skb);
        unsigned long ndoptlen = skb->len - sizeof(*rs_msg);
        struct neighbour *neigh;
        struct inet6_dev *idev;
-       struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
+       struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
        struct ndisc_options ndopts;
        u8 *lladdr = NULL;
 
@@ -1057,7 +987,7 @@ out:
 
 static void ndisc_router_discovery(struct sk_buff *skb)
 {
-       struct ra_msg *ra_msg = (struct ra_msg *) skb->h.raw;
+       struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
        struct neighbour *neigh = NULL;
        struct inet6_dev *in6_dev;
        struct rt6_info *rt = NULL;
@@ -1068,9 +998,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 
        __u8 * opt = (__u8 *)(ra_msg + 1);
 
-       optlen = (skb->tail - skb->h.raw) - sizeof(struct ra_msg);
+       optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg);
 
-       if (!(ipv6_addr_type(&skb->nh.ipv6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
+       if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
                ND_PRINTK2(KERN_WARNING
                           "ICMPv6 RA: source address is not link-local.\n");
                return;
@@ -1136,7 +1066,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                pref = ICMPV6_ROUTER_PREF_MEDIUM;
 #endif
 
-       rt = rt6_get_dflt_router(&skb->nh.ipv6h->saddr, skb->dev);
+       rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
 
        if (rt)
                neigh = rt->rt6i_nexthop;
@@ -1151,7 +1081,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                ND_PRINTK3(KERN_DEBUG
                           "ICMPv6 RA: adding default router.\n");
 
-               rt = rt6_add_dflt_router(&skb->nh.ipv6h->saddr, skb->dev, pref);
+               rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
                if (rt == NULL) {
                        ND_PRINTK0(KERN_ERR
                                   "ICMPv6 RA: %s() failed to add default route.\n",
@@ -1223,7 +1153,7 @@ skip_defrtr:
         */
 
        if (!neigh)
-               neigh = __neigh_lookup(&nd_tbl, &skb->nh.ipv6h->saddr,
+               neigh = __neigh_lookup(&nd_tbl, &ipv6_hdr(skb)->saddr,
                                       skb->dev, 1);
        if (neigh) {
                u8 *lladdr = NULL;
@@ -1252,7 +1182,7 @@ skip_defrtr:
                        if (((struct route_info *)p)->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
                                continue;
                        rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3,
-                                     &skb->nh.ipv6h->saddr);
+                                     &ipv6_hdr(skb)->saddr);
                }
        }
 #endif
@@ -1311,13 +1241,13 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
        int optlen;
        u8 *lladdr = NULL;
 
-       if (!(ipv6_addr_type(&skb->nh.ipv6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
+       if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
                ND_PRINTK2(KERN_WARNING
                           "ICMPv6 Redirect: source address is not link-local.\n");
                return;
        }
 
-       optlen = skb->tail - skb->h.raw;
+       optlen = skb->tail - skb->transport_header;
        optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
 
        if (optlen < 0) {
@@ -1326,7 +1256,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
                return;
        }
 
-       icmph = (struct icmp6hdr *) skb->h.raw;
+       icmph = icmp6_hdr(skb);
        target = (struct in6_addr *) (icmph + 1);
        dest = target + 1;
 
@@ -1376,8 +1306,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 
        neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1);
        if (neigh) {
-               rt6_redirect(dest, &skb->nh.ipv6h->daddr,
-                            &skb->nh.ipv6h->saddr, neigh, lladdr,
+               rt6_redirect(dest, &ipv6_hdr(skb)->daddr,
+                            &ipv6_hdr(skb)->saddr, neigh, lladdr,
                             on_link);
                neigh_release(neigh);
        }
@@ -1406,21 +1336,21 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 
        dev = skb->dev;
 
-       if (ipv6_get_lladdr(dev, &saddr_buf)) {
+       if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
                ND_PRINTK2(KERN_WARNING
                           "ICMPv6 Redirect: no link-local address on %s\n",
                           dev->name);
                return;
        }
 
-       if (!ipv6_addr_equal(&skb->nh.ipv6h->daddr, target) &&
+       if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
            !(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) {
                ND_PRINTK2(KERN_WARNING
                        "ICMPv6 Redirect: target address is not link-local.\n");
                return;
        }
 
-       ndisc_flow_init(&fl, NDISC_REDIRECT, &saddr_buf, &skb->nh.ipv6h->saddr,
+       ndisc_flow_init(&fl, NDISC_REDIRECT, &saddr_buf, &ipv6_hdr(skb)->saddr,
                        dev->ifindex);
 
        dst = ip6_route_output(NULL, &fl);
@@ -1475,11 +1405,12 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        hlen = 0;
 
        skb_reserve(buff, LL_RESERVED_SPACE(dev));
-       ip6_nd_hdr(sk, buff, dev, &saddr_buf, &skb->nh.ipv6h->saddr,
+       ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
                   IPPROTO_ICMPV6, len);
 
-       icmph = (struct icmp6hdr *)skb_put(buff, len);
-       buff->h.raw = (unsigned char*)icmph;
+       skb_set_transport_header(buff, skb_tail_pointer(buff) - buff->data);
+       skb_put(buff, len);
+       icmph = icmp6_hdr(buff);
 
        memset(icmph, 0, sizeof(struct icmp6hdr));
        icmph->icmp6_type = NDISC_REDIRECT;
@@ -1491,7 +1422,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        addrp = (struct in6_addr *)(icmph + 1);
        ipv6_addr_copy(addrp, target);
        addrp++;
-       ipv6_addr_copy(addrp, &skb->nh.ipv6h->daddr);
+       ipv6_addr_copy(addrp, &ipv6_hdr(skb)->daddr);
 
        opt = (u8*) (addrp + 1);
 
@@ -1512,9 +1443,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        *(opt++) = (rd_len >> 3);
        opt += 6;
 
-       memcpy(opt, skb->nh.ipv6h, rd_len - 8);
+       memcpy(opt, ipv6_hdr(skb), rd_len - 8);
 
-       icmph->icmp6_cksum = csum_ipv6_magic(&saddr_buf, &skb->nh.ipv6h->saddr,
+       icmph->icmp6_cksum = csum_ipv6_magic(&saddr_buf, &ipv6_hdr(skb)->saddr,
                                             len, IPPROTO_ICMPV6,
                                             csum_partial((u8 *) icmph, len, 0));
 
@@ -1544,14 +1475,14 @@ int ndisc_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, skb->len))
                return 0;
 
-       msg = (struct nd_msg *) skb->h.raw;
+       msg = (struct nd_msg *)skb_transport_header(skb);
 
-       __skb_push(skb, skb->data-skb->h.raw);
+       __skb_push(skb, skb->data - skb_transport_header(skb));
 
-       if (skb->nh.ipv6h->hop_limit != 255) {
+       if (ipv6_hdr(skb)->hop_limit != 255) {
                ND_PRINTK2(KERN_WARNING
                           "ICMPv6 NDISC: invalid hop-limit: %d\n",
-                          skb->nh.ipv6h->hop_limit);
+                          ipv6_hdr(skb)->hop_limit);
                return 0;
        }
 
@@ -1584,7 +1515,7 @@ int ndisc_rcv(struct sk_buff *skb)
        case NDISC_REDIRECT:
                ndisc_redirect_rcv(skb);
                break;
-       };
+       }
 
        return 0;
 }
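
The hunks above all apply the same conversion: the old skb->nh / skb->h union fields give way to the header accessor helpers (ipv6_hdr(), icmp6_hdr(), skb_transport_header()), and option lengths are computed from the sk_buff_data_t members. A minimal sketch of the new idiom follows; example_nd_opt_len() is a hypothetical helper, not part of the patch.

#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

/*
 * Sketch only: example_nd_opt_len() is hypothetical.  ipv6_hdr()
 * replaces skb->nh.ipv6h, and skb->transport_header (a sk_buff_data_t)
 * replaces skb->h.raw, so the arithmetic below works whether the field
 * is stored as an offset or as a pointer.
 */
static u32 example_nd_opt_len(const struct sk_buff *skb)
{
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);     /* was skb->nh.ipv6h */

        if (ip6h->hop_limit != 255)     /* NDISC requires hop limit 255 */
                return 0;

        /* length of the options that follow the nd_msg header */
        return skb->tail - (skb->transport_header +
                            offsetof(struct nd_msg, opt));
}
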
index 1c405dd..38b1496 100644 (file)
@@ -11,7 +11,7 @@
 
 int ip6_route_me_harder(struct sk_buff *skb)
 {
-       struct ipv6hdr *iph = skb->nh.ipv6h;
+       struct ipv6hdr *iph = ipv6_hdr(skb);
        struct dst_entry *dst;
        struct flowi fl = {
                .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
@@ -61,7 +61,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb, struct nf_info *info)
        struct ip6_rt_info *rt_info = nf_info_reroute(info);
 
        if (info->hook == NF_IP6_LOCAL_OUT) {
-               struct ipv6hdr *iph = skb->nh.ipv6h;
+               struct ipv6hdr *iph = ipv6_hdr(skb);
 
                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
@@ -73,7 +73,7 @@ static int nf_ip6_reroute(struct sk_buff **pskb, const struct nf_info *info)
        struct ip6_rt_info *rt_info = nf_info_reroute(info);
 
        if (info->hook == NF_IP6_LOCAL_OUT) {
-               struct ipv6hdr *iph = (*pskb)->nh.ipv6h;
+               struct ipv6hdr *iph = ipv6_hdr(*pskb);
                if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
                    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr))
                        return ip6_route_me_harder(*pskb);
@@ -84,7 +84,7 @@ static int nf_ip6_reroute(struct sk_buff **pskb, const struct nf_info *info)
 __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
                             unsigned int dataoff, u_int8_t protocol)
 {
-       struct ipv6hdr *ip6h = skb->nh.ipv6h;
+       struct ipv6hdr *ip6h = ipv6_hdr(skb);
        __sum16 csum = 0;
 
        switch (skb->ip_summed) {
index fdb30a5..0004db3 100644 (file)
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 2001-11-06: First try. Working with ip_queue.c for IPv4 and trying
- *             to adapt it to IPv6
- *             HEAVILY based in ipqueue.c by James Morris. It's just
- *             a little modified version of it, so he's nearly the
- *             real coder of this.
- *             Few changes needed, mainly the hard_routing code and
- *             the netlink socket protocol (we're NETLINK_IP6_FW).
- * 2002-06-25: Code cleanup. [JM: ported cleanup over from ip_queue.c]
- * 2005-02-04: Added /proc counter for dropped packets; fixed so
- *             packets aren't delivered to user space if they're going
- *             to be dropped.
  */
 #include <linux/module.h>
 #include <linux/skbuff.h>
@@ -189,12 +177,13 @@ ipq_flush(int verdict)
 static struct sk_buff *
 ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
 {
-       unsigned char *old_tail;
+       sk_buff_data_t old_tail;
        size_t size = 0;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct ipq_packet_msg *pmsg;
        struct nlmsghdr *nlh;
+       struct timeval tv;
 
        read_lock_bh(&queue_lock);
 
@@ -232,15 +221,16 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
        if (!skb)
                goto nlmsg_failure;
 
-       old_tail= skb->tail;
+       old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
        pmsg = NLMSG_DATA(nlh);
        memset(pmsg, 0, sizeof(*pmsg));
 
        pmsg->packet_id       = (unsigned long )entry;
        pmsg->data_len        = data_len;
-       pmsg->timestamp_sec   = entry->skb->tstamp.off_sec;
-       pmsg->timestamp_usec  = entry->skb->tstamp.off_usec;
+       tv = ktime_to_timeval(entry->skb->tstamp);
+       pmsg->timestamp_sec   = tv.tv_sec;
+       pmsg->timestamp_usec  = tv.tv_usec;
        pmsg->mark            = entry->skb->mark;
        pmsg->hook            = entry->info->hook;
        pmsg->hw_protocol     = entry->skb->protocol;
@@ -376,7 +366,7 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
        }
        if (!skb_make_writable(&e->skb, v->data_len))
                return -ENOMEM;
-       memcpy(e->skb->data, v->payload, v->data_len);
+       skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
 
        return 0;
@@ -485,7 +475,7 @@ ipq_rcv_skb(struct sk_buff *skb)
        if (skblen < sizeof(*nlh))
                return;
 
-       nlh = (struct nlmsghdr *)skb->data;
+       nlh = nlmsg_hdr(skb);
        nlmsglen = nlh->nlmsg_len;
        if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
                return;
@@ -667,7 +657,7 @@ static int __init ip6_queue_init(void)
        struct proc_dir_entry *proc;
 
        netlink_register_notifier(&ipq_nl_notifier);
-       ipqnl = netlink_kernel_create(NETLINK_IP6_FW, 0, ipq_rcv_sk,
+       ipqnl = netlink_kernel_create(NETLINK_IP6_FW, 0, ipq_rcv_sk, NULL,
                                      THIS_MODULE);
        if (ipqnl == NULL) {
                printk(KERN_ERR "ip6_queue: failed to create netlink socket\n");
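
The timestamp hunk above reflects skb->tstamp now being a ktime_t; a minimal sketch of the conversion, using a hypothetical helper name:

#include <linux/ktime.h>
#include <linux/skbuff.h>

/*
 * Sketch only: example_fill_timestamp() is hypothetical.  The old
 * tstamp.off_sec/off_usec pair is recovered by converting the ktime_t
 * through a struct timeval, as done for pmsg->timestamp_* above.
 */
static void example_fill_timestamp(const struct sk_buff *skb,
                                   unsigned long *sec, unsigned long *usec)
{
        struct timeval tv = ktime_to_timeval(skb->tstamp);

        *sec  = tv.tv_sec;
        *usec = tv.tv_usec;
}
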
index 7c512e1..9aa6240 100644 (file)
@@ -7,15 +7,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
- *     - increase module usage count as soon as we have rules inside
- *       a table
- * 06 Jun 2002 Andras Kis-Szabo <kisza@sch.bme.hu>
- *      - new extension header parser code
- * 15 Oct 2005 Harald Welte <laforge@netfilter.org>
- *     - Unification of {ip,ip6}_tables into x_tables
- *     - Removed tcp and udp code, since it's not ipv6 specific
  */
 
 #include <linux/capability.h>
@@ -115,7 +106,7 @@ ip6_packet_match(const struct sk_buff *skb,
 {
        size_t i;
        unsigned long ret;
-       const struct ipv6hdr *ipv6 = skb->nh.ipv6h;
+       const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
 
 #define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg))
 
@@ -301,7 +292,7 @@ ip6t_do_table(struct sk_buff **pskb,
                                goto no_match;
 
                        ADD_COUNTER(e->counters,
-                                   ntohs((*pskb)->nh.ipv6h->payload_len)
+                                   ntohs(ipv6_hdr(*pskb)->payload_len)
                                    + IPV6_HDR_LEN,
                                    1);
 
@@ -1448,8 +1439,8 @@ static void __exit ip6_tables_fini(void)
 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
                  int target, unsigned short *fragoff)
 {
-       unsigned int start = (u8*)(skb->nh.ipv6h + 1) - skb->data;
-       u8 nexthdr = skb->nh.ipv6h->nexthdr;
+       unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
+       u8 nexthdr = ipv6_hdr(skb)->nexthdr;
        unsigned int len = skb->len - start;
 
        if (fragoff)
index ccbab66..4115a57 100644 (file)
@@ -32,7 +32,7 @@ static unsigned int ip6t_hl_target(struct sk_buff **pskb,
        if (!skb_make_writable(pskb, (*pskb)->len))
                return NF_DROP;
 
-       ip6h = (*pskb)->nh.ipv6h;
+       ip6h = ipv6_hdr(*pskb);
 
        switch (info->mode) {
                case IP6T_HL_SET:
index afaa039..5bb9cd3 100644 (file)
@@ -396,8 +396,8 @@ ip6t_log_packet(unsigned int pf,
                /* MAC logging for input chain only. */
                printk("MAC=");
                if (skb->dev && (len = skb->dev->hard_header_len) &&
-                   skb->mac.raw != skb->nh.raw) {
-                       unsigned char *p = skb->mac.raw;
+                   skb->mac_header != skb->network_header) {
+                       const unsigned char *p = skb_mac_header(skb);
                        int i;
 
                        if (skb->dev->type == ARPHRD_SIT &&
@@ -412,7 +412,8 @@ ip6t_log_packet(unsigned int pf,
                        printk(" ");
 
                        if (skb->dev->type == ARPHRD_SIT) {
-                               struct iphdr *iph = (struct iphdr *)skb->mac.raw;
+                               const struct iphdr *iph =
+                                       (struct iphdr *)skb_mac_header(skb);
                                printk("TUNNEL=%u.%u.%u.%u->%u.%u.%u.%u ",
                                       NIPQUAD(iph->saddr),
                                       NIPQUAD(iph->daddr));
@@ -421,7 +422,7 @@ ip6t_log_packet(unsigned int pf,
                        printk(" ");
        }
 
-       dump_packet(loginfo, skb, (u8*)skb->nh.ipv6h - skb->data, 1);
+       dump_packet(loginfo, skb, skb_network_offset(skb), 1);
        printk("\n");
        spin_unlock_bh(&log_lock);
 }
@@ -489,14 +490,10 @@ static int __init ip6t_log_init(void)
        ret = xt_register_target(&ip6t_log_reg);
        if (ret < 0)
                return ret;
-       if (nf_log_register(PF_INET6, &ip6t_logger) < 0) {
-               printk(KERN_WARNING "ip6t_LOG: not logging via system console "
-                      "since somebody else already registered for PF_INET6\n");
-               /* we cannot make module load fail here, since otherwise
-                * ip6tables userspace would abort */
-       }
-
-       return 0;
+       ret = nf_log_register(PF_INET6, &ip6t_logger);
+       if (ret < 0 && ret != -EEXIST)
+               xt_unregister_target(&ip6t_log_reg);
+       return ret;
 }
 
 static void __exit ip6t_log_fini(void)
index 6abee94..cb3d241 100644 (file)
@@ -47,7 +47,7 @@ static void send_reset(struct sk_buff *oldskb)
        struct tcphdr otcph, *tcph;
        unsigned int otcplen, hh_len;
        int tcphoff, needs_ack;
-       struct ipv6hdr *oip6h = oldskb->nh.ipv6h, *ip6h;
+       struct ipv6hdr *oip6h = ipv6_hdr(oldskb), *ip6h;
        struct dst_entry *dst = NULL;
        u8 proto;
        struct flowi fl;
@@ -120,8 +120,9 @@ static void send_reset(struct sk_buff *oldskb)
 
        skb_reserve(nskb, hh_len + dst->header_len);
 
-       ip6h = nskb->nh.ipv6h = (struct ipv6hdr *)
-                                       skb_put(nskb, sizeof(struct ipv6hdr));
+       skb_put(nskb, sizeof(struct ipv6hdr));
+       skb_reset_network_header(nskb);
+       ip6h = ipv6_hdr(nskb);
        ip6h->version = 6;
        ip6h->hop_limit = dst_metric(dst, RTAX_HOPLIMIT);
        ip6h->nexthdr = IPPROTO_TCP;
@@ -155,8 +156,8 @@ static void send_reset(struct sk_buff *oldskb)
        tcph->check = 0;
 
        /* Adjust TCP checksum */
-       tcph->check = csum_ipv6_magic(&nskb->nh.ipv6h->saddr,
-                                     &nskb->nh.ipv6h->daddr,
+       tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
+                                     &ipv6_hdr(nskb)->daddr,
                                      sizeof(struct tcphdr), IPPROTO_TCP,
                                      csum_partial((char *)tcph,
                                                   sizeof(struct tcphdr), 0));
index 967bed7..0f3dd93 100644 (file)
@@ -32,8 +32,8 @@ match(const struct sk_buff *skb,
        unsigned char eui64[8];
        int i = 0;
 
-       if (!(skb->mac.raw >= skb->head &&
-             (skb->mac.raw + ETH_HLEN) <= skb->data) &&
+       if (!(skb_mac_header(skb) >= skb->head &&
+             (skb_mac_header(skb) + ETH_HLEN) <= skb->data) &&
            offset != 0) {
                *hotdrop = 1;
                return 0;
@@ -42,7 +42,7 @@ match(const struct sk_buff *skb,
        memset(eui64, 0, sizeof(eui64));
 
        if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
-               if (skb->nh.ipv6h->version == 0x6) {
+               if (ipv6_hdr(skb)->version == 0x6) {
                        memcpy(eui64, eth_hdr(skb)->h_source, 3);
                        memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
                        eui64[3] = 0xff;
@@ -50,7 +50,7 @@ match(const struct sk_buff *skb,
                        eui64[0] |= 0x02;
 
                        i = 0;
-                       while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == eui64[i])
+                       while ((ipv6_hdr(skb)->saddr.s6_addr[8 + i] == eui64[i])
                               && (i < 8))
                                i++;
 
index 37c8a4d..d606c0e 100644 (file)
@@ -25,7 +25,7 @@ static int match(const struct sk_buff *skb,
                 int offset, unsigned int protoff, int *hotdrop)
 {
        const struct ip6t_hl_info *info = matchinfo;
-       const struct ipv6hdr *ip6h = skb->nh.ipv6h;
+       const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
        switch (info->mode) {
                case IP6T_HL_EQ:
index 700a11d..fd6a086 100644 (file)
@@ -45,7 +45,7 @@ ipv6header_match(const struct sk_buff *skb,
        /* Make sure this isn't an evil packet */
 
        /* type of the 1st exthdr */
-       nexthdr = skb->nh.ipv6h->nexthdr;
+       nexthdr = ipv6_hdr(skb)->nexthdr;
        /* pointer to the 1st exthdr */
        ptr = sizeof(struct ipv6hdr);
        /* available length */
index 112a21d..76f0cf6 100644 (file)
@@ -102,7 +102,7 @@ ip6t_local_out_hook(unsigned int hook,
 #if 0
        /* root is playing with raw sockets. */
        if ((*pskb)->len < sizeof(struct iphdr)
-           || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
+           || ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
                if (net_ratelimit())
                        printk("ip6t_hook: happy cracking.\n");
                return NF_ACCEPT;
index 0c468d3..a9f10e3 100644 (file)
@@ -7,8 +7,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * Extended to all five netfilter hooks by Brad Chapman & Harald Welte
  */
 #include <linux/module.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
@@ -138,7 +136,7 @@ ip6t_local_hook(unsigned int hook,
 #if 0
        /* root is playing with raw sockets. */
        if ((*pskb)->len < sizeof(struct iphdr)
-           || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
+           || ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
                if (net_ratelimit())
                        printk("ip6t_hook: happy cracking.\n");
                return NF_ACCEPT;
@@ -146,21 +144,21 @@ ip6t_local_hook(unsigned int hook,
 #endif
 
        /* save source/dest address, mark, hoplimit, flowlabel, priority,  */
-       memcpy(&saddr, &(*pskb)->nh.ipv6h->saddr, sizeof(saddr));
-       memcpy(&daddr, &(*pskb)->nh.ipv6h->daddr, sizeof(daddr));
+       memcpy(&saddr, &ipv6_hdr(*pskb)->saddr, sizeof(saddr));
+       memcpy(&daddr, &ipv6_hdr(*pskb)->daddr, sizeof(daddr));
        mark = (*pskb)->mark;
-       hop_limit = (*pskb)->nh.ipv6h->hop_limit;
+       hop_limit = ipv6_hdr(*pskb)->hop_limit;
 
        /* flowlabel and prio (includes version, which shouldn't change either) */
-       flowlabel = *((u_int32_t *) (*pskb)->nh.ipv6h);
+       flowlabel = *((u_int32_t *)ipv6_hdr(*pskb));
 
        ret = ip6t_do_table(pskb, hook, in, out, &packet_mangler);
 
        if (ret != NF_DROP && ret != NF_STOLEN
-               && (memcmp(&(*pskb)->nh.ipv6h->saddr, &saddr, sizeof(saddr))
-                   || memcmp(&(*pskb)->nh.ipv6h->daddr, &daddr, sizeof(daddr))
+               && (memcmp(&ipv6_hdr(*pskb)->saddr, &saddr, sizeof(saddr))
+                   || memcmp(&ipv6_hdr(*pskb)->daddr, &daddr, sizeof(daddr))
                    || (*pskb)->mark != mark
-                   || (*pskb)->nh.ipv6h->hop_limit != hop_limit))
+                   || ipv6_hdr(*pskb)->hop_limit != hop_limit))
                return ip6_route_me_harder(*pskb) == 0 ? ret : NF_DROP;
 
        return ret;
index d110245..6d2a082 100644 (file)
@@ -7,17 +7,6 @@
  *
  * Author:
  *     Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *
- * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - support Layer 3 protocol independent connection tracking.
- *       Based on the original ip_conntrack code which had the following
- *       copyright information:
- *             (C) 1999-2001 Paul `Rusty' Russell
- *             (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - add get_features() to support various size of conntrack
- *       structures.
  */
 
 #include <linux/types.h>
@@ -138,16 +127,10 @@ static int
 ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
             u_int8_t *protonum)
 {
-       unsigned int extoff;
-       unsigned char pnum;
-       int protoff;
-
-       extoff = (u8*)((*pskb)->nh.ipv6h + 1) - (*pskb)->data;
-       pnum = (*pskb)->nh.ipv6h->nexthdr;
-
-       protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum,
-                                        (*pskb)->len - extoff);
-
+       unsigned int extoff = (u8 *)(ipv6_hdr(*pskb) + 1) - (*pskb)->data;
+       unsigned char pnum = ipv6_hdr(*pskb)->nexthdr;
+       int protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum,
+                                            (*pskb)->len - extoff);
        /*
         * (protoff == (*pskb)->len) means that the packet has no data
         * except IPv6 and extension headers, but it's tracked anyway. - YK
@@ -179,9 +162,8 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
        struct nf_conn_help *help;
        enum ip_conntrack_info ctinfo;
        unsigned int ret, protoff;
-       unsigned int extoff = (u8*)((*pskb)->nh.ipv6h + 1)
-                             - (*pskb)->data;
-       unsigned char pnum = (*pskb)->nh.ipv6h->nexthdr;
+       unsigned int extoff = (u8 *)(ipv6_hdr(*pskb) + 1) - (*pskb)->data;
+       unsigned char pnum = ipv6_hdr(*pskb)->nexthdr;
 
 
        /* This is where we call the helper: as the packet goes out. */
index 075da4f..0be790d 100644 (file)
@@ -7,13 +7,6 @@
  *
  * Author:
  *     Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *
- * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - ICMPv6 tracking support. Derived from the original ip_conntrack code
- *       net/ipv4/netfilter/ip_conntrack_proto_icmp.c which had the following
- *       copyright information:
- *             (C) 1999-2001 Paul `Rusty' Russell
- *             (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
  */
 
 #include <linux/types.h>
index 15ab1e3..347ab76 100644 (file)
@@ -82,7 +82,7 @@ struct nf_ct_frag6_queue
        struct sk_buff          *fragments;
        int                     len;
        int                     meat;
-       struct timeval          stamp;
+       ktime_t                 stamp;
        unsigned int            csum;
        __u8                    last_in;        /* has first/last segment arrived? */
 #define COMPLETE               4
@@ -353,9 +353,7 @@ nf_ct_frag6_create(unsigned int hash, __be32 id, struct in6_addr *src,                                 str
        ipv6_addr_copy(&fq->saddr, src);
        ipv6_addr_copy(&fq->daddr, dst);
 
-       init_timer(&fq->timer);
-       fq->timer.function = nf_ct_frag6_expire;
-       fq->timer.data = (long) fq;
+       setup_timer(&fq->timer, nf_ct_frag6_expire, (unsigned long)fq);
        spin_lock_init(&fq->lock);
        atomic_set(&fq->refcnt, 1);
 
@@ -400,19 +398,20 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
        }
 
        offset = ntohs(fhdr->frag_off) & ~0x7;
-       end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
-                       ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));
+       end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
+                       ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 
        if ((unsigned int)end > IPV6_MAXPLEN) {
                DEBUGP("offset is too large.\n");
                return -1;
        }
 
-       if (skb->ip_summed == CHECKSUM_COMPLETE)
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
-                                    csum_partial(skb->nh.raw,
-                                                 (u8*)(fhdr + 1) - skb->nh.raw,
+                                    csum_partial(nh, (u8 *)(fhdr + 1) - nh,
                                                  0));
+       }
 
        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
@@ -542,7 +541,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
                fq->fragments = skb;
 
        skb->dev = NULL;
-       skb_get_timestamp(skb, &fq->stamp);
+       fq->stamp = skb->tstamp;
        fq->meat += skb->len;
        atomic_add(skb->truesize, &nf_ct_frag6_mem);
 
@@ -583,7 +582,9 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
        BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0);
 
        /* Unfragmented part is taken from the first segment. */
-       payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr);
+       payload_len = ((head->data - skb_network_header(head)) -
+                      sizeof(struct ipv6hdr) + fq->len -
+                      sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN) {
                DEBUGP("payload len is too large.\n");
                goto out_oversize;
@@ -624,15 +625,15 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 
        /* We have to remove fragment header from datagram and to relocate
         * header in order to calculate ICV correctly. */
-       head->nh.raw[fq->nhoffset] = head->h.raw[0];
+       skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
-       head->mac.raw += sizeof(struct frag_hdr);
-       head->nh.raw += sizeof(struct frag_hdr);
+       head->mac_header += sizeof(struct frag_hdr);
+       head->network_header += sizeof(struct frag_hdr);
 
        skb_shinfo(head)->frag_list = head->next;
-       head->h.raw = head->data;
-       skb_push(head, head->data - head->nh.raw);
+       skb_reset_transport_header(head);
+       skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &nf_ct_frag6_mem);
 
        for (fp=head->next; fp; fp = fp->next) {
@@ -648,12 +649,14 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 
        head->next = NULL;
        head->dev = dev;
-       skb_set_timestamp(head, &fq->stamp);
-       head->nh.ipv6h->payload_len = htons(payload_len);
+       head->tstamp = fq->stamp;
+       ipv6_hdr(head)->payload_len = htons(payload_len);
 
        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
-               head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);
+               head->csum = csum_partial(skb_network_header(head),
+                                         skb_network_header_len(head),
+                                         head->csum);
 
        fq->fragments = NULL;
 
@@ -701,9 +704,10 @@ out_fail:
 static int
 find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
 {
-       u8 nexthdr = skb->nh.ipv6h->nexthdr;
-       u8 prev_nhoff = (u8 *)&skb->nh.ipv6h->nexthdr - skb->data;
-       int start = (u8 *)(skb->nh.ipv6h+1) - skb->data;
+       u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+       const int netoff = skb_network_offset(skb);
+       u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
+       int start = netoff + sizeof(struct ipv6hdr);
        int len = skb->len - start;
        u8 prevhdr = NEXTHDR_IPV6;
 
@@ -759,7 +763,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
        struct sk_buff *ret_skb = NULL;
 
        /* Jumbo payload inhibits frag. header */
-       if (skb->nh.ipv6h->payload_len == 0) {
+       if (ipv6_hdr(skb)->payload_len == 0) {
                DEBUGP("payload len = 0\n");
                return skb;
        }
@@ -780,9 +784,9 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
                goto ret_orig;
        }
 
-       clone->h.raw = clone->data + fhoff;
-       hdr = clone->nh.ipv6h;
-       fhdr = (struct frag_hdr *)clone->h.raw;
+       skb_set_transport_header(clone, fhoff);
+       hdr = ipv6_hdr(clone);
+       fhdr = (struct frag_hdr *)skb_transport_header(clone);
 
        if (!(fhdr->frag_off & htons(0xFFF9))) {
                DEBUGP("Invalid fragment offset\n");
@@ -864,8 +868,7 @@ int nf_ct_frag6_init(void)
        nf_ct_frag6_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
                                   (jiffies ^ (jiffies >> 6)));
 
-       init_timer(&nf_ct_frag6_secret_timer);
-       nf_ct_frag6_secret_timer.function = nf_ct_frag6_secret_rebuild;
+       setup_timer(&nf_ct_frag6_secret_timer, nf_ct_frag6_secret_rebuild, 0);
        nf_ct_frag6_secret_timer.expires = jiffies
                                           + nf_ct_frag6_secret_interval;
        add_timer(&nf_ct_frag6_secret_timer);
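
Both timer hunks above move to setup_timer(); a short sketch of the collapsed form, with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_expire(unsigned long data)
{
        /* ... tear down the object encoded in 'data' ... */
}

/*
 * Sketch only: example_arm() and example_expire() are hypothetical.
 * setup_timer() replaces the open-coded init_timer() plus ->function
 * and ->data assignments removed above.
 */
static void example_arm(struct timer_list *timer, unsigned long obj)
{
        setup_timer(timer, example_expire, obj);
        timer->expires = jiffies + HZ;  /* fire in roughly one second */
        add_timer(timer);
}
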
index fa3fb50..acb306a 100644 (file)
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stddef.h>
+#include <net/ip.h>
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <net/transp_v6.h>
 #include <net/ipv6.h>
 
-#ifdef CONFIG_PROC_FS
 static struct proc_dir_entry *proc_net_devsnmp6;
 
 static int fold_prot_inuse(struct proto *proto)
@@ -142,26 +142,13 @@ static struct snmp_mib snmp6_udplite6_list[] = {
        SNMP_MIB_SENTINEL
 };
 
-static unsigned long
-fold_field(void *mib[], int offt)
-{
-       unsigned long res = 0;
-       int i;
-
-       for_each_possible_cpu(i) {
-               res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
-               res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
-       }
-       return res;
-}
-
 static inline void
 snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist)
 {
        int i;
        for (i=0; itemlist[i].name; i++)
                seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
-                               fold_field(mib, itemlist[i].entry));
+                          snmp_fold_field(mib, itemlist[i].entry));
 }
 
 static int snmp6_seq_show(struct seq_file *seq, void *v)
@@ -271,47 +258,3 @@ void ipv6_misc_proc_exit(void)
        proc_net_remove("snmp6");
 }
 
-#else  /* CONFIG_PROC_FS */
-
-
-int snmp6_register_dev(struct inet6_dev *idev)
-{
-       return 0;
-}
-
-int snmp6_unregister_dev(struct inet6_dev *idev)
-{
-       return 0;
-}
-#endif /* CONFIG_PROC_FS */
-
-int snmp6_alloc_dev(struct inet6_dev *idev)
-{
-       int err = -ENOMEM;
-
-       if (!idev || !idev->dev)
-               return -EINVAL;
-
-       if (snmp6_mib_init((void **)idev->stats.ipv6, sizeof(struct ipstats_mib),
-                          __alignof__(struct ipstats_mib)) < 0)
-               goto err_ip;
-       if (snmp6_mib_init((void **)idev->stats.icmpv6, sizeof(struct icmpv6_mib),
-                          __alignof__(struct icmpv6_mib)) < 0)
-               goto err_icmp;
-
-       return 0;
-
-err_icmp:
-       snmp6_mib_free((void **)idev->stats.ipv6);
-err_ip:
-       return err;
-}
-
-int snmp6_free_dev(struct inet6_dev *idev)
-{
-       snmp6_mib_free((void **)idev->stats.icmpv6);
-       snmp6_mib_free((void **)idev->stats.ipv6);
-       return 0;
-}
-
-
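
The hunks above drop the private fold_field() in favour of the shared snmp_fold_field() declared in <net/ip.h> (hence the added include); a minimal sketch mirroring the new per-item loop, with a hypothetical function name:

#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/snmp.h>

/*
 * Sketch only: example_show_mib() is hypothetical.  snmp_fold_field()
 * sums the per-CPU counters for one entry of a two-copy SNMP mib.
 */
static void example_show_mib(struct seq_file *seq, void **mib,
                             const struct snmp_mib *items)
{
        int i;

        for (i = 0; items[i].name; i++)
                seq_printf(seq, "%-32s\t%lu\n", items[i].name,
                           snmp_fold_field(mib, items[i].entry));
}
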
index ef43bd5..f929f47 100644 (file)
@@ -60,6 +60,8 @@ int inet6_add_protocol(struct inet6_protocol *prot, unsigned char protocol)
        return ret;
 }
 
+EXPORT_SYMBOL(inet6_add_protocol);
+
 /*
  *     Remove a protocol from the hash tables.
  */
@@ -83,3 +85,5 @@ int inet6_del_protocol(struct inet6_protocol *prot, unsigned char protocol)
 
        return ret;
 }
+
+EXPORT_SYMBOL(inet6_del_protocol);
index 203e069..009a104 100644 (file)
@@ -152,7 +152,7 @@ int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
        int delivered = 0;
        __u8 hash;
 
-       saddr = &skb->nh.ipv6h->saddr;
+       saddr = &ipv6_hdr(skb)->saddr;
        daddr = saddr + 1;
 
        hash = nexthdr & (MAX_INET_PROTOS - 1);
@@ -361,17 +361,18 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               skb_postpull_rcsum(skb, skb->nh.raw,
-                                  skb->h.raw - skb->nh.raw);
-               if (!csum_ipv6_magic(&skb->nh.ipv6h->saddr,
-                                    &skb->nh.ipv6h->daddr,
+               skb_postpull_rcsum(skb, skb_network_header(skb),
+                                  skb_network_header_len(skb));
+               if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                    &ipv6_hdr(skb)->daddr,
                                     skb->len, inet->num, skb->csum))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
-       if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-               skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr,
-                                            &skb->nh.ipv6h->daddr,
-                                            skb->len, inet->num, 0));
+       if (!skb_csum_unnecessary(skb))
+               skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                        &ipv6_hdr(skb)->daddr,
+                                                        skb->len,
+                                                        inet->num, 0));
 
        if (inet->hdrincl) {
                if (skb_checksum_complete(skb)) {
@@ -420,7 +421,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
                msg->msg_flags |= MSG_TRUNC;
        }
 
-       if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
+       if (skb_csum_unnecessary(skb)) {
                err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        } else if (msg->msg_flags&MSG_TRUNC) {
                if (__skb_checksum_complete(skb))
@@ -438,7 +439,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        if (sin6) {
                sin6->sin6_family = AF_INET6;
                sin6->sin6_port = 0;
-               ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
+               ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
                sin6->sin6_flowinfo = 0;
                sin6->sin6_scope_id = 0;
                if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -488,7 +489,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
                goto out;
 
        offset = rp->offset;
-       total_len = inet_sk(sk)->cork.length - (skb->nh.raw - skb->data);
+       total_len = inet_sk(sk)->cork.length - (skb_network_header(skb) -
+                                               skb->data);
        if (offset >= total_len - 1) {
                err = -EINVAL;
                ip6_flush_pending_frames(sk);
@@ -511,7 +513,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
                        if (csum_skb)
                                continue;
 
-                       len = skb->len - (skb->h.raw - skb->data);
+                       len = skb->len - skb_transport_offset(skb);
                        if (offset >= len) {
                                offset -= len;
                                continue;
@@ -523,7 +525,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
                skb = csum_skb;
        }
 
-       offset += skb->h.raw - skb->data;
+       offset += skb_transport_offset(skb);
        if (skb_copy_bits(skb, offset, &csum, 2))
                BUG();
 
@@ -575,11 +577,13 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
        skb->priority = sk->sk_priority;
        skb->dst = dst_clone(&rt->u.dst);
 
-       skb->nh.ipv6h = iph = (struct ipv6hdr *)skb_put(skb, length);
+       skb_put(skb, length);
+       skb_reset_network_header(skb);
+       iph = ipv6_hdr(skb);
 
        skb->ip_summed = CHECKSUM_NONE;
 
-       skb->h.raw = skb->nh.raw;
+       skb->transport_header = skb->network_header;
        err = memcpy_fromiovecend((void *)iph, from, 0, length);
        if (err)
                goto error_fault;
@@ -878,7 +882,7 @@ static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
                return 0;
        default:
                return -ENOPROTOOPT;
-       };
+       }
 
        return 0;
 }
@@ -903,7 +907,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
                return 0;
        default:
                return -ENOPROTOOPT;
-       };
+       }
 
        return 0;
 }
@@ -957,7 +961,8 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
                default:
                        return ipv6_setsockopt(sk, level, optname, optval,
                                               optlen);
-       };
+       }
+
        return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
 }
 
@@ -978,7 +983,7 @@ static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
        default:
                return compat_ipv6_setsockopt(sk, level, optname,
                                              optval, optlen);
-       };
+       }
        return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
 }
 #endif
@@ -1031,7 +1036,8 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
                default:
                        return ipv6_getsockopt(sk, level, optname, optval,
                                               optlen);
-       };
+       }
+
        return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
 }
 
@@ -1052,7 +1058,7 @@ static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
        default:
                return compat_ipv6_getsockopt(sk, level, optname,
                                              optval, optlen);
-       };
+       }
        return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
 }
 #endif
@@ -1073,7 +1079,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
                        spin_lock_bh(&sk->sk_receive_queue.lock);
                        skb = skb_peek(&sk->sk_receive_queue);
                        if (skb != NULL)
-                               amount = skb->tail - skb->h.raw;
+                               amount = skb->tail - skb->transport_header;
                        spin_unlock_bh(&sk->sk_receive_queue.lock);
                        return put_user(amount, (int __user *)arg);
                }
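
The checksum hunks above use the new skb_csum_unnecessary() test; a brief sketch of the pattern, with a hypothetical helper name:

#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/ipv6.h>

/*
 * Sketch only: example_raw_csum() is hypothetical.  When hardware has
 * not already verified the packet, the IPv6 pseudo-header sum is still
 * seeded with csum_ipv6_magic(), as in the receive path above.
 */
static void example_raw_csum(struct sk_buff *skb, unsigned short proto)
{
        if (skb_csum_unnecessary(skb))
                return;

        skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                 &ipv6_hdr(skb)->daddr,
                                                 skb->len, proto, 0));
}
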
index 7034c54..de795c0 100644 (file)
@@ -88,7 +88,7 @@ struct frag_queue
        int                     len;
        int                     meat;
        int                     iif;
-       struct timeval          stamp;
+       ktime_t                 stamp;
        unsigned int            csum;
        __u8                    last_in;        /* has first/last segment arrived? */
 #define COMPLETE               4
@@ -430,19 +430,24 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                goto err;
 
        offset = ntohs(fhdr->frag_off) & ~0x7;
-       end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
-                       ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));
+       end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
+                       ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 
        if ((unsigned int)end > IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INHDRERRORS);
-               icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
+               icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
+                                 ((u8 *)&fhdr->frag_off -
+                                  skb_network_header(skb)));
                return;
        }
 
-       if (skb->ip_summed == CHECKSUM_COMPLETE)
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
-                                    csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0));
+                                    csum_partial(nh, (u8 *)(fhdr + 1) - nh,
+                                                 0));
+       }
 
        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
@@ -562,7 +567,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
        if (skb->dev)
                fq->iif = skb->dev->ifindex;
        skb->dev = NULL;
-       skb_get_timestamp(skb, &fq->stamp);
+       fq->stamp = skb->tstamp;
        fq->meat += skb->len;
        atomic_add(skb->truesize, &ip6_frag_mem);
 
@@ -605,7 +610,9 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
        BUG_TRAP(FRAG6_CB(head)->offset == 0);
 
        /* Unfragmented part is taken from the first segment. */
-       payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr);
+       payload_len = ((head->data - skb_network_header(head)) -
+                      sizeof(struct ipv6hdr) + fq->len -
+                      sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;
 
@@ -639,15 +646,15 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
        /* We have to remove fragment header from datagram and to relocate
         * header in order to calculate ICV correctly. */
        nhoff = fq->nhoffset;
-       head->nh.raw[nhoff] = head->h.raw[0];
+       skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
-       head->mac.raw += sizeof(struct frag_hdr);
-       head->nh.raw += sizeof(struct frag_hdr);
+       head->mac_header += sizeof(struct frag_hdr);
+       head->network_header += sizeof(struct frag_hdr);
 
        skb_shinfo(head)->frag_list = head->next;
-       head->h.raw = head->data;
-       skb_push(head, head->data - head->nh.raw);
+       skb_reset_transport_header(head);
+       skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &ip6_frag_mem);
 
        for (fp=head->next; fp; fp = fp->next) {
@@ -663,15 +670,17 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
 
        head->next = NULL;
        head->dev = dev;
-       skb_set_timestamp(head, &fq->stamp);
-       head->nh.ipv6h->payload_len = htons(payload_len);
+       head->tstamp = fq->stamp;
+       ipv6_hdr(head)->payload_len = htons(payload_len);
        IP6CB(head)->nhoff = nhoff;
 
        *skb_in = head;
 
        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
-               head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);
+               head->csum = csum_partial(skb_network_header(head),
+                                         skb_network_header_len(head),
+                                         head->csum);
 
        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
@@ -699,33 +708,34 @@ static int ipv6_frag_rcv(struct sk_buff **skbp)
        struct net_device *dev = skb->dev;
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
-       struct ipv6hdr *hdr;
-
-       hdr = skb->nh.ipv6h;
+       struct ipv6hdr *hdr = ipv6_hdr(skb);
 
        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);
 
        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len==0) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
-               icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
+               icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
+                                 skb_network_header_len(skb));
                return -1;
        }
-       if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) {
+       if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
+                                sizeof(struct frag_hdr)))) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
-               icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
+               icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
+                                 skb_network_header_len(skb));
                return -1;
        }
 
-       hdr = skb->nh.ipv6h;
-       fhdr = (struct frag_hdr *)skb->h.raw;
+       hdr = ipv6_hdr(skb);
+       fhdr = (struct frag_hdr *)skb_transport_header(skb);
 
        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
-               skb->h.raw += sizeof(struct frag_hdr);
+               skb->transport_header += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);
 
-               IP6CB(skb)->nhoff = (u8*)fhdr - skb->nh.raw;
+               IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                return 1;
        }
 
index aebb4e2..b46ad53 100644 (file)
@@ -575,6 +575,8 @@ struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
        return NULL;
 }
 
+EXPORT_SYMBOL(rt6_lookup);
+
 /* ip6_ins_rt is called with FREE table->tb6_lock.
    It takes a new route entry; if the addition fails for any reason, the
    route is freed. In any case, if the caller does not hold it, it may
@@ -724,7 +726,7 @@ out2:
 
 void ip6_route_input(struct sk_buff *skb)
 {
-       struct ipv6hdr *iph = skb->nh.ipv6h;
+       struct ipv6hdr *iph = ipv6_hdr(skb);
        int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct flowi fl = {
                .iif = skb->dev->ifindex,
@@ -829,6 +831,7 @@ struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
        return fib6_rule_lookup(fl, flags, ip6_pol_route_output);
 }
 
+EXPORT_SYMBOL(ip6_route_output);
 
 /*
  *     Destination cache support functions
@@ -1757,7 +1760,7 @@ int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
                rtnl_unlock();
 
                return err;
-       };
+       }
 
        return -EINVAL;
 }
@@ -1772,7 +1775,7 @@ static inline int ip6_pkt_drop(struct sk_buff *skb, int code,
        int type;
        switch (ipstats_mib_noroutes) {
        case IPSTATS_MIB_INNOROUTES:
-               type = ipv6_addr_type(&skb->nh.ipv6h->daddr);
+               type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
                if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) {
                        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS);
                        break;
@@ -2012,7 +2015,7 @@ errout:
        return err;
 }
 
-int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 {
        struct fib6_config cfg;
        int err;
@@ -2024,7 +2027,7 @@ int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
        return ip6_route_del(&cfg);
 }
 
-int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 {
        struct fib6_config cfg;
        int err;
@@ -2161,7 +2164,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
                     prefix, NLM_F_MULTI);
 }
 
-int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
+static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
 {
        struct nlattr *tb[RTA_MAX+1];
        struct rt6_info *rt;
@@ -2215,7 +2218,7 @@ int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
        /* Reserve room for dummy headers; this skb can pass
           through a good chunk of the routing engine.
         */
-       skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
        skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
 
        rt = (struct rt6_info*) ip6_route_output(NULL, &fl);
@@ -2486,8 +2489,9 @@ ctl_table ipv6_route_table[] = {
 
 void __init ip6_route_init(void)
 {
+#ifdef         CONFIG_PROC_FS
        struct proc_dir_entry *p;
-
+#endif
        ip6_dst_ops.kmem_cachep =
                kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
                                  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
@@ -2505,6 +2509,10 @@ void __init ip6_route_init(void)
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        fib6_rules_init();
 #endif
+
+       __rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL);
+       __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL);
+       __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL);
 }
 
 void ip6_route_cleanup(void)
index 08d6ed3..1efa95a 100644 (file)
@@ -99,10 +99,10 @@ static struct ip_tunnel * ipip6_tunnel_lookup(__be32 remote, __be32 local)
        return NULL;
 }
 
-static struct ip_tunnel ** ipip6_bucket(struct ip_tunnel *t)
+static struct ip_tunnel **__ipip6_bucket(struct ip_tunnel_parm *parms)
 {
-       __be32 remote = t->parms.iph.daddr;
-       __be32 local = t->parms.iph.saddr;
+       __be32 remote = parms->iph.daddr;
+       __be32 local = parms->iph.saddr;
        unsigned h = 0;
        int prio = 0;
 
@@ -117,6 +117,11 @@ static struct ip_tunnel ** ipip6_bucket(struct ip_tunnel *t)
        return &tunnels[prio][h];
 }
 
+static inline struct ip_tunnel **ipip6_bucket(struct ip_tunnel *t)
+{
+       return __ipip6_bucket(&t->parms);
+}
+
 static void ipip6_tunnel_unlink(struct ip_tunnel *t)
 {
        struct ip_tunnel **tp;
@@ -147,19 +152,9 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int
        __be32 local = parms->iph.saddr;
        struct ip_tunnel *t, **tp, *nt;
        struct net_device *dev;
-       unsigned h = 0;
-       int prio = 0;
        char name[IFNAMSIZ];
 
-       if (remote) {
-               prio |= 2;
-               h ^= HASH(remote);
-       }
-       if (local) {
-               prio |= 1;
-               h ^= HASH(local);
-       }
-       for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
+       for (tp = __ipip6_bucket(parms); (t = *tp) != NULL; tp = &t->next) {
                if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
                        return t;
        }
@@ -224,8 +219,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
    ICMP in the real Internet is absolutely infeasible.
  */
        struct iphdr *iph = (struct iphdr*)skb->data;
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        struct ip_tunnel *t;
        int err;
 
@@ -280,8 +275,8 @@ out:
        struct iphdr *iph = (struct iphdr*)dp;
        int hlen = iph->ihl<<2;
        struct ipv6hdr *iph6;
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        int rel_type = 0;
        int rel_code = 0;
        int rel_info = 0;
@@ -296,14 +291,14 @@ out:
        default:
                return;
        case ICMP_PARAMETERPROB:
-               if (skb->h.icmph->un.gateway < hlen)
+               if (icmp_hdr(skb)->un.gateway < hlen)
                        return;
 
                /* So... This guy found something strange INSIDE the encapsulated
                   packet. Well, he is a fool, but what can we do?
                 */
                rel_type = ICMPV6_PARAMPROB;
-               rel_info = skb->h.icmph->un.gateway - hlen;
+               rel_info = icmp_hdr(skb)->un.gateway - hlen;
                break;
 
        case ICMP_DEST_UNREACH:
@@ -340,7 +335,7 @@ out:
        dst_release(skb2->dst);
        skb2->dst = NULL;
        skb_pull(skb2, skb->data - (u8*)iph6);
-       skb2->nh.raw = skb2->data;
+       skb_reset_network_header(skb2);
 
        /* Try to guess incoming interface */
        rt6i = rt6_lookup(&iph6->saddr, NULL, NULL, 0);
@@ -366,7 +361,7 @@ out:
 static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
 {
        if (INET_ECN_is_ce(iph->tos))
-               IP6_ECN_set_ce(skb->nh.ipv6h);
+               IP6_ECN_set_ce(ipv6_hdr(skb));
 }
 
 static int ipip6_rcv(struct sk_buff *skb)
@@ -377,13 +372,13 @@ static int ipip6_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto out;
 
-       iph = skb->nh.iph;
+       iph = ip_hdr(skb);
 
        read_lock(&ipip6_lock);
        if ((tunnel = ipip6_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) {
                secpath_reset(skb);
-               skb->mac.raw = skb->nh.raw;
-               skb->nh.raw = skb->data;
+               skb->mac_header = skb->network_header;
+               skb_reset_network_header(skb);
                IPCB(skb)->flags = 0;
                skb->protocol = htons(ETH_P_IPV6);
                skb->pkt_type = PACKET_HOST;
@@ -430,7 +425,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net_device_stats *stats = &tunnel->stat;
        struct iphdr  *tiph = &tunnel->parms.iph;
-       struct ipv6hdr *iph6 = skb->nh.ipv6h;
+       struct ipv6hdr *iph6 = ipv6_hdr(skb);
        u8     tos = tunnel->parms.iph.tos;
        struct rtable *rt;                      /* Route to the other host */
        struct net_device *tdev;                        /* Device to other host */
@@ -468,7 +463,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                addr_type = ipv6_addr_type(addr6);
 
                if (addr_type == IPV6_ADDR_ANY) {
-                       addr6 = &skb->nh.ipv6h->daddr;
+                       addr6 = &ipv6_hdr(skb)->daddr;
                        addr_type = ipv6_addr_type(addr6);
                }
 
@@ -550,11 +545,12 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                        skb_set_owner_w(new_skb, skb->sk);
                dev_kfree_skb(skb);
                skb = new_skb;
-               iph6 = skb->nh.ipv6h;
+               iph6 = ipv6_hdr(skb);
        }
 
-       skb->h.raw = skb->nh.raw;
-       skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
+       skb->transport_header = skb->network_header;
+       skb_push(skb, sizeof(struct iphdr));
+       skb_reset_network_header(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags = 0;
        dst_release(skb->dst);
@@ -564,7 +560,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
         *      Push down and install the IPIP header.
         */
 
-       iph                     =       skb->nh.iph;
+       iph                     =       ip_hdr(skb);
        iph->version            =       4;
        iph->ihl                =       sizeof(struct iphdr)>>2;
        if (mtu > IPV6_MIN_MTU)
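Most of the churn in this file, and in the TCP/UDP/xfrm hunks that follow, is the mechanical move from the old skb->nh/skb->h union pointers to the offset-based helpers: ipv6_hdr(), ip_hdr(), icmp_hdr(), skb_reset_network_header(), skb_set_transport_header() and friends. As an illustration only, using a toy packet type rather than the real sk_buff, the sketch below shows why storing header offsets and recomputing the pointer on demand stays valid when the data pointer moves (for example across a push):

    #include <stdio.h>
    #include <string.h>

    /* toy stand-in: headers are offsets from head, not raw pointers */
    struct pkt {
            unsigned char  buf[128];
            unsigned char *head, *data;
            int            network_header;      /* offset from head */
    };

    static void pkt_reset_network_header(struct pkt *p)
    {
            p->network_header = p->data - p->head;
    }

    static unsigned char *pkt_network_header(const struct pkt *p)
    {
            return p->head + p->network_header;
    }

    static void pkt_push(struct pkt *p, int len)    /* like skb_push() */
    {
            p->data -= len;
    }

    int main(void)
    {
            struct pkt p;

            p.head = p.buf;
            p.data = p.buf + 32;                    /* 32 bytes of headroom */
            memcpy(p.data, "IPV6HDR", 8);
            pkt_reset_network_header(&p);

            pkt_push(&p, 8);                        /* prepend an outer header... */
            /* ...the offset-based accessor still finds the inner header */
            printf("network header: %s\n", (char *)pkt_network_header(&p));
            return 0;
    }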
index 92f9992..e2f25ea 100644 (file)
@@ -115,10 +115,10 @@ static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
 
 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
 {
-       return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
-                                           skb->nh.ipv6h->saddr.s6_addr32,
-                                           skb->h.th->dest,
-                                           skb->h.th->source);
+       return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
+                                           ipv6_hdr(skb)->saddr.s6_addr32,
+                                           tcp_hdr(skb)->dest,
+                                           tcp_hdr(skb)->source);
 }
 
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
@@ -486,7 +486,9 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
                        struct sk_buff *pktopts = treq->pktopts;
                        struct inet6_skb_parm *rxopt = IP6CB(pktopts);
                        if (rxopt->srcrt)
-                               opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
+                               opt = ipv6_invert_rthdr(sk,
+                         (struct ipv6_rt_hdr *)(skb_network_header(pktopts) +
+                                                rxopt->srcrt));
                }
 
                if (opt && opt->srcrt) {
@@ -507,7 +509,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 
        skb = tcp_make_synack(sk, dst, req);
        if (skb) {
-               struct tcphdr *th = skb->h.th;
+               struct tcphdr *th = tcp_hdr(skb);
 
                th->check = tcp_v6_check(th, skb->len,
                                         &treq->loc_addr, &treq->rmt_addr,
@@ -835,8 +837,8 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
 {
        __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
-       struct ipv6hdr *ip6h = skb->nh.ipv6h;
-       struct tcphdr *th = skb->h.th;
+       struct ipv6hdr *ip6h = ipv6_hdr(skb);
+       struct tcphdr *th = tcp_hdr(skb);
        int length = (th->doff << 2) - sizeof (*th);
        int genhash;
        u8 *ptr;
@@ -944,10 +946,11 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
+               skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
@@ -964,12 +967,13 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(*th)))
                return -EINVAL;
 
-       ipv6h = skb->nh.ipv6h;
-       th = skb->h.th;
+       ipv6h = ipv6_hdr(skb);
+       th = tcp_hdr(skb);
 
        th->check = 0;
        th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
                                     IPPROTO_TCP, 0);
+       skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;
        return 0;
@@ -977,7 +981,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 
 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th, *t1;
+       struct tcphdr *th = tcp_hdr(skb), *t1;
        struct sk_buff *buff;
        struct flowi fl;
        int tot_len = sizeof(*th);
@@ -993,7 +997,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 
 #ifdef CONFIG_TCP_MD5SIG
        if (sk)
-               key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr);
+               key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
        else
                key = NULL;
 
@@ -1037,20 +1041,18 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_MD5SIG << 8) |
                               TCPOLEN_MD5SIG);
-               tcp_v6_do_calc_md5_hash((__u8*)&opt[1],
-                                       key,
-                                       &skb->nh.ipv6h->daddr,
-                                       &skb->nh.ipv6h->saddr,
-                                       t1, IPPROTO_TCP,
-                                       tot_len);
+               tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
+                                       &ipv6_hdr(skb)->daddr,
+                                       &ipv6_hdr(skb)->saddr,
+                                       t1, IPPROTO_TCP, tot_len);
        }
 #endif
 
        buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
 
        memset(&fl, 0, sizeof(fl));
-       ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
-       ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
+       ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
+       ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
 
        t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
                                    sizeof(*t1), IPPROTO_TCP,
@@ -1079,7 +1081,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
                            struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
 {
-       struct tcphdr *th = skb->h.th, *t1;
+       struct tcphdr *th = tcp_hdr(skb), *t1;
        struct sk_buff *buff;
        struct flowi fl;
        int tot_len = sizeof(struct tcphdr);
@@ -1091,7 +1093,7 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
 
 #ifdef CONFIG_TCP_MD5SIG
        if (!tw && skb->sk) {
-               key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr);
+               key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
        } else if (tw && tw->tw_md5_keylen) {
                tw_key.key = tw->tw_md5_key;
                tw_key.keylen = tw->tw_md5_keylen;
@@ -1140,20 +1142,18 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
        if (key) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
-               tcp_v6_do_calc_md5_hash((__u8 *)topt,
-                                       key,
-                                       &skb->nh.ipv6h->daddr,
-                                       &skb->nh.ipv6h->saddr,
-                                       t1, IPPROTO_TCP,
-                                       tot_len);
+               tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
+                                       &ipv6_hdr(skb)->daddr,
+                                       &ipv6_hdr(skb)->saddr,
+                                       t1, IPPROTO_TCP, tot_len);
        }
 #endif
 
        buff->csum = csum_partial((char *)t1, tot_len, 0);
 
        memset(&fl, 0, sizeof(fl));
-       ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
-       ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
+       ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
+       ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
 
        t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
                                    tot_len, IPPROTO_TCP,
@@ -1197,18 +1197,18 @@ static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
        struct request_sock *req, **prev;
-       const struct tcphdr *th = skb->h.th;
+       const struct tcphdr *th = tcp_hdr(skb);
        struct sock *nsk;
 
        /* Find possible connection requests. */
        req = inet6_csk_search_req(sk, &prev, th->source,
-                                  &skb->nh.ipv6h->saddr,
-                                  &skb->nh.ipv6h->daddr, inet6_iif(skb));
+                                  &ipv6_hdr(skb)->saddr,
+                                  &ipv6_hdr(skb)->daddr, inet6_iif(skb));
        if (req)
                return tcp_check_req(sk, skb, req, prev);
 
-       nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
-                                        th->source, &skb->nh.ipv6h->daddr,
+       nsk = __inet6_lookup_established(&tcp_hashinfo, &ipv6_hdr(skb)->saddr,
+                                        th->source, &ipv6_hdr(skb)->daddr,
                                         ntohs(th->dest), inet6_iif(skb));
 
        if (nsk) {
@@ -1275,9 +1275,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_openreq_init(req, &tmp_opt, skb);
 
        treq = inet6_rsk(req);
-       ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
-       ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
-       TCP_ECN_create_request(req, skb->h.th);
+       ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
+       ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
+       TCP_ECN_create_request(req, tcp_hdr(skb));
        treq->pktopts = NULL;
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -1363,7 +1363,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = inet6_iif(skb);
-               newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;
+               newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
 
                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
@@ -1389,7 +1389,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
            opt == NULL && treq->pktopts) {
                struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
                if (rxopt->srcrt)
-                       opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
+                       opt = ipv6_invert_rthdr(sk,
+                  (struct ipv6_rt_hdr *)(skb_network_header(treq->pktopts) +
+                                         rxopt->srcrt));
        }
 
        if (dst == NULL) {
@@ -1469,7 +1471,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = inet6_iif(skb);
-       newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
+       newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
 
        /* Clone native IPv6 options from listening socket (if any)
 
@@ -1528,15 +1530,16 @@ out:
 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
-                                 &skb->nh.ipv6h->daddr,skb->csum)) {
+               if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
+                                 &ipv6_hdr(skb)->daddr, skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        return 0;
                }
        }
 
-       skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
-                                 &skb->nh.ipv6h->daddr, 0));
+       skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
+                                             &ipv6_hdr(skb)->saddr,
+                                             &ipv6_hdr(skb)->daddr, 0));
 
        if (skb->len <= 76) {
                return __skb_checksum_complete(skb);
@@ -1600,7 +1603,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
-               if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
+               if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
                        goto reset;
                TCP_CHECK_TIMER(sk);
                if (opt_skb)
@@ -1608,7 +1611,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                return 0;
        }
 
-       if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
+       if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
                goto csum_err;
 
        if (sk->sk_state == TCP_LISTEN) {
@@ -1631,7 +1634,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
        }
 
        TCP_CHECK_TIMER(sk);
-       if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
+       if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
                goto reset;
        TCP_CHECK_TIMER(sk);
        if (opt_skb)
@@ -1664,7 +1667,7 @@ ipv6_pktoptions:
                if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
                        np->mcast_oif = inet6_iif(opt_skb);
                if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
-                       np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
+                       np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
                if (ipv6_opt_accepted(sk, opt_skb)) {
                        skb_set_owner_r(opt_skb, sk);
                        opt_skb = xchg(&np->pktoptions, opt_skb);
@@ -1697,28 +1700,27 @@ static int tcp_v6_rcv(struct sk_buff **pskb)
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
 
        if (th->doff < sizeof(struct tcphdr)/4)
                goto bad_packet;
        if (!pskb_may_pull(skb, th->doff*4))
                goto discard_it;
 
-       if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
-            tcp_v6_checksum_init(skb)))
+       if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
                goto bad_packet;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff*4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->when = 0;
-       TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
+       TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
        TCP_SKB_CB(skb)->sacked = 0;
 
-       sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
-                           &skb->nh.ipv6h->daddr, ntohs(th->dest),
+       sk = __inet6_lookup(&tcp_hashinfo, &ipv6_hdr(skb)->saddr, th->source,
+                           &ipv6_hdr(skb)->daddr, ntohs(th->dest),
                            inet6_iif(skb));
 
        if (!sk)
@@ -1798,7 +1800,7 @@ do_time_wait:
                struct sock *sk2;
 
                sk2 = inet6_lookup_listener(&tcp_hashinfo,
-                                           &skb->nh.ipv6h->daddr,
+                                           &ipv6_hdr(skb)->daddr,
                                            ntohs(th->dest), inet6_iif(skb));
                if (sk2 != NULL) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
@@ -1945,6 +1947,7 @@ static int tcp_v6_destroy_sock(struct sock *sk)
        return inet6_destroy_sock(sk);
 }
 
+#ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
                         struct sock *sk, struct request_sock *req, int i, int uid)
@@ -2061,7 +2064,6 @@ static void get_timewait6_sock(struct seq_file *seq,
                   atomic_read(&tw->tw_refcnt), tw);
 }
 
-#ifdef CONFIG_PROC_FS
 static int tcp6_seq_show(struct seq_file *seq, void *v)
 {
        struct tcp_iter_state *st;
index f590db5..b083c09 100644 (file)
@@ -93,10 +93,10 @@ static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,
                                        continue;
                                score++;
                        }
-                       if(score == 4) {
+                       if (score == 4) {
                                result = sk;
                                break;
-                       } else if(score > badness) {
+                       } else if (score > badness) {
                                result = sk;
                                badness = score;
                        }
@@ -120,8 +120,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
-       size_t copied;
-       int err, copy_only, is_udplite = IS_UDPLITE(sk);
+       unsigned int ulen, copied;
+       int err;
+       int is_udplite = IS_UDPLITE(sk);
 
        if (addr_len)
                *addr_len=sizeof(struct sockaddr_in6);
@@ -134,24 +135,25 @@ try_again:
        if (!skb)
                goto out;
 
-       copied = skb->len - sizeof(struct udphdr);
-       if (copied > len) {
-               copied = len;
+       ulen = skb->len - sizeof(struct udphdr);
+       copied = len;
+       if (copied > ulen)
+               copied = ulen;
+       else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;
-       }
 
        /*
-        *      Decide whether to checksum and/or copy data.
+        * If checksum is needed at all, try to do it while copying the
+        * data.  If the data is truncated, or if we only want a partial
+        * coverage checksum (UDP-Lite), do it before the copy.
         */
-       copy_only = (skb->ip_summed==CHECKSUM_UNNECESSARY);
 
-       if (is_udplite  ||  (!copy_only  &&  msg->msg_flags&MSG_TRUNC)) {
-               if (__udp_lib_checksum_complete(skb))
+       if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+               if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
-               copy_only = 1;
        }
 
-       if (copy_only)
+       if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
                                              msg->msg_iov, copied       );
        else {
@@ -170,15 +172,16 @@ try_again:
 
                sin6 = (struct sockaddr_in6 *) msg->msg_name;
                sin6->sin6_family = AF_INET6;
-               sin6->sin6_port = skb->h.uh->source;
+               sin6->sin6_port = udp_hdr(skb)->source;
                sin6->sin6_flowinfo = 0;
                sin6->sin6_scope_id = 0;
 
                if (skb->protocol == htons(ETH_P_IP))
                        ipv6_addr_set(&sin6->sin6_addr, 0, 0,
-                                     htonl(0xffff), skb->nh.iph->saddr);
+                                     htonl(0xffff), ip_hdr(skb)->saddr);
                else {
-                       ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
+                       ipv6_addr_copy(&sin6->sin6_addr,
+                                      &ipv6_hdr(skb)->saddr);
                        if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                                sin6->sin6_scope_id = IP6CB(skb)->iif;
                }
@@ -194,7 +197,7 @@ try_again:
 
        err = copied;
        if (flags & MSG_TRUNC)
-               err = skb->len - sizeof(struct udphdr);
+               err = ulen;
 
 out_free:
        skb_free_datagram(sk, skb);
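The recvmsg rework above tracks both the datagram payload length (ulen) and the amount actually copied, so MSG_TRUNC can be flagged on a short read and, when the caller passes MSG_TRUNC, the full datagram length is returned instead of the copied length. A small standalone sketch of just that arithmetic (toy flag constant and function name, not the kernel code path):

    #include <stdio.h>

    #define DEMO_MSG_TRUNC 0x20     /* toy flag, mirrors MSG_TRUNC's role */

    /* returns the length reported to the caller;
     * *flags gets DEMO_MSG_TRUNC when the datagram was cut short */
    static unsigned int recv_len(unsigned int ulen, unsigned int len,
                                 int want_trunc_len, int *flags)
    {
            unsigned int copied = len;

            if (copied > ulen)
                    copied = ulen;                  /* buffer bigger than datagram */
            else if (copied < ulen)
                    *flags |= DEMO_MSG_TRUNC;       /* datagram truncated */

            return want_trunc_len ? ulen : copied;
    }

    int main(void)
    {
            int flags = 0;
            unsigned int r = recv_len(1500, 512, 1, &flags);

            printf("reported=%u truncated=%s\n", r,
                   (flags & DEMO_MSG_TRUNC) ? "yes" : "no");
            return 0;
    }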
@@ -279,8 +282,10 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                }
        }
 
-       if (udp_lib_checksum_complete(skb))
-               goto drop;
+       if (sk->sk_filter) {
+               if (udp_lib_checksum_complete(skb))
+                       goto drop;
+       }
 
        if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
                /* Note that an ENOMEM error is charged twice */
@@ -325,7 +330,7 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
                                if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
                                        continue;
                        }
-                       if(!inet6_mc_check(s, loc_addr, rmt_addr))
+                       if (!inet6_mc_check(s, loc_addr, rmt_addr))
                                continue;
                        return s;
                }
@@ -341,7 +346,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
                           struct in6_addr *daddr, struct hlist_head udptable[])
 {
        struct sock *sk, *sk2;
-       const struct udphdr *uh = skb->h.uh;
+       const struct udphdr *uh = udp_hdr(skb);
        int dif;
 
        read_lock(&udp_hash_lock);
@@ -366,9 +371,20 @@ out:
        return 0;
 }
 
-static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh)
-
+static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
+                                int proto)
 {
+       int err;
+
+       UDP_SKB_CB(skb)->partial_cov = 0;
+       UDP_SKB_CB(skb)->cscov = skb->len;
+
+       if (proto == IPPROTO_UDPLITE) {
+               err = udplite_checksum_init(skb, uh);
+               if (err)
+                       return err;
+       }
+
        if (uh->check == 0) {
                /* RFC 2460 section 8.1 says that we SHOULD log
                   this error. Well, it is reasonable.
@@ -377,21 +393,20 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh)
                return 1;
        }
        if (skb->ip_summed == CHECKSUM_COMPLETE &&
-           !csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
-                            skb->len, IPPROTO_UDP, skb->csum             ))
+           !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+                            skb->len, proto, skb->csum))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-               skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr,
-                                                        &skb->nh.ipv6h->daddr,
-                                                        skb->len, IPPROTO_UDP,
-                                                        0));
+       if (!skb_csum_unnecessary(skb))
+               skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                        &ipv6_hdr(skb)->daddr,
+                                                        skb->len, proto, 0));
 
-       return (UDP_SKB_CB(skb)->partial_cov = 0);
+       return 0;
 }
 
 int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[],
-                  int is_udplite)
+                  int proto)
 {
        struct sk_buff *skb = *pskb;
        struct sock *sk;
@@ -403,15 +418,16 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[],
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                goto short_packet;
 
-       saddr = &skb->nh.ipv6h->saddr;
-       daddr = &skb->nh.ipv6h->daddr;
-       uh = skb->h.uh;
+       saddr = &ipv6_hdr(skb)->saddr;
+       daddr = &ipv6_hdr(skb)->daddr;
+       uh = udp_hdr(skb);
 
        ulen = ntohs(uh->len);
        if (ulen > skb->len)
                goto short_packet;
 
-       if(! is_udplite ) {             /* UDP validates ulen. */
+       if (proto == IPPROTO_UDP) {
+               /* UDP validates ulen. */
 
                /* Check for jumbo payload */
                if (ulen == 0)
@@ -423,19 +439,15 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[],
                if (ulen < skb->len) {
                        if (pskb_trim_rcsum(skb, ulen))
                                goto short_packet;
-                       saddr = &skb->nh.ipv6h->saddr;
-                       daddr = &skb->nh.ipv6h->daddr;
-                       uh = skb->h.uh;
+                       saddr = &ipv6_hdr(skb)->saddr;
+                       daddr = &ipv6_hdr(skb)->daddr;
+                       uh = udp_hdr(skb);
                }
-
-               if (udp6_csum_init(skb, uh))
-                       goto discard;
-
-       } else  {                       /* UDP-Lite validates cscov. */
-               if (udplite6_csum_init(skb, uh))
-                       goto discard;
        }
 
+       if (udp6_csum_init(skb, uh, proto))
+               goto discard;
+
        /*
         *      Multicast receive code
         */
@@ -457,33 +469,34 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[],
 
                if (udp_lib_checksum_complete(skb))
                        goto discard;
-               UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite);
+               UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
 
                icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
 
                kfree_skb(skb);
-               return(0);
+               return 0;
        }
 
        /* deliver */
 
        udpv6_queue_rcv_skb(sk, skb);
        sock_put(sk);
-       return(0);
+       return 0;
 
 short_packet:
        LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n",
-                      is_udplite? "-Lite" : "",  ulen, skb->len);
+                      proto == IPPROTO_UDPLITE ? "-Lite" : "",
+                      ulen, skb->len);
 
 discard:
-       UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
+       UDP6_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
        kfree_skb(skb);
-       return(0);
+       return 0;
 }
 
 static __inline__ int udpv6_rcv(struct sk_buff **pskb)
 {
-       return __udp6_lib_rcv(pskb, udp_hash, 0);
+       return __udp6_lib_rcv(pskb, udp_hash, IPPROTO_UDP);
 }
 
 /*
@@ -521,7 +534,7 @@ static int udp_v6_push_pending_frames(struct sock *sk)
        /*
         * Create a UDP header
         */
-       uh = skb->h.uh;
+       uh = udp_hdr(skb);
        uh->source = fl->fl_ip_sport;
        uh->dest = fl->fl_ip_dport;
        uh->len = htons(up->len);
index 629f971..f54016a 100644 (file)
@@ -19,7 +19,7 @@ DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6) __read_mostly;
 
 static int udplitev6_rcv(struct sk_buff **pskb)
 {
-       return __udp6_lib_rcv(pskb, udplite_hash, 1);
+       return __udp6_lib_rcv(pskb, udplite_hash, IPPROTO_UDPLITE);
 }
 
 static void udplitev6_err(struct sk_buff *skb,
index 31f651f..d7ed8aa 100644 (file)
@@ -28,14 +28,14 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
        unsigned int nhoff;
 
        nhoff = IP6CB(skb)->nhoff;
-       nexthdr = skb->nh.raw[nhoff];
+       nexthdr = skb_network_header(skb)[nhoff];
 
        seq = 0;
        if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
                goto drop;
 
        do {
-               struct ipv6hdr *iph = skb->nh.ipv6h;
+               struct ipv6hdr *iph = ipv6_hdr(skb);
 
                if (xfrm_nr == XFRM_MAX_DEPTH)
                        goto drop;
@@ -58,7 +58,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
                if (nexthdr <= 0)
                        goto drop_unlock;
 
-               skb->nh.raw[nhoff] = nexthdr;
+               skb_network_header(skb)[nhoff] = nexthdr;
 
                if (x->props.replay_window)
                        xfrm_replay_advance(x, seq);
@@ -112,8 +112,8 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
                return -1;
        } else {
 #ifdef CONFIG_NETFILTER
-               skb->nh.ipv6h->payload_len = htons(skb->len);
-               __skb_push(skb, skb->data - skb->nh.raw);
+               ipv6_hdr(skb)->payload_len = htons(skb->len);
+               __skb_push(skb, skb->data - skb_network_header(skb));
 
                NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
                        ip6_rcv_finish);
@@ -140,19 +140,19 @@ int xfrm6_rcv(struct sk_buff **pskb)
        return xfrm6_rcv_spi(*pskb, 0);
 }
 
+EXPORT_SYMBOL(xfrm6_rcv);
+
 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
                     xfrm_address_t *saddr, u8 proto)
 {
        struct xfrm_state *x = NULL;
        int wildcard = 0;
-       struct in6_addr any;
        xfrm_address_t *xany;
        struct xfrm_state *xfrm_vec_one = NULL;
        int nh = 0;
        int i = 0;
 
-       ipv6_addr_set(&any, 0, 0, 0, 0);
-       xany = (xfrm_address_t *)&any;
+       xany = (xfrm_address_t *)&in6addr_any;
 
        for (i = 0; i < 3; i++) {
                xfrm_address_t *dst, *src;
@@ -247,3 +247,5 @@ drop:
                xfrm_state_put(xfrm_vec_one);
        return -1;
 }
+
+EXPORT_SYMBOL(xfrm6_input_addr);
index edcfffa..2e61d6d 100644 (file)
@@ -38,17 +38,18 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
        int hdr_len;
 
        skb_push(skb, x->props.header_len);
-       iph = skb->nh.ipv6h;
+       iph = ipv6_hdr(skb);
 
        hdr_len = ip6_find_1stfragopt(skb, &prevhdr);
-       skb->nh.raw = prevhdr - x->props.header_len;
-       skb->h.raw = skb->data + hdr_len;
+       skb_set_network_header(skb,
+                              (prevhdr - x->props.header_len) - skb->data);
+       skb_set_transport_header(skb, hdr_len);
        memmove(skb->data, iph, hdr_len);
 
-       skb->nh.raw = skb->data;
-       top_iph = skb->nh.ipv6h;
-       skb->nh.raw = &top_iph->nexthdr;
-       skb->h.ipv6h = top_iph + 1;
+       skb_reset_network_header(skb);
+       top_iph = ipv6_hdr(skb);
+       skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+       skb->network_header += offsetof(struct ipv6hdr, nexthdr);
 
        ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
        ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
@@ -59,6 +60,7 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        struct ipv6hdr *ip6h;
+       const unsigned char *old_mac;
        int size = sizeof(struct ipv6hdr);
        int err = -EINVAL;
 
@@ -66,13 +68,14 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
                goto out;
 
        skb_push(skb, size);
-       memmove(skb->data, skb->nh.raw, size);
-       skb->nh.raw = skb->data;
+       memmove(skb->data, skb_network_header(skb), size);
+       skb_reset_network_header(skb);
 
-       skb->mac.raw = memmove(skb->data - skb->mac_len,
-                              skb->mac.raw, skb->mac_len);
+       old_mac = skb_mac_header(skb);
+       skb_set_mac_header(skb, -skb->mac_len);
+       memmove(skb_mac_header(skb), old_mac, skb->mac_len);
 
-       ip6h = skb->nh.ipv6h;
+       ip6h = ipv6_hdr(skb);
        ip6h->payload_len = htons(skb->len - size);
        ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
        ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
index 6031c16..6ad6d7a 100644 (file)
@@ -50,11 +50,12 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
        int hdr_len;
 
        skb_push(skb, x->props.header_len);
-       iph = skb->nh.ipv6h;
+       iph = ipv6_hdr(skb);
 
        hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
-       skb->nh.raw = prevhdr - x->props.header_len;
-       skb->h.raw = skb->data + hdr_len;
+       skb_set_network_header(skb,
+                              (prevhdr - x->props.header_len) - skb->data);
+       skb_set_transport_header(skb, hdr_len);
        memmove(skb->data, iph, hdr_len);
        return 0;
 }
index 3a4b39b..c026bfe 100644 (file)
@@ -32,11 +32,12 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
        int hdr_len;
 
        skb_push(skb, x->props.header_len);
-       iph = skb->nh.ipv6h;
+       iph = ipv6_hdr(skb);
 
        hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
-       skb->nh.raw = prevhdr - x->props.header_len;
-       skb->h.raw = skb->data + hdr_len;
+       skb_set_network_header(skb,
+                              (prevhdr - x->props.header_len) - skb->data);
+       skb_set_transport_header(skb, hdr_len);
        memmove(skb->data, iph, hdr_len);
        return 0;
 }
@@ -51,13 +52,16 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
  */
 static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       int ihl = skb->data - skb->h.raw;
+       int ihl = skb->data - skb_transport_header(skb);
 
-       if (skb->h.raw != skb->nh.raw)
-               skb->nh.raw = memmove(skb->h.raw, skb->nh.raw, ihl);
-       skb->nh.ipv6h->payload_len = htons(skb->len + ihl -
+       if (skb->transport_header != skb->network_header) {
+               memmove(skb_transport_header(skb),
+                       skb_network_header(skb), ihl);
+               skb->network_header = skb->transport_header;
+       }
+       ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
                                           sizeof(struct ipv6hdr));
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        return 0;
 }
 
index 0bc866c..a6c0cdf 100644 (file)
@@ -18,8 +18,8 @@
 
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
-       struct ipv6hdr *outer_iph = skb->nh.ipv6h;
-       struct ipv6hdr *inner_iph = skb->h.ipv6h;
+       struct ipv6hdr *outer_iph = ipv6_hdr(skb);
+       struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
 
        if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
                IP6_ECN_set_ce(inner_iph);
@@ -27,8 +27,8 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 
 static inline void ip6ip_ecn_decapsulate(struct sk_buff *skb)
 {
-       if (INET_ECN_is_ce(ipv6_get_dsfield(skb->nh.ipv6h)))
-                       IP_ECN_set_ce(skb->h.ipiph);
+       if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6_hdr(skb))))
+                       IP_ECN_set_ce(ipip_hdr(skb));
 }
 
 /* Add encapsulation header.
@@ -51,12 +51,12 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
        int dsfield;
 
        skb_push(skb, x->props.header_len);
-       iph = skb->nh.ipv6h;
+       iph = ipv6_hdr(skb);
 
-       skb->nh.raw = skb->data;
-       top_iph = skb->nh.ipv6h;
-       skb->nh.raw = &top_iph->nexthdr;
-       skb->h.ipv6h = top_iph + 1;
+       skb_reset_network_header(skb);
+       top_iph = ipv6_hdr(skb);
+       skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+       skb->network_header   += offsetof(struct ipv6hdr, nexthdr);
 
        top_iph->version = 6;
        if (xdst->route->ops->family == AF_INET6) {
@@ -86,9 +86,11 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err = -EINVAL;
+       const unsigned char *old_mac;
+       const unsigned char *nh = skb_network_header(skb);
 
-       if (skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPV6
-           && skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPIP)
+       if (nh[IP6CB(skb)->nhoff] != IPPROTO_IPV6 &&
+           nh[IP6CB(skb)->nhoff] != IPPROTO_IPIP)
                goto out;
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto out;
@@ -97,9 +99,10 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
            (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                goto out;
 
-       if (skb->nh.raw[IP6CB(skb)->nhoff] == IPPROTO_IPV6) {
+       nh = skb_network_header(skb);
+       if (nh[IP6CB(skb)->nhoff] == IPPROTO_IPV6) {
                if (x->props.flags & XFRM_STATE_DECAP_DSCP)
-                       ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h);
+                       ipv6_copy_dscp(ipv6_hdr(skb), ipipv6_hdr(skb));
                if (!(x->props.flags & XFRM_STATE_NOECN))
                        ipip6_ecn_decapsulate(skb);
        } else {
@@ -107,9 +110,10 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
                        ip6ip_ecn_decapsulate(skb);
                skb->protocol = htons(ETH_P_IP);
        }
-       skb->mac.raw = memmove(skb->data - skb->mac_len,
-                              skb->mac.raw, skb->mac_len);
-       skb->nh.raw = skb->data;
+       old_mac = skb_mac_header(skb);
+       skb_set_mac_header(skb, -skb->mac_len);
+       memmove(skb_mac_header(skb), old_mac, skb->mac_len);
+       skb_reset_network_header(skb);
        err = 0;
 
 out:
index d6d786b..56364a5 100644 (file)
@@ -23,6 +23,8 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
        return ip6_find_1stfragopt(skb, prevhdr);
 }
 
+EXPORT_SYMBOL(xfrm6_find_1stfragopt);
+
 static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 {
        int mtu, ret = 0;
@@ -76,11 +78,11 @@ static int xfrm6_output_one(struct sk_buff *skb)
                x->curlft.bytes += skb->len;
                x->curlft.packets++;
                if (x->props.mode == XFRM_MODE_ROUTEOPTIMIZATION)
-                       x->lastused = (u64)xtime.tv_sec;
+                       x->lastused = get_seconds();
 
                spin_unlock_bh(&x->lock);
 
-               skb->nh.raw = skb->data;
+               skb_reset_network_header(skb);
 
                if (!(skb->dst = dst_pop(dst))) {
                        err = -EHOSTUNREACH;
index d8a585b..1faa2ea 100644 (file)
@@ -240,7 +240,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
                if (!afinfo) {
                        dst = *dst_p;
                        goto error;
-               };
+               }
+
                dst_prev->output = afinfo->output;
                xfrm_state_put_afinfo(afinfo);
                /* Sheit... I remember I did this right. Apparently,
@@ -270,17 +271,19 @@ error:
 static inline void
 _decode_session6(struct sk_buff *skb, struct flowi *fl)
 {
-       u16 offset = skb->h.raw - skb->nh.raw;
-       struct ipv6hdr *hdr = skb->nh.ipv6h;
+       u16 offset = skb_network_header_len(skb);
+       struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct ipv6_opt_hdr *exthdr;
-       u8 nexthdr = skb->nh.raw[IP6CB(skb)->nhoff];
+       const unsigned char *nh = skb_network_header(skb);
+       u8 nexthdr = nh[IP6CB(skb)->nhoff];
 
        memset(fl, 0, sizeof(struct flowi));
        ipv6_addr_copy(&fl->fl6_dst, &hdr->daddr);
        ipv6_addr_copy(&fl->fl6_src, &hdr->saddr);
 
-       while (pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data)) {
-               exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+       while (pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
+               nh = skb_network_header(skb);
+               exthdr = (struct ipv6_opt_hdr *)(nh + offset);
 
                switch (nexthdr) {
                case NEXTHDR_ROUTING:
@@ -288,7 +291,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl)
                case NEXTHDR_DEST:
                        offset += ipv6_optlen(exthdr);
                        nexthdr = exthdr->nexthdr;
-                       exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+                       exthdr = (struct ipv6_opt_hdr *)(nh + offset);
                        break;
 
                case IPPROTO_UDP:
@@ -296,7 +299,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl)
                case IPPROTO_TCP:
                case IPPROTO_SCTP:
                case IPPROTO_DCCP:
-                       if (pskb_may_pull(skb, skb->nh.raw + offset + 4 - skb->data)) {
+                       if (pskb_may_pull(skb, nh + offset + 4 - skb->data)) {
                                __be16 *ports = (__be16 *)exthdr;
 
                                fl->fl_ip_sport = ports[0];
@@ -306,7 +309,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl)
                        return;
 
                case IPPROTO_ICMPV6:
-                       if (pskb_may_pull(skb, skb->nh.raw + offset + 2 - skb->data)) {
+                       if (pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
                                u8 *icmp = (u8 *)exthdr;
 
                                fl->fl_icmp_type = icmp[0];
@@ -317,7 +320,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl)
 
 #ifdef CONFIG_IPV6_MIP6
                case IPPROTO_MH:
-                       if (pskb_may_pull(skb, skb->nh.raw + offset + 3 - skb->data)) {
+                       if (pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
                                struct ip6_mh *mh;
                                mh = (struct ip6_mh *)exthdr;
 
@@ -335,7 +338,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl)
                        fl->fl_ipsec_spi = 0;
                        fl->proto = nexthdr;
                        return;
-               };
+               }
        }
 }
 
index 93c4223..538499a 100644 (file)
@@ -257,7 +257,7 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm6_tunnel_rcv(struct sk_buff *skb)
 {
-       struct ipv6hdr *iph = skb->nh.ipv6h;
+       struct ipv6hdr *iph = ipv6_hdr(skb);
        __be32 spi;
 
        spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
index cac35a7..392f8bc 100644 (file)
@@ -576,7 +576,9 @@ static struct sk_buff *ipxitf_adjust_skbuff(struct ipx_interface *intrfc,
        skb2 = alloc_skb(len, GFP_ATOMIC);
        if (skb2) {
                skb_reserve(skb2, out_offset);
-               skb2->nh.raw = skb2->h.raw = skb_put(skb2, skb->len);
+               skb_reset_network_header(skb2);
+               skb_reset_transport_header(skb2);
+               skb_put(skb2, skb->len);
                memcpy(ipx_hdr(skb2), ipx_hdr(skb), skb->len);
                memcpy(skb2->cb, skb->cb, sizeof(skb->cb));
        }
@@ -1807,8 +1809,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
                                     copied);
        if (rc)
                goto out_free;
-       if (skb->tstamp.off_sec)
-               skb_get_timestamp(skb, &sk->sk_stamp);
+       if (skb->tstamp.tv64)
+               sk->sk_stamp = skb->tstamp;
 
        msg->msg_namelen = sizeof(*sipx);
 
index 8e1cad9..e16c114 100644 (file)
@@ -203,7 +203,9 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
        skb->sk = sk;
 
        /* Fill in IPX header */
-       skb->h.raw = skb->nh.raw = skb_put(skb, sizeof(struct ipxhdr));
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+       skb_put(skb, sizeof(struct ipxhdr));
        ipx = ipx_hdr(skb);
        ipx->ipx_pktsize = htons(len + sizeof(struct ipxhdr));
        IPX_SKB_CB(skb)->ipx_tctrl = 0;
index eabd683..06c97c6 100644 (file)
@@ -89,7 +89,6 @@ static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb)
 
        self = instance;
        sk = instance;
-       IRDA_ASSERT(sk != NULL, return -1;);
 
        err = sock_queue_rcv_skb(sk, skb);
        if (err) {
@@ -131,15 +130,12 @@ static void irda_disconnect_indication(void *instance, void *sap,
        }
 
        /* Prevent race conditions with irda_release() and irda_shutdown() */
+       bh_lock_sock(sk);
        if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) {
-               lock_sock(sk);
                sk->sk_state     = TCP_CLOSE;
-               sk->sk_err       = ECONNRESET;
                sk->sk_shutdown |= SEND_SHUTDOWN;
 
                sk->sk_state_change(sk);
-               sock_orphan(sk);
-               release_sock(sk);
 
                /* Close our TSAP.
                 * If we leave it open, IrLMP put it back into the list of
@@ -159,6 +155,7 @@ static void irda_disconnect_indication(void *instance, void *sap,
                        self->tsap = NULL;
                }
        }
+       bh_unlock_sock(sk);
 
        /* Note : once we are there, there is not much you want to do
         * with the socket anymore, apart from closing it.
@@ -221,7 +218,7 @@ static void irda_connect_confirm(void *instance, void *sap,
                break;
        default:
                self->max_data_size = irttp_get_max_seg_size(self->tsap);
-       };
+       }
 
        IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __FUNCTION__,
                   self->max_data_size);
@@ -284,7 +281,7 @@ static void irda_connect_indication(void *instance, void *sap,
                break;
        default:
                self->max_data_size = irttp_get_max_seg_size(self->tsap);
-       };
+       }
 
        IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __FUNCTION__,
                   self->max_data_size);
@@ -307,8 +304,6 @@ static void irda_connect_response(struct irda_sock *self)
 
        IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
 
-       IRDA_ASSERT(self != NULL, return;);
-
        skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
                        GFP_ATOMIC);
        if (skb == NULL) {
@@ -338,7 +333,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
 
        self = instance;
        sk = instance;
-       IRDA_ASSERT(sk != NULL, return;);
+       BUG_ON(sk == NULL);
 
        switch (flow) {
        case FLOW_STOP:
@@ -450,7 +445,7 @@ static void irda_discovery_timeout(u_long priv)
        IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
 
        self = (struct irda_sock *) priv;
-       IRDA_ASSERT(self != NULL, return;);
+       BUG_ON(self == NULL);
 
        /* Nothing for the caller */
        self->cachelog = NULL;
@@ -547,8 +542,6 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
 {
        IRDA_DEBUG(2, "%s(%p, %s)\n", __FUNCTION__, self, name);
 
-       IRDA_ASSERT(self != NULL, return -1;);
-
        if (self->iriap) {
                IRDA_WARNING("%s(): busy with a previous query\n",
                             __FUNCTION__);
@@ -636,8 +629,6 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
 
        IRDA_DEBUG(2, "%s(), name=%s\n", __FUNCTION__, name);
 
-       IRDA_ASSERT(self != NULL, return -1;);
-
        /* Ask lmp for the current discovery log
         * Note : we have to use irlmp_get_discoveries(), as opposed
         * to play with the cachelog directly, because while we are
@@ -785,8 +776,6 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        struct irda_sock *self = irda_sk(sk);
        int err;
 
-       IRDA_ASSERT(self != NULL, return -1;);
-
        IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self);
 
        if (addr_len != sizeof(struct sockaddr_irda))
@@ -842,8 +831,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
 
        IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
 
-       IRDA_ASSERT(self != NULL, return -1;);
-
        err = irda_create(newsock, sk->sk_protocol);
        if (err)
                return err;
@@ -874,44 +861,28 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
         * calling us, the data is waiting for us ;-)
         * Jean II
         */
-       skb = skb_dequeue(&sk->sk_receive_queue);
-       if (skb == NULL) {
-               int ret = 0;
-               DECLARE_WAITQUEUE(waitq, current);
+       while (1) {
+               skb = skb_dequeue(&sk->sk_receive_queue);
+               if (skb)
+                       break;
 
                /* Non blocking operation */
                if (flags & O_NONBLOCK)
                        return -EWOULDBLOCK;
 
-               /* The following code is a cut'n'paste of the
-                * wait_event_interruptible() macro.
-                * We don't us the macro because the condition has
-                * side effects : we want to make sure that only one
-                * skb get dequeued - Jean II */
-               add_wait_queue(sk->sk_sleep, &waitq);
-               for (;;) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       skb = skb_dequeue(&sk->sk_receive_queue);
-                       if (skb != NULL)
-                               break;
-                       if (!signal_pending(current)) {
-                               schedule();
-                               continue;
-                       }
-                       ret = -ERESTARTSYS;
-                       break;
-               }
-               current->state = TASK_RUNNING;
-               remove_wait_queue(sk->sk_sleep, &waitq);
-               if(ret)
-                       return -ERESTARTSYS;
+               err = wait_event_interruptible(*(sk->sk_sleep),
+                                       skb_peek(&sk->sk_receive_queue));
+               if (err)
+                       return err;
        }
 
        newsk = newsock->sk;
+       if (newsk == NULL)
+               return -EIO;
+
        newsk->sk_state = TCP_ESTABLISHED;
 
        new = irda_sk(newsk);
-       IRDA_ASSERT(new != NULL, return -1;);
 
        /* Now attach up the new socket */
        new->tsap = irttp_dup(self->tsap, new);
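The accept() path above replaces the hand-rolled add_wait_queue()/schedule() loop with wait_event_interruptible() around skb_dequeue(), retrying until a connection skb has actually been dequeued. As a loose userspace analogy only (pthreads condition variables instead of kernel wait queues; every name below is invented for the demo), the same wait-until-nonempty-then-pop shape looks like:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int queued;                      /* toy "receive queue" depth */

    static void *producer(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            queued++;                       /* a "connection" arrives */
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    static int accept_one(void)
    {
            pthread_mutex_lock(&lock);
            while (queued == 0)             /* sleep until the queue is non-empty */
                    pthread_cond_wait(&cond, &lock);
            queued--;                       /* dequeue exactly one entry */
            pthread_mutex_unlock(&lock);
            return 0;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, producer, NULL);
            accept_one();
            pthread_join(t, NULL);
            puts("accepted");
            return 0;
    }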
@@ -1062,7 +1033,8 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
 
        if (sk->sk_state != TCP_ESTABLISHED) {
                sock->state = SS_UNCONNECTED;
-               return sock_error(sk);  /* Always set at this point */
+               err = sock_error(sk);
+               return err ? err : -ECONNRESET;
        }
 
        sock->state = SS_CONNECTED;
@@ -1172,8 +1144,6 @@ static void irda_destroy_socket(struct irda_sock *self)
 {
        IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self);
 
-       IRDA_ASSERT(self != NULL, return;);
-
        /* Unregister with IrLMP */
        irlmp_unregister_client(self->ckey);
        irlmp_unregister_service(self->skey);
@@ -1275,7 +1245,6 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
        struct sock *sk = sock->sk;
        struct irda_sock *self;
        struct sk_buff *skb;
-       unsigned char *asmptr;
        int err;
 
        IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
@@ -1293,7 +1262,6 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
                return -ENOTCONN;
 
        self = irda_sk(sk);
-       IRDA_ASSERT(self != NULL, return -1;);
 
        /* Check if IrTTP is wants us to slow down */
 
@@ -1318,9 +1286,9 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
                return -ENOBUFS;
 
        skb_reserve(skb, self->max_header_size + 16);
-
-       asmptr = skb->h.raw = skb_put(skb, len);
-       err = memcpy_fromiovec(asmptr, msg->msg_iov, len);
+       skb_reset_transport_header(skb);
+       skb_put(skb, len);
+       err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
        if (err) {
                kfree_skb(skb);
                return err;
@@ -1356,16 +1324,16 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
 
        IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
 
-       IRDA_ASSERT(self != NULL, return -1;);
-       IRDA_ASSERT(!sock_error(sk), return -1;);
+       if ((err = sock_error(sk)) < 0)
+               return err;
 
        skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                flags & MSG_DONTWAIT, &err);
        if (!skb)
                return err;
 
-       skb->h.raw = skb->data;
-       copied     = skb->len;
+       skb_reset_transport_header(skb);
+       copied = skb->len;
 
        if (copied > size) {
                IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
@@ -1404,13 +1372,13 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
        struct irda_sock *self = irda_sk(sk);
        int noblock = flags & MSG_DONTWAIT;
        size_t copied = 0;
-       int target = 1;
-       DECLARE_WAITQUEUE(waitq, current);
+       int target, err;
+       long timeo;
 
        IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
 
-       IRDA_ASSERT(self != NULL, return -1;);
-       IRDA_ASSERT(!sock_error(sk), return -1;);
+       if ((err = sock_error(sk)) < 0)
+               return err;
 
        if (sock->flags & __SO_ACCEPTCON)
                return(-EINVAL);
@@ -1418,8 +1386,8 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
        if (flags & MSG_OOB)
                return -EOPNOTSUPP;
 
-       if (flags & MSG_WAITALL)
-               target = size;
+       target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
+       timeo = sock_rcvtimeo(sk, noblock);
 
        msg->msg_namelen = 0;
 
@@ -1427,42 +1395,37 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
                int chunk;
                struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
 
-               if (skb==NULL) {
+               if (skb == NULL) {
+                       DEFINE_WAIT(wait);
                        int ret = 0;
 
                        if (copied >= target)
                                break;
 
-                       /* The following code is a cut'n'paste of the
-                        * wait_event_interruptible() macro.
-                        * We don't us the macro because the test condition
-                        * is messy. - Jean II */
-                       set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-                       add_wait_queue(sk->sk_sleep, &waitq);
-                       set_current_state(TASK_INTERRUPTIBLE);
+                       prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 
                        /*
                         *      POSIX 1003.1g mandates this order.
                         */
                        ret = sock_error(sk);
                        if (ret)
-                               break;
+                               ;
                        else if (sk->sk_shutdown & RCV_SHUTDOWN)
                                ;
                        else if (noblock)
                                ret = -EAGAIN;
                        else if (signal_pending(current))
-                               ret = -ERESTARTSYS;
+                               ret = sock_intr_errno(timeo);
+                       else if (sk->sk_state != TCP_ESTABLISHED)
+                               ret = -ENOTCONN;
                        else if (skb_peek(&sk->sk_receive_queue) == NULL)
                                /* Wait process until data arrives */
                                schedule();
 
-                       current->state = TASK_RUNNING;
-                       remove_wait_queue(sk->sk_sleep, &waitq);
-                       clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+                       finish_wait(sk->sk_sleep, &wait);
 
-                       if(ret)
-                               return(ret);
+                       if (ret)
+                               return ret;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;
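The open-coded wait-queue handling above is replaced by the standard sleep helpers; for reference, a minimal sketch of that idiom on a 2.6-era socket (field names as in the hunk, the wake-up condition simplified):

	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	if (skb_queue_empty(&sk->sk_receive_queue) &&
	    !sock_error(sk) && !(sk->sk_shutdown & RCV_SHUTDOWN))
		schedule();		/* woken by data_ready or a signal */
	finish_wait(sk->sk_sleep, &wait);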
 
@@ -1531,7 +1494,6 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
        struct sock *sk = sock->sk;
        struct irda_sock *self;
        struct sk_buff *skb;
-       unsigned char *asmptr;
        int err;
 
        IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
@@ -1548,7 +1510,6 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
                return -ENOTCONN;
 
        self = irda_sk(sk);
-       IRDA_ASSERT(self != NULL, return -1;);
 
        /*
         * Check that we don't send out too big frames. This is an unreliable
@@ -1567,10 +1528,11 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
                return -ENOBUFS;
 
        skb_reserve(skb, self->max_header_size);
+       skb_reset_transport_header(skb);
 
        IRDA_DEBUG(4, "%s(), appending user data\n", __FUNCTION__);
-       asmptr = skb->h.raw = skb_put(skb, len);
-       err = memcpy_fromiovec(asmptr, msg->msg_iov, len);
+       skb_put(skb, len);
+       err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
        if (err) {
                kfree_skb(skb);
                return err;
@@ -1603,7 +1565,6 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
        __u8 pid = 0;
        int bound = 0;
        struct sk_buff *skb;
-       unsigned char *asmptr;
        int err;
 
        IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
@@ -1617,7 +1578,6 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
        }
 
        self = irda_sk(sk);
-       IRDA_ASSERT(self != NULL, return -1;);
 
        /* Check if an address was specified with sendto. Jean II */
        if (msg->msg_name) {
@@ -1663,10 +1623,11 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
                return -ENOBUFS;
 
        skb_reserve(skb, self->max_header_size);
+       skb_reset_transport_header(skb);
 
        IRDA_DEBUG(4, "%s(), appending user data\n", __FUNCTION__);
-       asmptr = skb->h.raw = skb_put(skb, len);
-       err = memcpy_fromiovec(asmptr, msg->msg_iov, len);
+       skb_put(skb, len);
+       err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
        if (err) {
                kfree_skb(skb);
                return err;
@@ -1690,8 +1651,6 @@ static int irda_shutdown(struct socket *sock, int how)
        struct sock *sk = sock->sk;
        struct irda_sock *self = irda_sk(sk);
 
-       IRDA_ASSERT(self != NULL, return -1;);
-
        IRDA_DEBUG(1, "%s(%p)\n", __FUNCTION__, self);
 
        sk->sk_state       = TCP_CLOSE;
@@ -1864,8 +1823,6 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
        struct ias_attrib *     ias_attr;       /* Attribute in IAS object */
        int opt;
 
-       IRDA_ASSERT(self != NULL, return -1;);
-
        IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self);
 
        if (level != SOL_IRLMP)
index 01d7c9c..e5e4792 100644 (file)
@@ -133,8 +133,8 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
         * Inserting is a little bit tricky since we don't know how much
         * room we will need. But this should hopefully work OK
         */
-       count = irda_param_insert(self, pi, skb->tail, skb_tailroom(skb),
-                                 &ircomm_param_info);
+       count = irda_param_insert(self, pi, skb_tail_pointer(skb),
+                                 skb_tailroom(skb), &ircomm_param_info);
        if (count < 0) {
                IRDA_WARNING("%s(), no room for parameter!\n", __FUNCTION__);
                spin_unlock_irqrestore(&self->spinlock, flags);
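skb_tail_pointer() is the accessor form of the old direct skb->tail read (the tail may now be stored as an offset). A short sketch of the append pattern used here, with the skb_put() commit step assumed from the rest of the function:

	count = irda_param_insert(self, pi, skb_tail_pointer(skb),
				  skb_tailroom(skb), &ircomm_param_info);
	if (count >= 0)
		skb_put(skb, count);	/* commit the bytes written at the tail */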
index e717801..7b5def1 100644 (file)
@@ -375,7 +375,7 @@ EXPORT_SYMBOL(alloc_irdadev);
 dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
 {
        struct dongle_reg *reg;
-       dongle_t *dongle = NULL;
+       dongle_t *dongle = kzalloc(sizeof(dongle_t), GFP_KERNEL);
 
        might_sleep();
 
@@ -397,19 +397,14 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
        if (!reg || !try_module_get(reg->owner) ) {
                IRDA_ERROR("IrDA: Unable to find requested dongle type %x\n",
                           type);
-               goto out;
+               kfree(dongle);
+               dongle = NULL;
+       }
+       if (dongle) {
+               /* Bind the registration info to this particular instance */
+               dongle->issue = reg;
+               dongle->dev = dev;
        }
-
-       /* Allocate dongle info for this instance */
-       dongle = kzalloc(sizeof(dongle_t), GFP_KERNEL);
-       if (!dongle)
-               goto out;
-
-       /* Bind the registration info to this particular instance */
-       dongle->issue = reg;
-       dongle->dev = dev;
-
- out:
        spin_unlock(&dongles->hb_spinlock);
        return dongle;
 }
index fcf9d65..ed69773 100644 (file)
@@ -1039,7 +1039,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
        }
 
        /* Insert at end of sk-buffer */
-       frame = skb->tail;
+       frame = skb_tail_pointer(skb);
 
        /* Make space for data */
        if (skb_tailroom(skb) < (param_len+value_len+3)) {
index 672ab3f..c421521 100644 (file)
@@ -234,8 +234,7 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
         * might have been previously set by the low level IrDA network
         * device driver
         */
-       skb->dev = self->dev;
-       skb->protocol=eth_type_trans(skb, skb->dev); /* Remove eth header */
+       skb->protocol = eth_type_trans(skb, self->dev); /* Remove eth header */
 
        self->stats.rx_packets++;
        self->stats.rx_bytes += skb->len;
index 7b6433f..0b02073 100644 (file)
@@ -590,7 +590,7 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
                if (!self->discovery_log) {
                        IRDA_WARNING("%s: discovery log is gone! "
                                     "maybe the discovery timeout has been set"
-                                    " to short?\n", __FUNCTION__);
+                                    " too short?\n", __FUNCTION__);
                        break;
                }
                hashbin_insert(self->discovery_log,
index 0b04603..3c5a68e 100644 (file)
@@ -93,7 +93,9 @@ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb)
 {
        /* Some common init stuff */
        skb->dev = self->netdev;
-       skb->h.raw = skb->nh.raw = skb->mac.raw = skb->data;
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
        skb->protocol = htons(ETH_P_IRDA);
        skb->priority = TC_PRIO_BESTEFFORT;
 
@@ -411,7 +413,7 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 
        if (!pskb_may_pull(skb, sizeof(struct xid_frame))) {
-               IRDA_ERROR("%s: frame to short!\n", __FUNCTION__);
+               IRDA_ERROR("%s: frame too short!\n", __FUNCTION__);
                return;
        }
 
@@ -482,7 +484,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
        char *text;
 
        if (!pskb_may_pull(skb, sizeof(struct xid_frame))) {
-               IRDA_ERROR("%s: frame to short!\n", __FUNCTION__);
+               IRDA_ERROR("%s: frame too short!\n", __FUNCTION__);
                return;
        }
 
@@ -526,7 +528,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
                /* Check if things are sane at this point... */
                if((discovery_info == NULL) ||
                   !pskb_may_pull(skb, 3)) {
-                       IRDA_ERROR("%s: discovery frame to short!\n",
+                       IRDA_ERROR("%s: discovery frame too short!\n",
                                   __FUNCTION__);
                        return;
                }
@@ -1171,7 +1173,7 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb,
        IRDA_ASSERT(info != NULL, return;);
 
        if (!pskb_may_pull(skb, 4)) {
-               IRDA_ERROR("%s: frame to short!\n", __FUNCTION__);
+               IRDA_ERROR("%s: frame too short!\n", __FUNCTION__);
                return;
        }
 
@@ -1260,7 +1262,7 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
        IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
 
        if (!pskb_may_pull(skb, sizeof(*frame))) {
-               IRDA_ERROR("%s: frame to short!\n", __FUNCTION__);
+               IRDA_ERROR("%s: frame too short!\n", __FUNCTION__);
                return;
        }
        frame = (struct test_frame *) skb->data;
@@ -1268,7 +1270,7 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
        /* Broadcast frames must carry saddr and daddr fields */
        if (info->caddr == CBROADCAST) {
                if (skb->len < sizeof(struct test_frame)) {
-                       IRDA_DEBUG(0, "%s() test frame to short!\n",
+                       IRDA_DEBUG(0, "%s() test frame too short!\n",
                                   __FUNCTION__);
                        return;
                }
@@ -1334,7 +1336,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
 
        /* Check if frame is large enough for parsing */
        if (!pskb_may_pull(skb, 2)) {
-               IRDA_ERROR("%s: frame to short!\n", __FUNCTION__);
+               IRDA_ERROR("%s: frame too short!\n", __FUNCTION__);
                dev_kfree_skb(skb);
                return -1;
        }
index 9266233..d058b46 100644 (file)
@@ -384,6 +384,9 @@ EXPORT_SYMBOL(hashbin_new);
  *    for deallocating this structure if it's complex. If not the user can
  *    just supply kfree, which should take care of the job.
  */
+#ifdef CONFIG_LOCKDEP
+static int hashbin_lock_depth = 0;
+#endif
 int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
 {
        irda_queue_t* queue;
@@ -395,7 +398,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
 
        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
-               spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+               spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
+                                        hashbin_lock_depth++);
        }
 
        /*
@@ -419,6 +423,9 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+#ifdef CONFIG_LOCKDEP
+               hashbin_lock_depth--;
+#endif
        }
 
        /*
index da3f2bc..7069e4a 100644 (file)
@@ -256,7 +256,7 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
         *  Copy all fragments to a new buffer
         */
        while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
-               memcpy(skb->data+n, frag->data, frag->len);
+               skb_copy_to_linear_data_offset(skb, n, frag->data, frag->len);
                n += frag->len;
 
                dev_kfree_skb(frag);
@@ -314,8 +314,8 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
                skb_reserve(frag, self->max_header_size);
 
                /* Copy data from the original skb into this fragment. */
-               memcpy(skb_put(frag, self->max_seg_size), skb->data,
-                      self->max_seg_size);
+               skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size),
+                             self->max_seg_size);
 
                /* Insert TTP header, with the more bit set */
                frame = skb_push(frag, TTP_HEADER);
@@ -551,7 +551,7 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
        }
 
        if (skb->len > self->max_seg_size) {
-               IRDA_DEBUG(1, "%s(), UData is to large for IrLAP!\n",
+               IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n",
                           __FUNCTION__);
                goto err;
        }
@@ -598,7 +598,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
         *  inside an IrLAP frame
         */
        if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
-               IRDA_ERROR("%s: SAR disabled, and data is to large for IrLAP!\n",
+               IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n",
                           __FUNCTION__);
                ret = -EMSGSIZE;
                goto err;
index 75a72d2..2627dad 100644 (file)
@@ -160,7 +160,7 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi,
        }
        /* Check if buffer is long enough for insertion */
        if (len < (2+p.pl)) {
-               IRDA_WARNING("%s: buffer to short for insertion!\n",
+               IRDA_WARNING("%s: buffer too short for insertion!\n",
                             __FUNCTION__);
                return -1;
        }
@@ -216,7 +216,7 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
 
        /* Check if buffer is long enough for parsing */
        if (len < (2+p.pl)) {
-               IRDA_WARNING("%s: buffer to short for parsing! "
+               IRDA_WARNING("%s: buffer too short for parsing! "
                             "Need %d bytes, but len is only %d\n",
                             __FUNCTION__, p.pl, len);
                return -1;
@@ -304,7 +304,7 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
 
        /* Check if buffer is long enough for parsing */
        if (len < (2+p.pl)) {
-               IRDA_WARNING("%s: buffer to short for parsing! "
+               IRDA_WARNING("%s: buffer too short for parsing! "
                             "Need %d bytes, but len is only %d\n",
                             __FUNCTION__, p.pl, len);
                return -1;
@@ -343,7 +343,7 @@ static int irda_extract_octseq(void *self, __u8 *buf, int len, __u8 pi,
 
        /* Check if buffer is long enough for parsing */
        if (len < (2+p.pl)) {
-               IRDA_WARNING("%s: buffer to short for parsing! "
+               IRDA_WARNING("%s: buffer too short for parsing! "
                             "Need %d bytes, but len is only %d\n",
                             __FUNCTION__, p.pl, len);
                return -1;
index 349012c..aeb18cf 100644 (file)
@@ -469,49 +469,49 @@ int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
        int ret;
 
        /* Insert data rate */
-       ret = irda_param_insert(self, PI_BAUD_RATE, skb->tail,
+       ret = irda_param_insert(self, PI_BAUD_RATE, skb_tail_pointer(skb),
                                skb_tailroom(skb), &irlap_param_info);
        if (ret < 0)
                return ret;
        skb_put(skb, ret);
 
        /* Insert max turnaround time */
-       ret = irda_param_insert(self, PI_MAX_TURN_TIME, skb->tail,
+       ret = irda_param_insert(self, PI_MAX_TURN_TIME, skb_tail_pointer(skb),
                                skb_tailroom(skb), &irlap_param_info);
        if (ret < 0)
                return ret;
        skb_put(skb, ret);
 
        /* Insert data size */
-       ret = irda_param_insert(self, PI_DATA_SIZE, skb->tail,
+       ret = irda_param_insert(self, PI_DATA_SIZE, skb_tail_pointer(skb),
                                skb_tailroom(skb), &irlap_param_info);
        if (ret < 0)
                return ret;
        skb_put(skb, ret);
 
        /* Insert window size */
-       ret = irda_param_insert(self, PI_WINDOW_SIZE, skb->tail,
+       ret = irda_param_insert(self, PI_WINDOW_SIZE, skb_tail_pointer(skb),
                                skb_tailroom(skb), &irlap_param_info);
        if (ret < 0)
                return ret;
        skb_put(skb, ret);
 
        /* Insert additional BOFs */
-       ret = irda_param_insert(self, PI_ADD_BOFS, skb->tail,
+       ret = irda_param_insert(self, PI_ADD_BOFS, skb_tail_pointer(skb),
                                skb_tailroom(skb), &irlap_param_info);
        if (ret < 0)
                return ret;
        skb_put(skb, ret);
 
        /* Insert minimum turnaround time */
-       ret = irda_param_insert(self, PI_MIN_TURN_TIME, skb->tail,
+       ret = irda_param_insert(self, PI_MIN_TURN_TIME, skb_tail_pointer(skb),
                                skb_tailroom(skb), &irlap_param_info);
        if (ret < 0)
                return ret;
        skb_put(skb, ret);
 
        /* Insert link disconnect/threshold time */
-       ret = irda_param_insert(self, PI_LINK_DISC, skb->tail,
+       ret = irda_param_insert(self, PI_LINK_DISC, skb_tail_pointer(skb),
                                skb_tailroom(skb), &irlap_param_info);
        if (ret < 0)
                return ret;
index 5abfb71..a7a7f19 100644 (file)
@@ -239,7 +239,8 @@ async_bump(struct net_device *dev,
 
        if(docopy) {
                /* Copy data without CRC (length already checked) */
-               memcpy(newskb->data, rx_buff->data, rx_buff->len - 2);
+               skb_copy_to_linear_data(newskb, rx_buff->data,
+                                       rx_buff->len - 2);
                /* Deliver this skb */
                dataskb = newskb;
        } else {
@@ -256,7 +257,7 @@ async_bump(struct net_device *dev,
 
        /* Feed it to IrLAP layer */
        dataskb->dev = dev;
-       dataskb->mac.raw  = dataskb->data;
+       skb_reset_mac_header(dataskb);
        dataskb->protocol = htons(ETH_P_IRDA);
 
        netif_rx(dataskb);
index acc9421..e84c924 100644 (file)
@@ -181,7 +181,7 @@ static void iucv_sock_close(struct sock *sk)
        default:
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
-       };
+       }
 
        release_sock(sk);
        iucv_sock_kill(sk);
@@ -953,8 +953,8 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
                        return;
                }
 
-               skb->h.raw = skb->data;
-               skb->nh.raw = skb->data;
+               skb_reset_transport_header(skb);
+               skb_reset_network_header(skb);
                skb->len = msg->length;
        }
 
index 1b10d57..60f2938 100644 (file)
@@ -519,7 +519,6 @@ static void iucv_disable(void)
        kfree(iucv_path_table);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
                                     unsigned long action, void *hcpu)
 {
@@ -565,7 +564,6 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 static struct notifier_block iucv_cpu_notifier = {
        .notifier_call = iucv_cpu_notify,
 };
-#endif
 
 /**
  * iucv_sever_pathid
index cf77930..a994441 100644 (file)
@@ -379,7 +379,7 @@ static int verify_address_len(void *p)
                 */
                return -EINVAL;
                break;
-       };
+       }
 
        return 0;
 }
@@ -2072,7 +2072,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
                rq->sadb_x_ipsecrequest_proto = t->id.proto;
                if ((mode = pfkey_mode_from_xfrm(t->mode)) < 0)
                        return -EINVAL;
-               mode = pfkey_mode_from_xfrm(t->mode);
+               rq->sadb_x_ipsecrequest_mode = mode;
                rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE;
                if (t->reqid)
                        rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE;
@@ -3667,7 +3667,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
                copied = len;
        }
 
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        if (err)
                goto out_free;
index b3f65d1..099ed8f 100644 (file)
@@ -112,7 +112,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
        if (unlikely(!pskb_may_pull(skb, llc_len)))
                return 0;
 
-       skb->h.raw += llc_len;
+       skb->transport_header += llc_len;
        skb_pull(skb, llc_len);
        if (skb->protocol == htons(ETH_P_802_2)) {
                __be16 pdulen = eth_hdr(skb)->h_proto;
index f4291f3..754f4fe 100644 (file)
@@ -41,7 +41,8 @@ int llc_mac_hdr_init(struct sk_buff *skb,
                struct net_device *dev = skb->dev;
                struct trh_hdr *trh;
 
-               skb->mac.raw = skb_push(skb, sizeof(*trh));
+               skb_push(skb, sizeof(*trh));
+               skb_reset_mac_header(skb);
                trh = tr_hdr(skb);
                trh->ac = AC;
                trh->fc = LLC_FRAME;
@@ -52,7 +53,7 @@ int llc_mac_hdr_init(struct sk_buff *skb,
                if (da) {
                        memcpy(trh->daddr, da, dev->addr_len);
                        tr_source_route(skb, trh, dev);
-                       skb->mac.raw = skb->data;
+                       skb_reset_mac_header(skb);
                }
                break;
        }
@@ -62,7 +63,8 @@ int llc_mac_hdr_init(struct sk_buff *skb,
                unsigned short len = skb->len;
                struct ethhdr *eth;
 
-               skb->mac.raw = skb_push(skb, sizeof(*eth));
+               skb_push(skb, sizeof(*eth));
+               skb_reset_mac_header(skb);
                eth = eth_hdr(skb);
                eth->h_proto = htons(len);
                memcpy(eth->h_dest, da, ETH_ALEN);
index 2615dc8..2525165 100644 (file)
@@ -36,11 +36,12 @@ struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev)
        struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);
 
        if (skb) {
+               skb_reset_mac_header(skb);
                skb_reserve(skb, 50);
-               skb->nh.raw   = skb->h.raw = skb->data;
+               skb_reset_network_header(skb);
+               skb_reset_transport_header(skb);
                skb->protocol = htons(ETH_P_802_2);
                skb->dev      = dev;
-               skb->mac.raw  = skb->head;
                if (sk != NULL)
                        skb_set_owner_w(skb, sk);
        }
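One subtlety in this hunk: skb_reset_mac_header() is called before skb_reserve(), so it records the pre-reserve data pointer, which is exactly what the removed skb->mac.raw = skb->head assignment did. A sketch of the resulting layout (comments are illustrative):

	struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);

	if (skb) {
		skb_reset_mac_header(skb);	/* MAC header == start of the buffer */
		skb_reserve(skb, 50);		/* headroom for lower-layer headers */
		skb_reset_network_header(skb);	/* network header == skb->data */
		skb_reset_transport_header(skb);/* transport header == skb->data */
	}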
index 54698af..c558f32 100644 (file)
@@ -25,6 +25,7 @@ config NETFILTER_NETLINK_LOG
          and is also scheduled to replace the old syslog-based ipt_LOG
          and ip6t_LOG modules.
 
+# Rename this to NF_CONNTRACK in 2.6.25
 config NF_CONNTRACK_ENABLED
        tristate "Netfilter connection tracking support"
        help
@@ -39,42 +40,9 @@ config NF_CONNTRACK_ENABLED
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-choice
-       prompt "Netfilter connection tracking support"
-       depends on NF_CONNTRACK_ENABLED
-
-config NF_CONNTRACK_SUPPORT
-       bool "Layer 3 Independent Connection tracking"
-       help
-         Layer 3 independent connection tracking is experimental scheme
-         which generalize ip_conntrack to support other layer 3 protocols.
-
-         This is required to do Masquerading or other kinds of Network
-         Address Translation (except for Fast NAT).  It can also be used to
-         enhance packet filtering (see `Connection state match support'
-         below).
-
-config IP_NF_CONNTRACK_SUPPORT
-       bool "Layer 3 Dependent Connection tracking (OBSOLETE)"
-       help
-         The old, Layer 3 dependent ip_conntrack subsystem of netfilter.
-
-         This is required to do Masquerading or other kinds of Network
-         Address Translation (except for Fast NAT).  It can also be used to
-         enhance packet filtering (see `Connection state match support'
-         below).
-
-endchoice
-
 config NF_CONNTRACK
        tristate
-       default m if NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=m
-       default y if NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=y
-
-config IP_NF_CONNTRACK
-       tristate
-       default m if IP_NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=m
-       default y if IP_NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=y
+       default NF_CONNTRACK_ENABLED
 
 config NF_CT_ACCT
        bool "Connection tracking flow accounting"
@@ -303,9 +271,8 @@ config NETFILTER_XT_TARGET_CONNMARK
        tristate  '"CONNMARK" target support'
        depends on NETFILTER_XTABLES
        depends on IP_NF_MANGLE || IP6_NF_MANGLE
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK
-       select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK
-       select NF_CONNTRACK_MARK if NF_CONNTRACK
+       depends on NF_CONNTRACK
+       select NF_CONNTRACK_MARK
        help
          This option adds a `CONNMARK' target, which allows one to manipulate
          the connection mark value.  Similar to the MARK target, but
@@ -366,7 +333,7 @@ config NETFILTER_XT_TARGET_NOTRACK
        tristate  '"NOTRACK" target support'
        depends on NETFILTER_XTABLES
        depends on IP_NF_RAW || IP6_NF_RAW
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK
+       depends on NF_CONNTRACK
        help
          The NOTRACK target allows a select rule to specify
          which packets *not* to enter the conntrack/NAT
@@ -387,9 +354,7 @@ config NETFILTER_XT_TARGET_SECMARK
 
 config NETFILTER_XT_TARGET_CONNSECMARK
        tristate '"CONNSECMARK" target support'
-       depends on NETFILTER_XTABLES && \
-                  ((NF_CONNTRACK && NF_CONNTRACK_SECMARK) || \
-                   (IP_NF_CONNTRACK && IP_NF_CONNTRACK_SECMARK))
+       depends on NETFILTER_XTABLES && NF_CONNTRACK && NF_CONNTRACK_SECMARK
        help
          The CONNSECMARK target copies security markings from packets
          to connections, and restores security markings from connections
@@ -437,9 +402,8 @@ config NETFILTER_XT_MATCH_COMMENT
 config NETFILTER_XT_MATCH_CONNBYTES
        tristate  '"connbytes" per-connection counter match support'
        depends on NETFILTER_XTABLES
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK
-       select IP_NF_CT_ACCT if IP_NF_CONNTRACK
-       select NF_CT_ACCT if NF_CONNTRACK
+       depends on NF_CONNTRACK
+       select NF_CT_ACCT
        help
          This option adds a `connbytes' match, which allows you to match the
          number of bytes and/or packets for each direction within a connection.
@@ -450,9 +414,8 @@ config NETFILTER_XT_MATCH_CONNBYTES
 config NETFILTER_XT_MATCH_CONNMARK
        tristate  '"connmark" connection mark match support'
        depends on NETFILTER_XTABLES
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK
-       select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK
-       select NF_CONNTRACK_MARK if NF_CONNTRACK
+       depends on NF_CONNTRACK
+       select NF_CONNTRACK_MARK
        help
          This option adds a `connmark' match, which allows you to match the
          connection mark value previously set for the session by `CONNMARK'. 
@@ -464,7 +427,7 @@ config NETFILTER_XT_MATCH_CONNMARK
 config NETFILTER_XT_MATCH_CONNTRACK
        tristate '"conntrack" connection tracking match support'
        depends on NETFILTER_XTABLES
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK
+       depends on NF_CONNTRACK
        help
          This is a general conntrack match module, a superset of the state match.
 
@@ -508,7 +471,7 @@ config NETFILTER_XT_MATCH_ESP
 config NETFILTER_XT_MATCH_HELPER
        tristate '"helper" match support'
        depends on NETFILTER_XTABLES
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK
+       depends on NF_CONNTRACK
        help
          Helper matching allows you to match packets in dynamic connections
          tracked by a conntrack-helper, ie. ip_conntrack_ftp
@@ -632,7 +595,7 @@ config NETFILTER_XT_MATCH_SCTP
 config NETFILTER_XT_MATCH_STATE
        tristate '"state" match support'
        depends on NETFILTER_XTABLES
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK
+       depends on NF_CONNTRACK
        help
          Connection state matching allows you to match packets based on their
          relationship to a tracked connection (ie. previous packets).  This
index c3ebdbd..a84478e 100644 (file)
@@ -5,10 +5,6 @@
  * way.
  *
  * Rusty Russell (C)2000 -- This code is GPL.
- *
- * February 2000: Modified by James Morris to have 1 queue per protocol.
- * 15-Mar-2000:   Added NF_REPEAT --RR.
- * 08-May-2003:          Internal logging interface added by Jozsef Kadlecsik.
  */
 #include <linux/kernel.h>
 #include <linux/netfilter.h>
@@ -244,6 +240,7 @@ void nf_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(nf_proto_csum_replace4);
 
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 /* This does not belong here, but locally generated errors need it if connection
   tracking is in use: without this, the connection may not be in the hash table, and hence
    manufactured ICMP or RST packets will not be associated with it. */
@@ -264,6 +261,22 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(nf_ct_attach);
 
+void (*nf_ct_destroy)(struct nf_conntrack *);
+EXPORT_SYMBOL(nf_ct_destroy);
+
+void nf_conntrack_destroy(struct nf_conntrack *nfct)
+{
+       void (*destroy)(struct nf_conntrack *);
+
+       rcu_read_lock();
+       destroy = rcu_dereference(nf_ct_destroy);
+       BUG_ON(destroy == NULL);
+       destroy(nfct);
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(nf_conntrack_destroy);
+#endif /* CONFIG_NF_CONNTRACK */
+
 #ifdef CONFIG_PROC_FS
 struct proc_dir_entry *proc_net_netfilter;
 EXPORT_SYMBOL(proc_net_netfilter);
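For orientation, the expected consumer of this indirection is the conntrack reference-drop path; a sketch of what that caller looks like (the real definition lives in linux/skbuff.h and is not part of this hunk):

	static inline void nf_conntrack_put(struct nf_conntrack *nfct)
	{
		if (nfct && atomic_dec_and_test(&nfct->use))
			nf_conntrack_destroy(nfct);	/* resolves nf_ct_destroy under rcu_read_lock() */
	}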
index b3a70eb..e132c8a 100644 (file)
@@ -9,24 +9,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
- *     - new API and handling of conntrack/nat helpers
- *     - now capable of multiple expectations for one master
- * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
- *     - add usage/reference counts to ip_conntrack_expect
- *     - export ip_conntrack[_expect]_{find_get,put} functions
- * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - generalize L3 protocol denendent part.
- * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - add support various size of conntrack structures.
- * 26 Jan 2006: Harald Welte <laforge@netfilter.org>
- *     - restructure nf_conn (introduce nf_conn_help)
- *     - redesign 'features' how they were originally intended
- * 26 Feb 2006: Pablo Neira Ayuso <pablo@eurodev.net>
- *     - add support for L3 protocol module load on demand.
- *
- * Derived from net/ipv4/netfilter/ip_conntrack_core.c
  */
 
 #include <linux/types.h>
@@ -128,10 +110,11 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  unsigned int size, unsigned int rnd)
 {
        unsigned int a, b;
-       a = jhash((void *)tuple->src.u3.all, sizeof(tuple->src.u3.all),
-                 ((tuple->src.l3num) << 16) | tuple->dst.protonum);
-       b = jhash((void *)tuple->dst.u3.all, sizeof(tuple->dst.u3.all),
-                       (tuple->src.u.all << 16) | tuple->dst.u.all);
+
+       a = jhash2(tuple->src.u3.all, ARRAY_SIZE(tuple->src.u3.all),
+                  (tuple->src.l3num << 16) | tuple->dst.protonum);
+       b = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
+                  (tuple->src.u.all << 16) | tuple->dst.u.all);
 
        return jhash_2words(a, b, rnd) % size;
 }
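On the hashing change: jhash() walks a byte buffer, while jhash2() takes an array of u32 words plus its length in words, which is why ARRAY_SIZE() of the u3.all fields is the right count. A trivial sketch (helper name is illustrative):

	#include <linux/jhash.h>

	static u32 tuple_word_hash(const u32 *words, unsigned int n_words, u32 seed)
	{
		return jhash2(words, n_words, seed);
	}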
@@ -633,13 +616,11 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
        memset(conntrack, 0, nf_ct_cache[features].size);
        conntrack->features = features;
        atomic_set(&conntrack->ct_general.use, 1);
-       conntrack->ct_general.destroy = destroy_conntrack;
        conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* Don't set timer yet: wait for confirmation */
-       init_timer(&conntrack->timeout);
-       conntrack->timeout.data = (unsigned long)conntrack;
-       conntrack->timeout.function = death_by_timeout;
+       setup_timer(&conntrack->timeout, death_by_timeout,
+                   (unsigned long)conntrack);
        read_unlock_bh(&nf_ct_cache_lock);
 
        return conntrack;
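setup_timer() is shorthand for the three-step initialization it replaces; for reference, a sketch of the equivalence under the 2.6 timer API:

	/* open-coded form */
	init_timer(&conntrack->timeout);
	conntrack->timeout.data     = (unsigned long)conntrack;
	conntrack->timeout.function = death_by_timeout;

	/* equivalent single call */
	setup_timer(&conntrack->timeout, death_by_timeout, (unsigned long)conntrack);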
@@ -768,7 +749,7 @@ resolve_normal_ct(struct sk_buff *skb,
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
 
-       if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data),
+       if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, &tuple, l3proto,
                             l4proto)) {
                DEBUGP("resolve_normal_ct: Can't get tuple\n");
@@ -960,7 +941,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
        if (do_acct) {
                ct->counters[CTINFO2DIR(ctinfo)].packets++;
                ct->counters[CTINFO2DIR(ctinfo)].bytes +=
-                       skb->len - (unsigned int)(skb->nh.raw - skb->data);
+                       skb->len - skb_network_offset(skb);
 
                if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
                    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
@@ -1140,6 +1121,8 @@ void nf_conntrack_cleanup(void)
        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
                schedule();
 
+       rcu_assign_pointer(nf_ct_destroy, NULL);
+
        for (i = 0; i < NF_CT_F_NUM; i++) {
                if (nf_ct_cache[i].use == 0)
                        continue;
@@ -1152,14 +1135,7 @@ void nf_conntrack_cleanup(void)
        free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
                            nf_conntrack_htable_size);
 
-       nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_generic);
-
-       /* free l3proto protocol tables */
-       for (i = 0; i < PF_MAX; i++)
-               if (nf_ct_protos[i]) {
-                       kfree(nf_ct_protos[i]);
-                       nf_ct_protos[i] = NULL;
-               }
+       nf_conntrack_proto_fini();
 }
 
 static struct list_head *alloc_hashtable(int size, int *vmalloced)
@@ -1237,7 +1213,6 @@ module_param_call(hashsize, set_hashsize, param_get_uint,
 
 int __init nf_conntrack_init(void)
 {
-       unsigned int i;
        int ret;
 
        /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
@@ -1279,18 +1254,13 @@ int __init nf_conntrack_init(void)
                goto err_free_conntrack_slab;
        }
 
-       ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_generic);
+       ret = nf_conntrack_proto_init();
        if (ret < 0)
                goto out_free_expect_slab;
 
-       /* Don't NEED lock here, but good form anyway. */
-       write_lock_bh(&nf_conntrack_lock);
-       for (i = 0; i < AF_MAX; i++)
-               nf_ct_l3protos[i] = &nf_conntrack_l3proto_generic;
-       write_unlock_bh(&nf_conntrack_lock);
-
        /* For use by REJECT target */
        rcu_assign_pointer(ip_ct_attach, __nf_conntrack_attach);
+       rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
 
        /* Set up fake conntrack:
            - to never be deleted, not in any hashes */
index 1a223e0..6bd421d 100644 (file)
@@ -91,3 +91,26 @@ void nf_ct_event_cache_flush(void)
        }
 }
 
+int nf_conntrack_register_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
+
+int nf_conntrack_unregister_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
+
+int nf_conntrack_expect_register_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_register_notifier);
+
+int nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_unregister_notifier);
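These wrappers keep the notifier chains private to the event cache; a sketch of how a subscriber such as ctnetlink uses them (module hook names are illustrative):

	static struct notifier_block ctnl_notifier = {
		.notifier_call = ctnetlink_conntrack_event,
	};

	static int __init demo_subscriber_init(void)
	{
		return nf_conntrack_register_notifier(&ctnl_notifier);
	}

	static void __exit demo_subscriber_exit(void)
	{
		nf_conntrack_unregister_notifier(&ctnl_notifier);
	}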
index ce70a6f..c31af29 100644 (file)
@@ -290,9 +290,7 @@ static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
        master_help->expecting++;
        list_add(&exp->list, &nf_conntrack_expect_list);
 
-       init_timer(&exp->timeout);
-       exp->timeout.data = (unsigned long)exp;
-       exp->timeout.function = expectation_timed_out;
+       setup_timer(&exp->timeout, expectation_timed_out, (unsigned long)exp);
        exp->timeout.expires = jiffies + master_help->helper->timeout * HZ;
        add_timer(&exp->timeout);
 
index 3089dfc..a186799 100644 (file)
@@ -7,12 +7,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - enable working with Layer 3 protocol independent connection tracking.
- *     - track EPRT and EPSV commands with IPv6 address.
- *
- * Derived from net/ipv4/netfilter/ip_conntrack_ftp.c
  */
 
 #include <linux/module.h>
index bb26a65..1093478 100644 (file)
@@ -46,7 +46,7 @@ static int help(struct sk_buff **pskb, unsigned int protoff,
                struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 {
        struct nf_conntrack_expect *exp;
-       struct iphdr *iph = (*pskb)->nh.iph;
+       struct iphdr *iph = ip_hdr(*pskb);
        struct rtable *rt = (struct rtable *)(*pskb)->dst;
        struct in_device *in_dev;
        __be32 mask = 0;
index 48f0531..aa1a97e 100644 (file)
@@ -6,9 +6,6 @@
  * (C) 2003 by Patrick Mchardy <kaber@trash.net>
  * (C) 2005-2006 by Pablo Neira Ayuso <pablo@eurodev.net>
  *
- * I've reworked this stuff to use attributes instead of conntrack
- * structures. 5.44 am. I need more tea. --pablo 05/07/11.
- *
  * Initial connection tracking via netlink development funded and
  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
  *
@@ -16,8 +13,6 @@
  *
  * This software may be used and distributed according to the terms
  * of the GNU General Public License, incorporated herein by reference.
- *
- * Derived from ip_conntrack_netlink.c: Port by Pablo Neira Ayuso (05/11/14)
  */
 
 #include <linux/init.h>
@@ -33,6 +28,7 @@
 #include <linux/notifier.h>
 
 #include <linux/netfilter.h>
+#include <net/netlink.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_expect.h>
@@ -268,9 +264,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct nfattr *nest_parms;
-       unsigned char *b;
-
-       b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        event |= NFNL_SUBSYS_CTNETLINK << 8;
        nlh    = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg));
@@ -303,12 +297,12 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
            ctnetlink_dump_use(skb, ct) < 0)
                goto nfattr_failure;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
 nfattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -322,7 +316,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
        struct nf_conn *ct = (struct nf_conn *)ptr;
        struct sk_buff *skb;
        unsigned int type;
-       unsigned char *b;
+       sk_buff_data_t b;
        unsigned int flags = 0, group;
 
        /* ignore our fake conntrack entry */
@@ -662,7 +656,7 @@ static const size_t cta_min[CTA_MAX] = {
 
 static int
 ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
-                       struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
+                       struct nlmsghdr *nlh, struct nfattr *cda[])
 {
        struct nf_conntrack_tuple_hash *h;
        struct nf_conntrack_tuple tuple;
@@ -710,7 +704,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
 
 static int
 ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
-                       struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
+                       struct nlmsghdr *nlh, struct nfattr *cda[])
 {
        struct nf_conntrack_tuple_hash *h;
        struct nf_conntrack_tuple tuple;
@@ -721,22 +715,12 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
        int err = 0;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
-               u32 rlen;
-
 #ifndef CONFIG_NF_CT_ACCT
                if (NFNL_MSG_TYPE(nlh->nlmsg_type) == IPCTNL_MSG_CT_GET_CTRZERO)
                        return -ENOTSUPP;
 #endif
-               if ((*errp = netlink_dump_start(ctnl, skb, nlh,
-                                               ctnetlink_dump_table,
-                                               ctnetlink_done)) != 0)
-                       return -EINVAL;
-
-               rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-               if (rlen > skb->len)
-                       rlen = skb->len;
-               skb_pull(skb, rlen);
-               return 0;
+               return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
+                                         ctnetlink_done);
        }
 
        if (nfattr_bad_size(cda, CTA_MAX, cta_min))
@@ -1010,7 +994,7 @@ err:
 
 static int
 ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
-                       struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
+                       struct nlmsghdr *nlh, struct nfattr *cda[])
 {
        struct nf_conntrack_tuple otuple, rtuple;
        struct nf_conntrack_tuple_hash *h = NULL;
@@ -1152,9 +1136,7 @@ ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned char *b;
-
-       b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
        nlh    = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg));
@@ -1168,12 +1150,12 @@ ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
        if (ctnetlink_exp_dump_expect(skb, exp) < 0)
                goto nfattr_failure;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
 nfattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1186,7 +1168,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
        struct nf_conntrack_expect *exp = (struct nf_conntrack_expect *)ptr;
        struct sk_buff *skb;
        unsigned int type;
-       unsigned char *b;
+       sk_buff_data_t b;
        int flags = 0;
 
        if (events & IPEXP_NEW) {
@@ -1263,7 +1245,7 @@ static const size_t cta_min_exp[CTA_EXPECT_MAX] = {
 
 static int
 ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
-                    struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
+                    struct nlmsghdr *nlh, struct nfattr *cda[])
 {
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_expect *exp;
@@ -1276,17 +1258,9 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
                return -EINVAL;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
-               u32 rlen;
-
-               if ((*errp = netlink_dump_start(ctnl, skb, nlh,
-                                               ctnetlink_exp_dump_table,
-                                               ctnetlink_done)) != 0)
-                       return -EINVAL;
-               rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-               if (rlen > skb->len)
-                       rlen = skb->len;
-               skb_pull(skb, rlen);
-               return 0;
+               return netlink_dump_start(ctnl, skb, nlh,
+                                         ctnetlink_exp_dump_table,
+                                         ctnetlink_done);
        }
 
        if (cda[CTA_EXPECT_MASTER-1])
@@ -1333,7 +1307,7 @@ out:
 
 static int
 ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
-                    struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
+                    struct nlmsghdr *nlh, struct nfattr *cda[])
 {
        struct nf_conntrack_expect *exp, *tmp;
        struct nf_conntrack_tuple tuple;
@@ -1467,7 +1441,7 @@ out:
 
 static int
 ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
-                    struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
+                    struct nlmsghdr *nlh, struct nfattr *cda[])
 {
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_expect *exp;
index 456155f..6d94706 100644 (file)
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
 
-struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly;
+static struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly;
 struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX] __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ct_l3protos);
 
-#ifdef CONFIG_SYSCTL
-static DEFINE_MUTEX(nf_ct_proto_sysctl_mutex);
+static DEFINE_MUTEX(nf_ct_proto_mutex);
 
+#ifdef CONFIG_SYSCTL
 static int
 nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_table *path,
                      struct ctl_table *table, unsigned int *users)
@@ -164,13 +164,11 @@ static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto)
        int err = 0;
 
 #ifdef CONFIG_SYSCTL
-       mutex_lock(&nf_ct_proto_sysctl_mutex);
        if (l3proto->ctl_table != NULL) {
                err = nf_ct_register_sysctl(&l3proto->ctl_table_header,
                                            l3proto->ctl_table_path,
                                            l3proto->ctl_table, NULL);
        }
-       mutex_unlock(&nf_ct_proto_sysctl_mutex);
 #endif
        return err;
 }
@@ -178,11 +176,9 @@ static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto)
 static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto)
 {
 #ifdef CONFIG_SYSCTL
-       mutex_lock(&nf_ct_proto_sysctl_mutex);
        if (l3proto->ctl_table_header != NULL)
                nf_ct_unregister_sysctl(&l3proto->ctl_table_header,
                                        l3proto->ctl_table, NULL);
-       mutex_unlock(&nf_ct_proto_sysctl_mutex);
 #endif
 }
 
@@ -190,27 +186,23 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
 {
        int ret = 0;
 
-       if (proto->l3proto >= AF_MAX) {
-               ret = -EBUSY;
-               goto out;
-       }
+       if (proto->l3proto >= AF_MAX)
+               return -EBUSY;
 
-       write_lock_bh(&nf_conntrack_lock);
+       mutex_lock(&nf_ct_proto_mutex);
        if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
                ret = -EBUSY;
                goto out_unlock;
        }
-       rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto);
-       write_unlock_bh(&nf_conntrack_lock);
 
        ret = nf_ct_l3proto_register_sysctl(proto);
        if (ret < 0)
-               nf_conntrack_l3proto_unregister(proto);
-       return ret;
+               goto out_unlock;
+
+       rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto);
 
 out_unlock:
-       write_unlock_bh(&nf_conntrack_lock);
-out:
+       mutex_unlock(&nf_ct_proto_mutex);
        return ret;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
@@ -219,14 +211,14 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
 {
        BUG_ON(proto->l3proto >= AF_MAX);
 
-       write_lock_bh(&nf_conntrack_lock);
+       mutex_lock(&nf_ct_proto_mutex);
        BUG_ON(nf_ct_l3protos[proto->l3proto] != proto);
        rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
                           &nf_conntrack_l3proto_generic);
-       write_unlock_bh(&nf_conntrack_lock);
-       synchronize_rcu();
-
        nf_ct_l3proto_unregister_sysctl(proto);
+       mutex_unlock(&nf_ct_proto_mutex);
+
+       synchronize_rcu();
 
        /* Remove all contrack entries for this protocol */
        nf_ct_iterate_cleanup(kill_l3proto, proto);
@@ -238,7 +230,6 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
        int err = 0;
 
 #ifdef CONFIG_SYSCTL
-       mutex_lock(&nf_ct_proto_sysctl_mutex);
        if (l4proto->ctl_table != NULL) {
                err = nf_ct_register_sysctl(l4proto->ctl_table_header,
                                            nf_net_netfilter_sysctl_path,
@@ -260,7 +251,6 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
        }
 #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
 out:
-       mutex_unlock(&nf_ct_proto_sysctl_mutex);
 #endif /* CONFIG_SYSCTL */
        return err;
 }
@@ -268,7 +258,6 @@ out:
 static void nf_ct_l4proto_unregister_sysctl(struct nf_conntrack_l4proto *l4proto)
 {
 #ifdef CONFIG_SYSCTL
-       mutex_lock(&nf_ct_proto_sysctl_mutex);
        if (l4proto->ctl_table_header != NULL &&
            *l4proto->ctl_table_header != NULL)
                nf_ct_unregister_sysctl(l4proto->ctl_table_header,
@@ -279,7 +268,6 @@ static void nf_ct_l4proto_unregister_sysctl(struct nf_conntrack_l4proto *l4proto
                nf_ct_unregister_sysctl(&l4proto->ctl_compat_table_header,
                                        l4proto->ctl_compat_table, NULL);
 #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
-       mutex_unlock(&nf_ct_proto_sysctl_mutex);
 #endif /* CONFIG_SYSCTL */
 }
 
@@ -289,68 +277,41 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
 {
        int ret = 0;
 
-       if (l4proto->l3proto >= PF_MAX) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       if (l4proto == &nf_conntrack_l4proto_generic)
-               return nf_ct_l4proto_register_sysctl(l4proto);
+       if (l4proto->l3proto >= PF_MAX)
+               return -EBUSY;
 
-retry:
-       write_lock_bh(&nf_conntrack_lock);
-       if (nf_ct_protos[l4proto->l3proto]) {
-               if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto]
-                               != &nf_conntrack_l4proto_generic) {
-                       ret = -EBUSY;
-                       goto out_unlock;
-               }
-       } else {
+       mutex_lock(&nf_ct_proto_mutex);
+       if (!nf_ct_protos[l4proto->l3proto]) {
                /* l3proto may be loaded later. */
                struct nf_conntrack_l4proto **proto_array;
                int i;
 
-               write_unlock_bh(&nf_conntrack_lock);
-
-               proto_array = (struct nf_conntrack_l4proto **)
-                               kmalloc(MAX_NF_CT_PROTO *
-                                        sizeof(struct nf_conntrack_l4proto *),
-                                       GFP_KERNEL);
+               proto_array = kmalloc(MAX_NF_CT_PROTO *
+                                     sizeof(struct nf_conntrack_l4proto *),
+                                     GFP_KERNEL);
                if (proto_array == NULL) {
                        ret = -ENOMEM;
-                       goto out;
+                       goto out_unlock;
                }
+
                for (i = 0; i < MAX_NF_CT_PROTO; i++)
                        proto_array[i] = &nf_conntrack_l4proto_generic;
-
-               write_lock_bh(&nf_conntrack_lock);
-               if (nf_ct_protos[l4proto->l3proto]) {
-                       /* bad timing, but no problem */
-                       write_unlock_bh(&nf_conntrack_lock);
-                       kfree(proto_array);
-               } else {
-                       nf_ct_protos[l4proto->l3proto] = proto_array;
-                       write_unlock_bh(&nf_conntrack_lock);
-               }
-
-               /*
-                * Just once because array is never freed until unloading
-                * nf_conntrack.ko
-                */
-               goto retry;
+               nf_ct_protos[l4proto->l3proto] = proto_array;
+       } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
+                                       &nf_conntrack_l4proto_generic) {
+               ret = -EBUSY;
+               goto out_unlock;
        }
 
-       rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], l4proto);
-       write_unlock_bh(&nf_conntrack_lock);
-
        ret = nf_ct_l4proto_register_sysctl(l4proto);
        if (ret < 0)
-               nf_conntrack_l4proto_unregister(l4proto);
-       return ret;
+               goto out_unlock;
+
+       rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+                          l4proto);
 
 out_unlock:
-       write_unlock_bh(&nf_conntrack_lock);
-out:
+       mutex_unlock(&nf_ct_proto_mutex);
        return ret;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
@@ -359,21 +320,42 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
 {
        BUG_ON(l4proto->l3proto >= PF_MAX);
 
-       if (l4proto == &nf_conntrack_l4proto_generic) {
-               nf_ct_l4proto_unregister_sysctl(l4proto);
-               return;
-       }
-
-       write_lock_bh(&nf_conntrack_lock);
+       mutex_lock(&nf_ct_proto_mutex);
        BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto);
        rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
                           &nf_conntrack_l4proto_generic);
-       write_unlock_bh(&nf_conntrack_lock);
-       synchronize_rcu();
-
        nf_ct_l4proto_unregister_sysctl(l4proto);
+       mutex_unlock(&nf_ct_proto_mutex);
+
+       synchronize_rcu();
 
        /* Remove all contrack entries for this protocol */
        nf_ct_iterate_cleanup(kill_l4proto, l4proto);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
+
+int nf_conntrack_proto_init(void)
+{
+       unsigned int i;
+       int err;
+
+       err = nf_ct_l4proto_register_sysctl(&nf_conntrack_l4proto_generic);
+       if (err < 0)
+               return err;
+
+       for (i = 0; i < AF_MAX; i++)
+               rcu_assign_pointer(nf_ct_l3protos[i],
+                                  &nf_conntrack_l3proto_generic);
+       return 0;
+}
+
+void nf_conntrack_proto_fini(void)
+{
+       unsigned int i;
+
+       nf_ct_l4proto_unregister_sysctl(&nf_conntrack_l4proto_generic);
+
+       /* free l3proto protocol tables */
+       for (i = 0; i < PF_MAX; i++)
+               kfree(nf_ct_protos[i]);
+}
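These two helpers pair with the core setup and teardown changes earlier in this patch; a sketch of the call sites, mirroring the nf_conntrack_core.c hunks above:

	/* in nf_conntrack_init() */
	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto out_free_expect_slab;

	/* in nf_conntrack_cleanup() */
	nf_conntrack_proto_fini();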
index 7c06993..6faf1be 100644 (file)
@@ -4,11 +4,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - enable working with L3 protocol independent connection tracking.
- *
- * Derived from net/ipv4/netfilter/ip_conntrack_proto_generic.c
  */
 
 #include <linux/types.h>
index 3c80558..0d3254b 100644 (file)
@@ -7,15 +7,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 17 Oct 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - enable working with L3 protocol independent connection tracking.
- *
- * Derived from net/ipv4/ip_conntrack_sctp.c
- */
-
-/*
- * Added support for proc manipulation of timeouts.
  */
 
 #include <linux/types.h>
index 153d661..ccdd5d2 100644 (file)
@@ -4,24 +4,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>:
- *     - Real stateful connection tracking
- *     - Modified state transitions table
- *     - Window scaling support added
- *     - SACK support added
- *
- * Willy Tarreau:
- *     - State table bugfixes
- *     - More robust state changes
- *     - Tuning timer parameters
- *
- * 27 Oct 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - genelized Layer 3 protocol part.
- *
- * Derived from net/ipv4/netfilter/ip_conntrack_proto_tcp.c
- *
- * version 2.2
  */
 
 #include <linux/types.h>
@@ -470,11 +452,10 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 
        /* Fast path for timestamp-only option */
        if (length == TCPOLEN_TSTAMP_ALIGNED*4
-           && *(__be32 *)ptr ==
-               __constant_htonl((TCPOPT_NOP << 24)
-                                | (TCPOPT_NOP << 16)
-                                | (TCPOPT_TIMESTAMP << 8)
-                                | TCPOLEN_TIMESTAMP))
+           && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
+                                      | (TCPOPT_NOP << 16)
+                                      | (TCPOPT_TIMESTAMP << 8)
+                                      | TCPOLEN_TIMESTAMP))
                return;
 
        while (length > 0) {
@@ -765,26 +746,18 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update);
 #define        TH_ECE  0x40
 #define        TH_CWR  0x80
 
-/* table of valid flag combinations - ECE and CWR are always valid */
-static u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] =
+/* table of valid flag combinations - PUSH, ECE and CWR are always valid */
+static u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
 {
        [TH_SYN]                        = 1,
-       [TH_SYN|TH_PUSH]                = 1,
        [TH_SYN|TH_URG]                 = 1,
-       [TH_SYN|TH_PUSH|TH_URG]         = 1,
        [TH_SYN|TH_ACK]                 = 1,
-       [TH_SYN|TH_ACK|TH_PUSH]         = 1,
        [TH_RST]                        = 1,
        [TH_RST|TH_ACK]                 = 1,
-       [TH_RST|TH_ACK|TH_PUSH]         = 1,
        [TH_FIN|TH_ACK]                 = 1,
+       [TH_FIN|TH_ACK|TH_URG]          = 1,
        [TH_ACK]                        = 1,
-       [TH_ACK|TH_PUSH]                = 1,
        [TH_ACK|TH_URG]                 = 1,
-       [TH_ACK|TH_URG|TH_PUSH]         = 1,
-       [TH_FIN|TH_ACK|TH_PUSH]         = 1,
-       [TH_FIN|TH_ACK|TH_URG]          = 1,
-       [TH_FIN|TH_ACK|TH_URG|TH_PUSH]  = 1,
 };
 
 /* Protect conntrack against broken packets. Code taken from ipt_unclean.c.  */
@@ -831,7 +804,7 @@ static int tcp_error(struct sk_buff *skb,
        }
 
        /* Check TCP flags. */
-       tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR));
+       tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR|TH_PUSH));
        if (!tcp_valid_flags[tcpflags]) {
                if (LOG_INVALID(IPPROTO_TCP))
                        nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
@@ -1110,11 +1083,26 @@ static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa,
                         const struct nf_conn *ct)
 {
        struct nfattr *nest_parms;
+       struct nf_ct_tcp_flags tmp = {};
 
        read_lock_bh(&tcp_lock);
        nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP);
        NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t),
                &ct->proto.tcp.state);
+
+       NFA_PUT(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, sizeof(u_int8_t),
+               &ct->proto.tcp.seen[0].td_scale);
+
+       NFA_PUT(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, sizeof(u_int8_t),
+               &ct->proto.tcp.seen[1].td_scale);
+
+       tmp.flags = ct->proto.tcp.seen[0].flags;
+       NFA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
+               sizeof(struct nf_ct_tcp_flags), &tmp);
+
+       tmp.flags = ct->proto.tcp.seen[1].flags;
+       NFA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
+               sizeof(struct nf_ct_tcp_flags), &tmp);
        read_unlock_bh(&tcp_lock);
 
        NFA_NEST_END(skb, nest_parms);
@@ -1127,7 +1115,11 @@ nfattr_failure:
 }
 
 static const size_t cta_min_tcp[CTA_PROTOINFO_TCP_MAX] = {
-       [CTA_PROTOINFO_TCP_STATE-1]     = sizeof(u_int8_t),
+       [CTA_PROTOINFO_TCP_STATE-1]           = sizeof(u_int8_t),
+       [CTA_PROTOINFO_TCP_WSCALE_ORIGINAL-1] = sizeof(u_int8_t),
+       [CTA_PROTOINFO_TCP_WSCALE_REPLY-1]    = sizeof(u_int8_t),
+       [CTA_PROTOINFO_TCP_FLAGS_ORIGINAL-1]  = sizeof(struct nf_ct_tcp_flags),
+       [CTA_PROTOINFO_TCP_FLAGS_REPLY-1]     = sizeof(struct nf_ct_tcp_flags)
 };
 
 static int nfattr_to_tcp(struct nfattr *cda[], struct nf_conn *ct)
@@ -1151,6 +1143,30 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct nf_conn *ct)
        write_lock_bh(&tcp_lock);
        ct->proto.tcp.state =
                *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]);
+
+       if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL-1]) {
+               struct nf_ct_tcp_flags *attr =
+                       NFA_DATA(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL-1]);
+               ct->proto.tcp.seen[0].flags &= ~attr->mask;
+               ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
+       }
+
+       if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY-1]) {
+               struct nf_ct_tcp_flags *attr =
+                       NFA_DATA(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY-1]);
+               ct->proto.tcp.seen[1].flags &= ~attr->mask;
+               ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
+       }
+
+       if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL-1] &&
+           tb[CTA_PROTOINFO_TCP_WSCALE_REPLY-1] &&
+           ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
+           ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
+               ct->proto.tcp.seen[0].td_scale = *(u_int8_t *)
+                       NFA_DATA(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL-1]);
+               ct->proto.tcp.seen[1].td_scale = *(u_int8_t *)
+                       NFA_DATA(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY-1]);
+       }
        write_unlock_bh(&tcp_lock);
 
        return 0;
index a5e5726..3620ecc 100644 (file)
@@ -4,11 +4,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - enable working with Layer 3 protocol independent connection tracking.
- *
- * Derived from net/ipv4/netfilter/ip_conntrack_proto_udp.c
  */
 
 #include <linux/types.h>
index b858636..45baeb0 100644 (file)
@@ -1,20 +1,9 @@
-/* This file contains all the functions required for the standalone
-   nf_conntrack module.
-
-   These are not required by the compatibility layer.
-*/
-
 /* (C) 1999-2001 Paul `Rusty' Russell
  * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *     - generalize L3 protocol dependent part.
- *
- * Derived from net/ipv4/netfilter/ip_conntrack_standalone.c
  */
 
 #include <linux/types.h>
index bf23e48..8797e69 100644 (file)
@@ -3,7 +3,7 @@
  *
  * (C) 2001 by Jay Schulist <jschlst@samba.org>,
  * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
- * (C) 2005 by Pablo Neira Ayuso <pablo@eurodev.net>
+ * (C) 2005,2007 by Pablo Neira Ayuso <pablo@netfilter.org>
  *
  * Initial netfilter messages via netlink development funded and
  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <net/sock.h>
+#include <net/netlink.h>
 #include <linux/init.h>
-#include <linux/spinlock.h>
 
-#include <linux/netfilter.h>
 #include <linux/netlink.h>
 #include <linux/netfilter/nfnetlink.h>
 
@@ -41,32 +40,34 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
 
 static char __initdata nfversion[] = "0.30";
 
-#if 0
-#define DEBUGP(format, args...)        \
-               printk(KERN_DEBUG "%s(%d):%s(): " format, __FILE__, \
-                       __LINE__, __FUNCTION__, ## args)
-#else
-#define DEBUGP(format, args...)
-#endif
-
 static struct sock *nfnl = NULL;
 static struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT];
-DECLARE_MUTEX(nfnl_sem);
+static DEFINE_MUTEX(nfnl_mutex);
 
-void nfnl_lock(void)
+static void nfnl_lock(void)
 {
-       nfnl_shlock();
+       mutex_lock(&nfnl_mutex);
 }
 
-void nfnl_unlock(void)
+static int nfnl_trylock(void)
 {
-       nfnl_shunlock();
+       return !mutex_trylock(&nfnl_mutex);
 }
 
-int nfnetlink_subsys_register(struct nfnetlink_subsystem *n)
+static void __nfnl_unlock(void)
 {
-       DEBUGP("registering subsystem ID %u\n", n->subsys_id);
+       mutex_unlock(&nfnl_mutex);
+}
+
+static void nfnl_unlock(void)
+{
+       mutex_unlock(&nfnl_mutex);
+       if (nfnl->sk_receive_queue.qlen)
+               nfnl->sk_data_ready(nfnl, 0);
+}
 
+int nfnetlink_subsys_register(struct nfnetlink_subsystem *n)
+{
        nfnl_lock();
        if (subsys_table[n->subsys_id]) {
                nfnl_unlock();
@@ -77,24 +78,23 @@ int nfnetlink_subsys_register(struct nfnetlink_subsystem *n)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);
 
 int nfnetlink_subsys_unregister(struct nfnetlink_subsystem *n)
 {
-       DEBUGP("unregistering subsystem ID %u\n", n->subsys_id);
-
        nfnl_lock();
        subsys_table[n->subsys_id] = NULL;
        nfnl_unlock();
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);
 
 static inline struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type)
 {
        u_int8_t subsys_id = NFNL_SUBSYS_ID(type);
 
-       if (subsys_id >= NFNL_SUBSYS_COUNT
-           || subsys_table[subsys_id] == NULL)
+       if (subsys_id >= NFNL_SUBSYS_COUNT)
                return NULL;
 
        return subsys_table[subsys_id];
@@ -105,10 +105,8 @@ nfnetlink_find_client(u_int16_t type, struct nfnetlink_subsystem *ss)
 {
        u_int8_t cb_id = NFNL_MSG_TYPE(type);
 
-       if (cb_id >= ss->cb_count) {
-               DEBUGP("msgtype %u >= %u, returning\n", type, ss->cb_count);
+       if (cb_id >= ss->cb_count)
                return NULL;
-       }
 
        return &ss->cb[cb_id];
 }
@@ -125,6 +123,7 @@ void __nfa_fill(struct sk_buff *skb, int attrtype, int attrlen,
        memcpy(NFA_DATA(nfa), data, attrlen);
        memset(NFA_DATA(nfa) + attrlen, 0, NFA_ALIGN(size) - size);
 }
+EXPORT_SYMBOL_GPL(__nfa_fill);
 
 void nfattr_parse(struct nfattr *tb[], int maxattr, struct nfattr *nfa, int len)
 {
@@ -137,6 +136,7 @@ void nfattr_parse(struct nfattr *tb[], int maxattr, struct nfattr *nfa, int len)
                nfa = NFA_NEXT(nfa, len);
        }
 }
+EXPORT_SYMBOL_GPL(nfattr_parse);
 
 /**
  * nfnetlink_check_attributes - check and parse nfnetlink attributes
@@ -150,37 +150,15 @@ static int
 nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys,
                           struct nlmsghdr *nlh, struct nfattr *cda[])
 {
-       int min_len;
-       u_int16_t attr_count;
+       int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
        u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
-
-       if (unlikely(cb_id >= subsys->cb_count)) {
-               DEBUGP("msgtype %u >= %u, returning\n",
-                       cb_id, subsys->cb_count);
-               return -EINVAL;
-       }
-
-       min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
-       if (unlikely(nlh->nlmsg_len < min_len))
-               return -EINVAL;
-
-       attr_count = subsys->cb[cb_id].attr_count;
-       memset(cda, 0, sizeof(struct nfattr *) * attr_count);
+       u_int16_t attr_count = subsys->cb[cb_id].attr_count;
 
        /* check attribute lengths. */
        if (likely(nlh->nlmsg_len > min_len)) {
                struct nfattr *attr = NFM_NFA(NLMSG_DATA(nlh));
                int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
-
-               while (NFA_OK(attr, attrlen)) {
-                       unsigned flavor = NFA_TYPE(attr);
-                       if (flavor) {
-                               if (flavor > attr_count)
-                                       return -EINVAL;
-                               cda[flavor - 1] = attr;
-                       }
-                       attr = NFA_NEXT(attr, attrlen);
-               }
+               nfattr_parse(cda, attr_count, attr, attrlen);
        }
 
        /* implicit: if nlmsg_len == min_len, we return 0, and an empty
@@ -208,62 +186,46 @@ int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
 
        return err;
 }
+EXPORT_SYMBOL_GPL(nfnetlink_send);
 
 int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags)
 {
        return netlink_unicast(nfnl, skb, pid, flags);
 }
+EXPORT_SYMBOL_GPL(nfnetlink_unicast);
 
 /* Process one complete nfnetlink message. */
-static int nfnetlink_rcv_msg(struct sk_buff *skb,
-                                   struct nlmsghdr *nlh, int *errp)
+static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct nfnl_callback *nc;
        struct nfnetlink_subsystem *ss;
-       int type, err = 0;
-
-       DEBUGP("entered; subsys=%u, msgtype=%u\n",
-                NFNL_SUBSYS_ID(nlh->nlmsg_type),
-                NFNL_MSG_TYPE(nlh->nlmsg_type));
-
-       if (security_netlink_recv(skb, CAP_NET_ADMIN)) {
-               DEBUGP("missing CAP_NET_ADMIN\n");
-               *errp = -EPERM;
-               return -1;
-       }
+       int type, err;
 
-       /* Only requests are handled by kernel now. */
-       if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
-               DEBUGP("received non-request message\n");
-               return 0;
-       }
+       if (security_netlink_recv(skb, CAP_NET_ADMIN))
+               return -EPERM;
 
        /* All the messages must at least contain nfgenmsg */
-       if (nlh->nlmsg_len < NLMSG_SPACE(sizeof(struct nfgenmsg))) {
-               DEBUGP("received message was too short\n");
+       if (nlh->nlmsg_len < NLMSG_SPACE(sizeof(struct nfgenmsg)))
                return 0;
-       }
 
        type = nlh->nlmsg_type;
        ss = nfnetlink_get_subsys(type);
        if (!ss) {
 #ifdef CONFIG_KMOD
-               /* don't call nfnl_shunlock, since it would reenter
+               /* don't call nfnl_unlock, since it would reenter
                 * with further packet processing */
-               up(&nfnl_sem);
+               __nfnl_unlock();
                request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
-               nfnl_shlock();
+               nfnl_lock();
                ss = nfnetlink_get_subsys(type);
                if (!ss)
 #endif
-                       goto err_inval;
+                       return -EINVAL;
        }
 
        nc = nfnetlink_find_client(type, ss);
-       if (!nc) {
-               DEBUGP("unable to find client for type %d\n", type);
-               goto err_inval;
-       }
+       if (!nc)
+               return -EINVAL;
 
        {
                u_int16_t attr_count =
@@ -274,73 +236,21 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb,
 
                err = nfnetlink_check_attributes(ss, nlh, cda);
                if (err < 0)
-                       goto err_inval;
-
-               DEBUGP("calling handler\n");
-               err = nc->call(nfnl, skb, nlh, cda, errp);
-               *errp = err;
-               return err;
-       }
-
-err_inval:
-       DEBUGP("returning -EINVAL\n");
-       *errp = -EINVAL;
-       return -1;
-}
-
-/* Process one packet of messages. */
-static inline int nfnetlink_rcv_skb(struct sk_buff *skb)
-{
-       int err;
-       struct nlmsghdr *nlh;
-
-       while (skb->len >= NLMSG_SPACE(0)) {
-               u32 rlen;
-
-               nlh = (struct nlmsghdr *)skb->data;
-               if (nlh->nlmsg_len < sizeof(struct nlmsghdr)
-                   || skb->len < nlh->nlmsg_len)
-                       return 0;
-               rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-               if (rlen > skb->len)
-                       rlen = skb->len;
-               if (nfnetlink_rcv_msg(skb, nlh, &err)) {
-                       if (!err)
-                               return -1;
-                       netlink_ack(skb, nlh, err);
-               } else
-                       if (nlh->nlmsg_flags & NLM_F_ACK)
-                               netlink_ack(skb, nlh, 0);
-               skb_pull(skb, rlen);
+                       return err;
+               return nc->call(nfnl, skb, nlh, cda);
        }
-
-       return 0;
 }
 
 static void nfnetlink_rcv(struct sock *sk, int len)
 {
-       do {
-               struct sk_buff *skb;
+       unsigned int qlen = 0;
 
-               if (nfnl_shlock_nowait())
+       do {
+               if (nfnl_trylock())
                        return;
-
-               while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-                       if (nfnetlink_rcv_skb(skb)) {
-                               if (skb->len)
-                                       skb_queue_head(&sk->sk_receive_queue,
-                                                      skb);
-                               else
-                                       kfree_skb(skb);
-                               break;
-                       }
-                       kfree_skb(skb);
-               }
-
-               /* don't call nfnl_shunlock, since it would reenter
-                * with further packet processing */
-               up(&nfnl_sem);
-       } while(nfnl && nfnl->sk_receive_queue.qlen);
+               netlink_run_queue(sk, &qlen, nfnetlink_rcv_msg);
+               __nfnl_unlock();
+       } while (qlen);
 }
 
 static void __exit nfnetlink_exit(void)
@@ -355,7 +265,7 @@ static int __init nfnetlink_init(void)
        printk("Netfilter messages via NETLINK v%s.\n", nfversion);
 
        nfnl = netlink_kernel_create(NETLINK_NETFILTER, NFNLGRP_MAX,
-                                    nfnetlink_rcv, THIS_MODULE);
+                                    nfnetlink_rcv, NULL, THIS_MODULE);
        if (!nfnl) {
                printk(KERN_ERR "cannot initialize nfnetlink!\n");
                return -1;
@@ -366,10 +276,3 @@ static int __init nfnetlink_init(void)
 
 module_init(nfnetlink_init);
 module_exit(nfnetlink_exit);
-
-EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);
-EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);
-EXPORT_SYMBOL_GPL(nfnetlink_send);
-EXPORT_SYMBOL_GPL(nfnetlink_unicast);
-EXPORT_SYMBOL_GPL(nfattr_parse);
-EXPORT_SYMBOL_GPL(__nfa_fill);
index 5cb30eb..e32e30e 100644 (file)
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * 2006-01-26 Harald Welte <laforge@netfilter.org>
- *     - Add optional local and global sequence number to detect lost
- *       events from userspace
- *
  */
 #include <linux/module.h>
 #include <linux/skbuff.h>
@@ -163,10 +158,7 @@ instance_create(u_int16_t group_num, int pid)
        /* needs to be two, since we _put() after creation */
        atomic_set(&inst->use, 2);
 
-       init_timer(&inst->timer);
-       inst->timer.function = nfulnl_timer;
-       inst->timer.data = (unsigned long)inst;
-       /* don't start timer yet. (re)start it  with every packet */
+       setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
 
        inst->peer_pid = pid;
        inst->group_num = group_num;
@@ -200,20 +192,14 @@ out_unlock:
 static int __nfulnl_send(struct nfulnl_instance *inst);
 
 static void
-_instance_destroy2(struct nfulnl_instance *inst, int lock)
+__instance_destroy(struct nfulnl_instance *inst)
 {
        /* first pull it out of the global list */
-       if (lock)
-               write_lock_bh(&instances_lock);
-
        UDEBUG("removing instance %p (queuenum=%u) from hash\n",
                inst, inst->group_num);
 
        hlist_del(&inst->hlist);
 
-       if (lock)
-               write_unlock_bh(&instances_lock);
-
        /* then flush all pending packets from skb */
 
        spin_lock_bh(&inst->lock);
@@ -234,16 +220,12 @@ _instance_destroy2(struct nfulnl_instance *inst, int lock)
        instance_put(inst);
 }
 
-static inline void
-__instance_destroy(struct nfulnl_instance *inst)
-{
-       _instance_destroy2(inst, 0);
-}
-
 static inline void
 instance_destroy(struct nfulnl_instance *inst)
 {
-       _instance_destroy2(inst, 1);
+       write_lock_bh(&instances_lock);
+       __instance_destroy(inst);
+       write_unlock_bh(&instances_lock);
 }
 
 static int
@@ -365,9 +347,6 @@ __nfulnl_send(struct nfulnl_instance *inst)
 {
        int status;
 
-       if (!inst->skb)
-               return 0;
-
        if (inst->qlen > 1)
                inst->lastnlh->nlmsg_type = NLMSG_DONE;
 
@@ -391,7 +370,8 @@ static void nfulnl_timer(unsigned long data)
        UDEBUG("timer function called, flushing buffer\n");
 
        spin_lock_bh(&inst->lock);
-       __nfulnl_send(inst);
+       if (inst->skb)
+               __nfulnl_send(inst);
        spin_unlock_bh(&inst->lock);
        instance_put(inst);
 }
@@ -409,15 +389,14 @@ __build_packet_message(struct nfulnl_instance *inst,
                        const struct nf_loginfo *li,
                        const char *prefix, unsigned int plen)
 {
-       unsigned char *old_tail;
        struct nfulnl_msg_packet_hdr pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        __be32 tmp_uint;
+       sk_buff_data_t old_tail = inst->skb->tail;
 
        UDEBUG("entered\n");
 
-       old_tail = inst->skb->tail;
        nlh = NLMSG_PUT(inst->skb, 0, 0,
                        NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
@@ -509,11 +488,11 @@ __build_packet_message(struct nfulnl_instance *inst,
                NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
        }
 
-       if (skb->tstamp.off_sec) {
+       if (skb->tstamp.tv64) {
                struct nfulnl_msg_packet_timestamp ts;
-
-               ts.sec = cpu_to_be64(skb->tstamp.off_sec);
-               ts.usec = cpu_to_be64(skb->tstamp.off_usec);
+               struct timeval tv = ktime_to_timeval(skb->tstamp);
+               ts.sec = cpu_to_be64(tv.tv_sec);
+               ts.usec = cpu_to_be64(tv.tv_usec);
 
                NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
        }
@@ -596,7 +575,6 @@ nfulnl_log_packet(unsigned int pf,
        struct nfulnl_instance *inst;
        const struct nf_loginfo *li;
        unsigned int qthreshold;
-       unsigned int nlbufsiz;
        unsigned int plen;
 
        if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
@@ -606,12 +584,7 @@ nfulnl_log_packet(unsigned int pf,
 
        inst = instance_lookup_get(li->u.ulog.group);
        if (!inst)
-               inst = instance_lookup_get(0);
-       if (!inst) {
-               PRINTR("nfnetlink_log: trying to log packet, "
-                       "but no instance for group %u\n", li->u.ulog.group);
                return;
-       }
 
        plen = 0;
        if (prefix)
@@ -667,24 +640,11 @@ nfulnl_log_packet(unsigned int pf,
                break;
 
        default:
-               spin_unlock_bh(&inst->lock);
-               instance_put(inst);
-               return;
+               goto unlock_and_release;
        }
 
-       if (size > inst->nlbufsiz)
-               nlbufsiz = size;
-       else
-               nlbufsiz = inst->nlbufsiz;
-
-       if (!inst->skb) {
-               if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
-                       UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
-                               inst->nlbufsiz, size);
-                       goto alloc_failure;
-               }
-       } else if (inst->qlen >= qthreshold ||
-                  size > skb_tailroom(inst->skb)) {
+       if (inst->qlen >= qthreshold ||
+           (inst->skb && size > skb_tailroom(inst->skb))) {
                /* either the queue len is too high or we don't have
                 * enough room in the skb left. flush to userspace. */
                UDEBUG("flushing old skb\n");
@@ -693,12 +653,12 @@ nfulnl_log_packet(unsigned int pf,
                if (del_timer(&inst->timer))
                        instance_put(inst);
                __nfulnl_send(inst);
+       }
 
-               if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
-                       UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
-                               inst->nlbufsiz, size);
+       if (!inst->skb) {
+               inst->skb = nfulnl_alloc_skb(inst->nlbufsiz, size);
+               if (!inst->skb)
                        goto alloc_failure;
-               }
        }
 
        UDEBUG("qlen %d, qthreshold %d\n", inst->qlen, qthreshold);
@@ -760,7 +720,7 @@ static struct notifier_block nfulnl_rtnl_notifier = {
 
 static int
 nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
-                 struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
+                 struct nlmsghdr *nlh, struct nfattr *nfqa[])
 {
        return -ENOTSUPP;
 }
@@ -798,7 +758,7 @@ static const int nfula_cfg_min[NFULA_CFG_MAX] = {
 
 static int
 nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
-                  struct nlmsghdr *nlh, struct nfattr *nfula[], int *errp)
+                  struct nlmsghdr *nlh, struct nfattr *nfula[])
 {
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t group_num = ntohs(nfmsg->res_id);
@@ -830,13 +790,13 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                                               NETLINK_CB(skb).pid);
                        if (!inst) {
                                ret = -EINVAL;
-                               goto out_put;
+                               goto out;
                        }
                        break;
                case NFULNL_CFG_CMD_UNBIND:
                        if (!inst) {
                                ret = -ENODEV;
-                               goto out_put;
+                               goto out;
                        }
 
                        if (inst->peer_pid != NETLINK_CB(skb).pid) {
@@ -845,7 +805,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                        }
 
                        instance_destroy(inst);
-                       break;
+                       goto out;
                case NFULNL_CFG_CMD_PF_BIND:
                        UDEBUG("registering log handler for pf=%u\n", pf);
                        ret = nf_log_register(pf, &nfulnl_logger);
@@ -869,7 +829,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                                "group=%u pid=%u =>ENOENT\n",
                                group_num, NETLINK_CB(skb).pid);
                        ret = -ENOENT;
-                       goto out_put;
+                       goto out;
                }
 
                if (inst->peer_pid != NETLINK_CB(skb).pid) {
@@ -939,10 +899,8 @@ struct iter_state {
        unsigned int bucket;
 };
 
-static struct hlist_node *get_first(struct seq_file *seq)
+static struct hlist_node *get_first(struct iter_state *st)
 {
-       struct iter_state *st = seq->private;
-
        if (!st)
                return NULL;
 
@@ -953,10 +911,8 @@ static struct hlist_node *get_first(struct seq_file *seq)
        return NULL;
 }
 
-static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
+static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 {
-       struct iter_state *st = seq->private;
-
        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
@@ -967,13 +923,13 @@ static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
        return h;
 }
 
-static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
+static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
 {
        struct hlist_node *head;
-       head = get_first(seq);
+       head = get_first(st);
 
        if (head)
-               while (pos && (head = get_next(seq, head)))
+               while (pos && (head = get_next(st, head)))
                        pos--;
        return pos ? NULL : head;
 }
@@ -981,13 +937,13 @@ static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
 static void *seq_start(struct seq_file *seq, loff_t *pos)
 {
        read_lock_bh(&instances_lock);
-       return get_idx(seq, *pos);
+       return get_idx(seq->private, *pos);
 }
 
 static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
        (*pos)++;
-       return get_next(s, v);
+       return get_next(s->private, v);
 }
 
 static void seq_stop(struct seq_file *s, void *v)
index d9ce4a7..7a97bec 100644 (file)
@@ -338,7 +338,7 @@ static struct sk_buff *
 nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nfqnl_queue_entry *entry, int *errp)
 {
-       unsigned char *old_tail;
+       sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
@@ -404,7 +404,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (!skb)
                goto nlmsg_failure;
 
-       old_tail= skb->tail;
+       old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
@@ -495,11 +495,11 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
                NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
        }
 
-       if (entskb->tstamp.off_sec) {
+       if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
-
-               ts.sec = cpu_to_be64(entskb->tstamp.off_sec);
-               ts.usec = cpu_to_be64(entskb->tstamp.off_usec);
+               struct timeval tv = ktime_to_timeval(entskb->tstamp);
+               ts.sec = cpu_to_be64(tv.tv_sec);
+               ts.usec = cpu_to_be64(tv.tv_usec);
 
                NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
        }
@@ -648,7 +648,7 @@ nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
        }
        if (!skb_make_writable(&e->skb, data_len))
                return -ENOMEM;
-       memcpy(e->skb->data, data, data_len);
+       skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
 }
@@ -783,7 +783,7 @@ static const int nfqa_verdict_min[NFQA_MAX] = {
 
 static int
 nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
-                  struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
+                  struct nlmsghdr *nlh, struct nfattr *nfqa[])
 {
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
@@ -848,7 +848,7 @@ err_out_put:
 
 static int
 nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
-                 struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
+                 struct nlmsghdr *nlh, struct nfattr *nfqa[])
 {
        return -ENOTSUPP;
 }
@@ -865,7 +865,7 @@ static struct nf_queue_handler nfqh = {
 
 static int
 nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
-                 struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
+                 struct nlmsghdr *nlh, struct nfattr *nfqa[])
 {
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
index ec607a4..0eb2504 100644 (file)
@@ -56,8 +56,8 @@ enum {
 };
 
 static const char *xt_prefix[NPROTO] = {
-       [AF_INET]       = "ip",
-       [AF_INET6]      = "ip6",
+       [AF_INET]       = "ip",
+       [AF_INET6]      = "ip6",
        [NF_ARP]        = "arp",
 };
 
@@ -651,12 +651,6 @@ void *xt_unregister_table(struct xt_table *table)
 EXPORT_SYMBOL_GPL(xt_unregister_table);
 
 #ifdef CONFIG_PROC_FS
-static char *xt_proto_prefix[NPROTO] = {
-       [AF_INET]       = "ip",
-       [AF_INET6]      = "ip6",
-       [NF_ARP]        = "arp",
-};
-
 static struct list_head *xt_get_idx(struct list_head *list, struct seq_file *seq, loff_t pos)
 {
        struct list_head *head = list->next;
@@ -798,7 +792,7 @@ int xt_proto_init(int af)
 
 
 #ifdef CONFIG_PROC_FS
-       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
        proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
        if (!proc)
@@ -806,14 +800,14 @@ int xt_proto_init(int af)
        proc->data = (void *) ((unsigned long) af | (TABLE << 16));
 
 
-       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
        proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
        if (!proc)
                goto out_remove_tables;
        proc->data = (void *) ((unsigned long) af | (MATCH << 16));
 
-       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TARGETS, sizeof(buf));
        proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
        if (!proc)
@@ -825,12 +819,12 @@ int xt_proto_init(int af)
 
 #ifdef CONFIG_PROC_FS
 out_remove_matches:
-       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
        proc_net_remove(buf);
 
 out_remove_tables:
-       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
        proc_net_remove(buf);
 out:
@@ -844,15 +838,15 @@ void xt_proto_fini(int af)
 #ifdef CONFIG_PROC_FS
        char buf[XT_FUNCTION_MAXNAMELEN];
 
-       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
        proc_net_remove(buf);
 
-       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TARGETS, sizeof(buf));
        proc_net_remove(buf);
 
-       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
        proc_net_remove(buf);
 #endif /*CONFIG_PROC_FS*/
index 795c058..b03ce00 100644 (file)
@@ -30,10 +30,7 @@ MODULE_ALIAS("ipt_CONNMARK");
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_CONNMARK.h>
-#include <net/netfilter/nf_conntrack_compat.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack_ecache.h>
-#endif
 
 static unsigned int
 target(struct sk_buff **pskb,
@@ -44,40 +41,33 @@ target(struct sk_buff **pskb,
        const void *targinfo)
 {
        const struct xt_connmark_target_info *markinfo = targinfo;
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
        u_int32_t diff;
        u_int32_t mark;
        u_int32_t newmark;
-       u_int32_t ctinfo;
-       u_int32_t *ctmark = nf_ct_get_mark(*pskb, &ctinfo);
 
-       if (ctmark) {
+       ct = nf_ct_get(*pskb, &ctinfo);
+       if (ct) {
                switch(markinfo->mode) {
                case XT_CONNMARK_SET:
-                       newmark = (*ctmark & ~markinfo->mask) | markinfo->mark;
-                       if (newmark != *ctmark) {
-                               *ctmark = newmark;
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-                               ip_conntrack_event_cache(IPCT_MARK, *pskb);
-#else
+                       newmark = (ct->mark & ~markinfo->mask) | markinfo->mark;
+                       if (newmark != ct->mark) {
+                               ct->mark = newmark;
                                nf_conntrack_event_cache(IPCT_MARK, *pskb);
-#endif
                        }
                        break;
                case XT_CONNMARK_SAVE:
-                       newmark = (*ctmark & ~markinfo->mask) |
+                       newmark = (ct->mark & ~markinfo->mask) |
                                  ((*pskb)->mark & markinfo->mask);
-                       if (*ctmark != newmark) {
-                               *ctmark = newmark;
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-                               ip_conntrack_event_cache(IPCT_MARK, *pskb);
-#else
+                       if (ct->mark != newmark) {
+                               ct->mark = newmark;
                                nf_conntrack_event_cache(IPCT_MARK, *pskb);
-#endif
                        }
                        break;
                case XT_CONNMARK_RESTORE:
                        mark = (*pskb)->mark;
-                       diff = (*ctmark ^ mark) & markinfo->mask;
+                       diff = (ct->mark ^ mark) & markinfo->mask;
                        (*pskb)->mark = mark ^ diff;
                        break;
                }
index 1ab0db6..81c0c58 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/skbuff.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_CONNSECMARK.h>
-#include <net/netfilter/nf_conntrack_compat.h>
+#include <net/netfilter/nf_conntrack.h>
 
 #define PFX "CONNSECMARK: "
 
@@ -36,12 +36,12 @@ MODULE_ALIAS("ip6t_CONNSECMARK");
 static void secmark_save(struct sk_buff *skb)
 {
        if (skb->secmark) {
-               u32 *connsecmark;
+               struct nf_conn *ct;
                enum ip_conntrack_info ctinfo;
 
-               connsecmark = nf_ct_get_secmark(skb, &ctinfo);
-               if (connsecmark && !*connsecmark)
-                       *connsecmark = skb->secmark;
+               ct = nf_ct_get(skb, &ctinfo);
+               if (ct && !ct->secmark)
+                       ct->secmark = skb->secmark;
        }
 }
 
@@ -52,12 +52,12 @@ static void secmark_save(struct sk_buff *skb)
 static void secmark_restore(struct sk_buff *skb)
 {
        if (!skb->secmark) {
-               u32 *connsecmark;
+               struct nf_conn *ct;
                enum ip_conntrack_info ctinfo;
 
-               connsecmark = nf_ct_get_secmark(skb, &ctinfo);
-               if (connsecmark && *connsecmark)
-                       skb->secmark = *connsecmark;
+               ct = nf_ct_get(skb, &ctinfo);
+               if (ct && ct->secmark)
+                       skb->secmark = ct->secmark;
        }
 }
 
index a7cc75a..9f2f220 100644 (file)
@@ -8,8 +8,6 @@
  * published by the Free Software Foundation.
  *
  * See RFC2474 for a description of the DSCP field within the IP Header.
- *
- * xt_DSCP.c,v 1.8 2002/08/06 18:41:57 laforge Exp
 */
 
 #include <linux/module.h>
@@ -35,13 +33,13 @@ static unsigned int target(struct sk_buff **pskb,
                           const void *targinfo)
 {
        const struct xt_DSCP_info *dinfo = targinfo;
-       u_int8_t dscp = ipv4_get_dsfield((*pskb)->nh.iph) >> XT_DSCP_SHIFT;
+       u_int8_t dscp = ipv4_get_dsfield(ip_hdr(*pskb)) >> XT_DSCP_SHIFT;
 
        if (dscp != dinfo->dscp) {
                if (!skb_make_writable(pskb, sizeof(struct iphdr)))
                        return NF_DROP;
 
-               ipv4_change_dsfield((*pskb)->nh.iph, (__u8)(~XT_DSCP_MASK),
+               ipv4_change_dsfield(ip_hdr(*pskb), (__u8)(~XT_DSCP_MASK),
                                    dinfo->dscp << XT_DSCP_SHIFT);
 
        }
@@ -56,13 +54,13 @@ static unsigned int target6(struct sk_buff **pskb,
                            const void *targinfo)
 {
        const struct xt_DSCP_info *dinfo = targinfo;
-       u_int8_t dscp = ipv6_get_dsfield((*pskb)->nh.ipv6h) >> XT_DSCP_SHIFT;
+       u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(*pskb)) >> XT_DSCP_SHIFT;
 
        if (dscp != dinfo->dscp) {
                if (!skb_make_writable(pskb, sizeof(struct ipv6hdr)))
                        return NF_DROP;
 
-               ipv6_change_dsfield((*pskb)->nh.ipv6h, (__u8)(~XT_DSCP_MASK),
+               ipv6_change_dsfield(ipv6_hdr(*pskb), (__u8)(~XT_DSCP_MASK),
                                    dinfo->dscp << XT_DSCP_SHIFT);
        }
        return XT_CONTINUE;
index b874a20..5085fb3 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/skbuff.h>
 
 #include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_conntrack_compat.h>
+#include <net/netfilter/nf_conntrack.h>
 
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("ipt_NOTRACK");
@@ -26,7 +26,7 @@ target(struct sk_buff **pskb,
           If there is a real ct entry corresponding to this packet,
           it'll hang around till timing out. We don't deal with it
           for performance reasons. JK */
-       nf_ct_untrack(*pskb);
+       (*pskb)->nfct = &nf_conntrack_untracked.ct_general;
        (*pskb)->nfctinfo = IP_CT_NEW;
        nf_conntrack_get((*pskb)->nfct);
 
index db7e38c..15fe8f6 100644 (file)
@@ -54,7 +54,7 @@ tcpmss_mangle_packet(struct sk_buff **pskb,
                return -1;
 
        tcplen = (*pskb)->len - tcphoff;
-       tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff);
+       tcph = (struct tcphdr *)(skb_network_header(*pskb) + tcphoff);
 
        /* Since it passed flags test in tcp match, we know it is
           not a fragment, and has data >= tcp header length.  SYN
@@ -113,7 +113,7 @@ tcpmss_mangle_packet(struct sk_buff **pskb,
                        return -1;
                kfree_skb(*pskb);
                *pskb = newskb;
-               tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff);
+               tcph = (struct tcphdr *)(skb_network_header(*pskb) + tcphoff);
        }
 
        skb_put((*pskb), TCPOLEN_MSS);
@@ -145,7 +145,7 @@ xt_tcpmss_target4(struct sk_buff **pskb,
                  const struct xt_target *target,
                  const void *targinfo)
 {
-       struct iphdr *iph = (*pskb)->nh.iph;
+       struct iphdr *iph = ip_hdr(*pskb);
        __be16 newlen;
        int ret;
 
@@ -154,7 +154,7 @@ xt_tcpmss_target4(struct sk_buff **pskb,
        if (ret < 0)
                return NF_DROP;
        if (ret > 0) {
-               iph = (*pskb)->nh.iph;
+               iph = ip_hdr(*pskb);
                newlen = htons(ntohs(iph->tot_len) + ret);
                nf_csum_replace2(&iph->check, iph->tot_len, newlen);
                iph->tot_len = newlen;
@@ -171,7 +171,7 @@ xt_tcpmss_target6(struct sk_buff **pskb,
                  const struct xt_target *target,
                  const void *targinfo)
 {
-       struct ipv6hdr *ipv6h = (*pskb)->nh.ipv6h;
+       struct ipv6hdr *ipv6h = ipv6_hdr(*pskb);
        u8 nexthdr;
        int tcphoff;
        int ret;
@@ -187,7 +187,7 @@ xt_tcpmss_target6(struct sk_buff **pskb,
        if (ret < 0)
                return NF_DROP;
        if (ret > 0) {
-               ipv6h = (*pskb)->nh.ipv6h;
+               ipv6h = ipv6_hdr(*pskb);
                ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret);
        }
        return XT_CONTINUE;
index 5e32dfa..804afe5 100644 (file)
@@ -1,20 +1,11 @@
 /* Kernel module to match connection tracking byte counter.
  * GPL (C) 2002 Martin Devera (devik@cdi.cz).
- *
- * 2004-07-20 Harald Welte <laforge@netfilter.org>
- *     - reimplemented to use per-connection accounting counters
- *     - add functionality to match number of packets
- *     - add functionality to match average packet size
- *     - add support to match directions seperately
- * 2005-10-16 Harald Welte <laforge@netfilter.org>
- *     - Port to x_tables
- *
  */
 #include <linux/module.h>
 #include <linux/skbuff.h>
-#include <net/netfilter/nf_conntrack_compat.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_connbytes.h>
+#include <net/netfilter/nf_conntrack.h>
 
 #include <asm/div64.h>
 #include <asm/bitops.h>
@@ -24,22 +15,6 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_DESCRIPTION("iptables match for matching number of pkts/bytes per connection");
 MODULE_ALIAS("ipt_connbytes");
 
-/* 64bit divisor, dividend and result. dynamic precision */
-static u_int64_t div64_64(u_int64_t dividend, u_int64_t divisor)
-{
-       u_int32_t d = divisor;
-
-       if (divisor > 0xffffffffULL) {
-               unsigned int shift = fls(divisor >> 32);
-
-               d = divisor >> shift;
-               dividend >>= shift;
-       }
-
-       do_div(dividend, d);
-       return dividend;
-}
-
 static int
 match(const struct sk_buff *skb,
       const struct net_device *in,
@@ -51,13 +26,17 @@ match(const struct sk_buff *skb,
       int *hotdrop)
 {
        const struct xt_connbytes_info *sinfo = matchinfo;
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
        u_int64_t what = 0;     /* initialize to make gcc happy */
        u_int64_t bytes = 0;
        u_int64_t pkts = 0;
        const struct ip_conntrack_counter *counters;
 
-       if (!(counters = nf_ct_get_counters(skb)))
-               return 0; /* no match */
+       ct = nf_ct_get(skb, &ctinfo);
+       if (!ct)
+               return 0;
+       counters = ct->counters;
 
        switch (sinfo->what) {
        case XT_CONNBYTES_PKTS:
index 36c2def..e180325 100644 (file)
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_connmark.h>
 
 MODULE_AUTHOR("Henrik Nordstrom <hno@marasytems.com>");
 MODULE_DESCRIPTION("IP tables connmark match module");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("ipt_connmark");
 
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter/xt_connmark.h>
-#include <net/netfilter/nf_conntrack_compat.h>
-
 static int
 match(const struct sk_buff *skb,
       const struct net_device *in,
@@ -42,12 +41,14 @@ match(const struct sk_buff *skb,
       int *hotdrop)
 {
        const struct xt_connmark_info *info = matchinfo;
-       u_int32_t ctinfo;
-       const u_int32_t *ctmark = nf_ct_get_mark(skb, &ctinfo);
-       if (!ctmark)
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (!ct)
                return 0;
 
-       return (((*ctmark) & info->mask) == info->mark) ^ info->invert;
+       return (((ct->mark) & info->mask) == info->mark) ^ info->invert;
 }
 
 static int
index 2885c37..f4ea8fe 100644 (file)
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
-
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
-#else
-#include <net/netfilter/nf_conntrack.h>
-#endif
-
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_conntrack.h>
-#include <net/netfilter/nf_conntrack_compat.h>
+#include <net/netfilter/nf_conntrack.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
 MODULE_DESCRIPTION("iptables connection tracking match module");
 MODULE_ALIAS("ipt_conntrack");
 
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const struct xt_match *match,
-      const void *matchinfo,
-      int offset,
-      unsigned int protoff,
-      int *hotdrop)
-{
-       const struct xt_conntrack_info *sinfo = matchinfo;
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-       unsigned int statebit;
-
-       ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
-
-#define FWINV(bool, invflg) ((bool) ^ !!(sinfo->invflags & invflg))
-
-       if (ct == &ip_conntrack_untracked)
-               statebit = XT_CONNTRACK_STATE_UNTRACKED;
-       else if (ct)
-               statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
-       else
-               statebit = XT_CONNTRACK_STATE_INVALID;
-
-       if (sinfo->flags & XT_CONNTRACK_STATE) {
-               if (ct) {
-                       if (test_bit(IPS_SRC_NAT_BIT, &ct->status))
-                               statebit |= XT_CONNTRACK_STATE_SNAT;
-                       if (test_bit(IPS_DST_NAT_BIT, &ct->status))
-                               statebit |= XT_CONNTRACK_STATE_DNAT;
-               }
-               if (FWINV((statebit & sinfo->statemask) == 0,
-                         XT_CONNTRACK_STATE))
-                       return 0;
-       }
-
-       if (ct == NULL) {
-               if (sinfo->flags & ~XT_CONNTRACK_STATE)
-                       return 0;
-               return 1;
-       }
-
-       if (sinfo->flags & XT_CONNTRACK_PROTO &&
-           FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum !=
-                 sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum,
-                 XT_CONNTRACK_PROTO))
-               return 0;
-
-       if (sinfo->flags & XT_CONNTRACK_ORIGSRC &&
-           FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip &
-                  sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) !=
-                 sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip,
-                 XT_CONNTRACK_ORIGSRC))
-               return 0;
-
-       if (sinfo->flags & XT_CONNTRACK_ORIGDST &&
-           FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip &
-                  sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) !=
-                 sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip,
-                 XT_CONNTRACK_ORIGDST))
-               return 0;
-
-       if (sinfo->flags & XT_CONNTRACK_REPLSRC &&
-           FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip &
-                  sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) !=
-                 sinfo->tuple[IP_CT_DIR_REPLY].src.ip,
-                 XT_CONNTRACK_REPLSRC))
-               return 0;
-
-       if (sinfo->flags & XT_CONNTRACK_REPLDST &&
-           FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip &
-                  sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) !=
-                 sinfo->tuple[IP_CT_DIR_REPLY].dst.ip,
-                 XT_CONNTRACK_REPLDST))
-               return 0;
-
-       if (sinfo->flags & XT_CONNTRACK_STATUS &&
-           FWINV((ct->status & sinfo->statusmask) == 0,
-                 XT_CONNTRACK_STATUS))
-               return 0;
-
-       if (sinfo->flags & XT_CONNTRACK_EXPIRES) {
-               unsigned long expires = timer_pending(&ct->timeout) ?
-                                       (ct->timeout.expires - jiffies)/HZ : 0;
-
-               if (FWINV(!(expires >= sinfo->expires_min &&
-                           expires <= sinfo->expires_max),
-                         XT_CONNTRACK_EXPIRES))
-                       return 0;
-       }
-       return 1;
-}
-
-#else /* CONFIG_IP_NF_CONNTRACK */
 static int
 match(const struct sk_buff *skb,
       const struct net_device *in,
@@ -220,8 +114,6 @@ match(const struct sk_buff *skb,
        return 1;
 }
 
-#endif /* CONFIG_NF_IP_CONNTRACK */
-
 static int
 checkentry(const char *tablename,
           const void *ip,
index 26c7f4a..56b247e 100644 (file)
@@ -1,6 +1,4 @@
 /* IP tables module for matching the value of the IPv4/IPv6 DSCP field
- *
- * xt_dscp.c,v 1.3 2002/08/05 19:00:21 laforge Exp
  *
  * (C) 2002 by Harald Welte <laforge@netfilter.org>
  *
@@ -34,7 +32,7 @@ static int match(const struct sk_buff *skb,
                 int *hotdrop)
 {
        const struct xt_dscp_info *info = matchinfo;
-       u_int8_t dscp = ipv4_get_dsfield(skb->nh.iph) >> XT_DSCP_SHIFT;
+       u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
 
        return (dscp == info->dscp) ^ !!info->invert;
 }
@@ -49,7 +47,7 @@ static int match6(const struct sk_buff *skb,
                  int *hotdrop)
 {
        const struct xt_dscp_info *info = matchinfo;
-       u_int8_t dscp = ipv6_get_dsfield(skb->nh.ipv6h) >> XT_DSCP_SHIFT;
+       u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
 
        return (dscp == info->dscp) ^ !!info->invert;
 }
index 9f37d59..d3043fa 100644 (file)
@@ -216,10 +216,8 @@ static int htable_create(struct xt_hashlimit_info *minfo, int family)
        hinfo->pde->proc_fops = &dl_file_ops;
        hinfo->pde->data = hinfo;
 
-       init_timer(&hinfo->timer);
+       setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo);
        hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
-       hinfo->timer.data = (unsigned long )hinfo;
-       hinfo->timer.function = htable_gc;
        add_timer(&hinfo->timer);
 
        spin_lock_bh(&hashlimit_lock);
@@ -380,22 +378,22 @@ hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst,
        switch (hinfo->family) {
        case AF_INET:
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
-                       dst->addr.ip.dst = skb->nh.iph->daddr;
+                       dst->addr.ip.dst = ip_hdr(skb)->daddr;
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
-                       dst->addr.ip.src = skb->nh.iph->saddr;
+                       dst->addr.ip.src = ip_hdr(skb)->saddr;
 
                if (!(hinfo->cfg.mode &
                      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
                        return 0;
-               nexthdr = skb->nh.iph->protocol;
+               nexthdr = ip_hdr(skb)->protocol;
                break;
 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
        case AF_INET6:
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
-                       memcpy(&dst->addr.ip6.dst, &skb->nh.ipv6h->daddr,
+                       memcpy(&dst->addr.ip6.dst, &ipv6_hdr(skb)->daddr,
                               sizeof(dst->addr.ip6.dst));
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
-                       memcpy(&dst->addr.ip6.src, &skb->nh.ipv6h->saddr,
+                       memcpy(&dst->addr.ip6.src, &ipv6_hdr(skb)->saddr,
                               sizeof(dst->addr.ip6.src));
 
                if (!(hinfo->cfg.mode &
index 407d1d5..c139b2f 100644 (file)
@@ -5,26 +5,16 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- *   19 Mar 2002 Harald Welte <laforge@gnumonks.org>:
- *              - Port to newnat infrastructure
  */
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/netfilter.h>
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#else
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_helper.h>
-#endif
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_helper.h>
-#include <net/netfilter/nf_conntrack_compat.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Martin Josefsson <gandalf@netfilter.org>");
@@ -38,55 +28,6 @@ MODULE_ALIAS("ip6t_helper");
 #define DEBUGP(format, args...)
 #endif
 
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const struct xt_match *match,
-      const void *matchinfo,
-      int offset,
-      unsigned int protoff,
-      int *hotdrop)
-{
-       const struct xt_helper_info *info = matchinfo;
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-       int ret = info->invert;
-
-       ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
-       if (!ct) {
-               DEBUGP("xt_helper: Eek! invalid conntrack?\n");
-               return ret;
-       }
-
-       if (!ct->master) {
-               DEBUGP("xt_helper: conntrack %p has no master\n", ct);
-               return ret;
-       }
-
-       read_lock_bh(&ip_conntrack_lock);
-       if (!ct->master->helper) {
-               DEBUGP("xt_helper: master ct %p has no helper\n",
-                       exp->expectant);
-               goto out_unlock;
-       }
-
-       DEBUGP("master's name = %s , info->name = %s\n",
-               ct->master->helper->name, info->name);
-
-       if (info->name[0] == '\0')
-               ret ^= 1;
-       else
-               ret ^= !strncmp(ct->master->helper->name, info->name,
-                               strlen(ct->master->helper->name));
-out_unlock:
-       read_unlock_bh(&ip_conntrack_lock);
-       return ret;
-}
-
-#else /* CONFIG_IP_NF_CONNTRACK */
-
 static int
 match(const struct sk_buff *skb,
       const struct net_device *in,
@@ -134,7 +75,6 @@ out_unlock:
        read_unlock_bh(&nf_conntrack_lock);
        return ret;
 }
-#endif
 
 static int check(const char *tablename,
                 const void *inf,
index 32fb998..77288c5 100644 (file)
@@ -31,7 +31,7 @@ match(const struct sk_buff *skb,
       int *hotdrop)
 {
        const struct xt_length_info *info = matchinfo;
-       u_int16_t pktlen = ntohs(skb->nh.iph->tot_len);
+       u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len);
 
        return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
 }
@@ -47,7 +47,8 @@ match6(const struct sk_buff *skb,
        int *hotdrop)
 {
        const struct xt_length_info *info = matchinfo;
-       u_int16_t pktlen = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr);
+       const u_int16_t pktlen = (ntohs(ipv6_hdr(skb)->payload_len) +
+                                 sizeof(struct ipv6hdr));
 
        return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
 }
index 6fd8347..571a72a 100644 (file)
@@ -1,10 +1,3 @@
-/* Kernel module to control the rate
- *
- * 2 September 1999: Changed from the target RATE to the match
- *                   `limit', removed logging.  Did I mention that
- *                   Alexey is a fucking genius?
- *                   Rusty Russell (rusty@rustcorp.com.au).  */
-
 /* (C) 1999 Jérôme de Vivie <devivie@info.enserb.u-bordeaux.fr>
  * (C) 1999 Hervé Eychenne <eychenne@info.enserb.u-bordeaux.fr>
  *
index d430d90..1d3a1d9 100644 (file)
@@ -37,8 +37,8 @@ match(const struct sk_buff *skb,
     const struct xt_mac_info *info = matchinfo;
 
     /* Is mac pointer valid? */
-    return (skb->mac.raw >= skb->head
-           && (skb->mac.raw + ETH_HLEN) <= skb->data
+    return (skb_mac_header(skb) >= skb->head &&
+           (skb_mac_header(skb) + ETH_HLEN) <= skb->data
            /* If so, compare... */
            && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr))
                ^ info->invert));
index 16e7b08..e1409fc 100644 (file)
@@ -34,7 +34,7 @@ static int match(const struct sk_buff *skb,
        const struct xt_pkttype_info *info = matchinfo;
 
        if (skb->pkt_type == PACKET_LOOPBACK)
-               type = (MULTICAST(skb->nh.iph->daddr)
+               type = (MULTICAST(ip_hdr(skb)->daddr)
                        ? PACKET_MULTICAST
                        : PACKET_BROADCAST);
        else
index 97ffc2f..c2017f8 100644 (file)
@@ -1,6 +1,4 @@
 /* IP tables module for matching the routing realm
- *
- * $Id: ipt_realm.c,v 1.3 2004/03/05 13:25:40 laforge Exp $
  *
  * (C) 2003 by Sampsa Ranta <sampsa@netsonic.fi>
  *
index df37b91..149294f 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
-#include <net/netfilter/nf_conntrack_compat.h>
+#include <net/netfilter/nf_conntrack.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_state.h>
 
@@ -36,7 +36,7 @@ match(const struct sk_buff *skb,
 
        if (nf_ct_is_untracked(skb))
                statebit = XT_STATE_UNTRACKED;
-       else if (!nf_ct_get_ctinfo(skb, &ctinfo))
+       else if (!nf_ct_get(skb, &ctinfo))
                statebit = XT_STATE_INVALID;
        else
                statebit = XT_STATE_BIT(ctinfo);
index e73d8f5..42d2fb9 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/types.h>
 #include <linux/audit.h>
 #include <linux/selinux.h>
+#include <linux/mutex.h>
 
 #include <net/sock.h>
 #include <net/scm.h>
@@ -76,7 +77,8 @@ struct netlink_sock {
        unsigned long           state;
        wait_queue_head_t       wait;
        struct netlink_callback *cb;
-       spinlock_t              cb_lock;
+       struct mutex            *cb_mutex;
+       struct mutex            cb_def_mutex;
        void                    (*data_ready)(struct sock *sk, int bytes);
        struct module           *module;
 };
@@ -108,6 +110,7 @@ struct netlink_table {
        unsigned long *listeners;
        unsigned int nl_nonroot;
        unsigned int groups;
+       struct mutex *cb_mutex;
        struct module *module;
        int registered;
 };
@@ -118,6 +121,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 
 static int netlink_dump(struct sock *sk);
 static void netlink_destroy_callback(struct netlink_callback *cb);
+static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb);
 
 static DEFINE_RWLOCK(nl_table_lock);
 static atomic_t nl_table_users = ATOMIC_INIT(0);
@@ -370,7 +374,8 @@ static struct proto netlink_proto = {
        .obj_size = sizeof(struct netlink_sock),
 };
 
-static int __netlink_create(struct socket *sock, int protocol)
+static int __netlink_create(struct socket *sock, struct mutex *cb_mutex,
+                           int protocol)
 {
        struct sock *sk;
        struct netlink_sock *nlk;
@@ -384,7 +389,12 @@ static int __netlink_create(struct socket *sock, int protocol)
        sock_init_data(sock, sk);
 
        nlk = nlk_sk(sk);
-       spin_lock_init(&nlk->cb_lock);
+       if (cb_mutex)
+               nlk->cb_mutex = cb_mutex;
+       else {
+               nlk->cb_mutex = &nlk->cb_def_mutex;
+               mutex_init(nlk->cb_mutex);
+       }
        init_waitqueue_head(&nlk->wait);
 
        sk->sk_destruct = netlink_sock_destruct;
@@ -395,8 +405,8 @@ static int __netlink_create(struct socket *sock, int protocol)
 static int netlink_create(struct socket *sock, int protocol)
 {
        struct module *module = NULL;
+       struct mutex *cb_mutex;
        struct netlink_sock *nlk;
-       unsigned int groups;
        int err = 0;
 
        sock->state = SS_UNCONNECTED;
@@ -418,10 +428,10 @@ static int netlink_create(struct socket *sock, int protocol)
        if (nl_table[protocol].registered &&
            try_module_get(nl_table[protocol].module))
                module = nl_table[protocol].module;
-       groups = nl_table[protocol].groups;
+       cb_mutex = nl_table[protocol].cb_mutex;
        netlink_unlock_table();
 
-       if ((err = __netlink_create(sock, protocol)) < 0)
+       if ((err = __netlink_create(sock, cb_mutex, protocol)) < 0)
                goto out_module;
 
        nlk = nlk_sk(sock->sk);
@@ -443,21 +453,21 @@ static int netlink_release(struct socket *sock)
                return 0;
 
        netlink_remove(sk);
+       sock_orphan(sk);
        nlk = nlk_sk(sk);
 
-       spin_lock(&nlk->cb_lock);
+       mutex_lock(nlk->cb_mutex);
        if (nlk->cb) {
                if (nlk->cb->done)
                        nlk->cb->done(nlk->cb);
                netlink_destroy_callback(nlk->cb);
                nlk->cb = NULL;
        }
-       spin_unlock(&nlk->cb_lock);
+       mutex_unlock(nlk->cb_mutex);
 
        /* OK. Socket is unlinked, and, therefore,
           no new packets will arrive */
 
-       sock_orphan(sk);
        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);
 
@@ -1215,7 +1225,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                copied = len;
        }
 
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 
        if (msg->msg_name) {
@@ -1242,6 +1252,9 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 
        scm_recv(sock, msg, siocb->scm, flags);
 
+       if (flags & MSG_TRUNC)
+               copied = skb->len;
+
 out:
        netlink_rcv_wake(sk);
        return err ? : copied;
@@ -1265,7 +1278,7 @@ static void netlink_data_ready(struct sock *sk, int len)
 struct sock *
 netlink_kernel_create(int unit, unsigned int groups,
                      void (*input)(struct sock *sk, int len),
-                     struct module *module)
+                     struct mutex *cb_mutex, struct module *module)
 {
        struct socket *sock;
        struct sock *sk;
@@ -1280,7 +1293,7 @@ netlink_kernel_create(int unit, unsigned int groups,
        if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
                return NULL;
 
-       if (__netlink_create(sock, unit) < 0)
+       if (__netlink_create(sock, cb_mutex, unit) < 0)
                goto out_sock_release;
 
        if (groups < 32)
@@ -1304,6 +1317,7 @@ netlink_kernel_create(int unit, unsigned int groups,
        netlink_table_grab();
        nl_table[unit].groups = groups;
        nl_table[unit].listeners = listeners;
+       nl_table[unit].cb_mutex = cb_mutex;
        nl_table[unit].module = module;
        nl_table[unit].registered = 1;
        netlink_table_ungrab();
@@ -1346,7 +1360,7 @@ static int netlink_dump(struct sock *sk)
        if (!skb)
                goto errout;
 
-       spin_lock(&nlk->cb_lock);
+       mutex_lock(nlk->cb_mutex);
 
        cb = nlk->cb;
        if (cb == NULL) {
@@ -1357,7 +1371,7 @@ static int netlink_dump(struct sock *sk)
        len = cb->dump(skb, cb);
 
        if (len > 0) {
-               spin_unlock(&nlk->cb_lock);
+               mutex_unlock(nlk->cb_mutex);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, len);
                return 0;
@@ -1375,13 +1389,13 @@ static int netlink_dump(struct sock *sk)
        if (cb->done)
                cb->done(cb);
        nlk->cb = NULL;
-       spin_unlock(&nlk->cb_lock);
+       mutex_unlock(nlk->cb_mutex);
 
        netlink_destroy_callback(cb);
        return 0;
 
 errout_skb:
-       spin_unlock(&nlk->cb_lock);
+       mutex_unlock(nlk->cb_mutex);
        kfree_skb(skb);
 errout:
        return err;
@@ -1412,20 +1426,25 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                return -ECONNREFUSED;
        }
        nlk = nlk_sk(sk);
-       /* A dump is in progress... */
-       spin_lock(&nlk->cb_lock);
-       if (nlk->cb) {
-               spin_unlock(&nlk->cb_lock);
+       /* A dump or destruction is in progress... */
+       mutex_lock(nlk->cb_mutex);
+       if (nlk->cb || sock_flag(sk, SOCK_DEAD)) {
+               mutex_unlock(nlk->cb_mutex);
                netlink_destroy_callback(cb);
                sock_put(sk);
                return -EBUSY;
        }
        nlk->cb = cb;
-       spin_unlock(&nlk->cb_lock);
+       mutex_unlock(nlk->cb_mutex);
 
        netlink_dump(sk);
        sock_put(sk);
-       return 0;
+
+       /* We successfully started a dump; by returning -EINTR we
+        * signal the queue management to interrupt processing of
+        * any netlink messages so userspace gets a chance to read
+        * the results. */
+       return -EINTR;
 }
 
 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
@@ -1462,27 +1481,35 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
 }
 
 static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
-                                                    struct nlmsghdr *, int *))
+                                                    struct nlmsghdr *))
 {
        struct nlmsghdr *nlh;
        int err;
 
        while (skb->len >= nlmsg_total_size(0)) {
-               nlh = (struct nlmsghdr *) skb->data;
+               nlh = nlmsg_hdr(skb);
+               err = 0;
 
                if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
                        return 0;
 
-               if (cb(skb, nlh, &err) < 0) {
-                       /* Not an error, but we have to interrupt processing
-                        * here. Note: that in this case we do not pull
-                        * message from skb, it will be processed later.
-                        */
-                       if (err == 0)
-                               return -1;
+               /* Only requests are handled by the kernel */
+               if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
+                       goto skip;
+
+               /* Skip control messages */
+               if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
+                       goto skip;
+
+               err = cb(skb, nlh);
+               if (err == -EINTR) {
+                       /* Not an error, but we interrupt processing */
+                       netlink_queue_skip(nlh, skb);
+                       return err;
+               }
+skip:
+               if (nlh->nlmsg_flags & NLM_F_ACK || err)
                        netlink_ack(skb, nlh, err);
-               } else if (nlh->nlmsg_flags & NLM_F_ACK)
-                       netlink_ack(skb, nlh, 0);
 
                netlink_queue_skip(nlh, skb);
        }
@@ -1504,9 +1531,14 @@ static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
  *
  * qlen must be initialized to 0 before the initial entry, afterwards
  * the function may be called repeatedly until qlen reaches 0.
+ *
+ * The callback function may return -EINTR to signal that processing
+ * of netlink messages shall be interrupted. In this case the message
+ * currently being processed will NOT be requeued onto the receive
+ * queue.
  */
 void netlink_run_queue(struct sock *sk, unsigned int *qlen,
-                      int (*cb)(struct sk_buff *, struct nlmsghdr *, int *))
+                      int (*cb)(struct sk_buff *, struct nlmsghdr *))
 {
        struct sk_buff *skb;
 
@@ -1537,7 +1569,7 @@ void netlink_run_queue(struct sock *sk, unsigned int *qlen,
  * Pulls the given netlink message off the socket buffer so the next
  * call to netlink_queue_run() will not reconsider the message.
  */
-void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
+static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
 {
        int msglen = NLMSG_ALIGN(nlh->nlmsg_len);
 
@@ -1820,12 +1852,10 @@ core_initcall(netlink_proto_init);
 
 EXPORT_SYMBOL(netlink_ack);
 EXPORT_SYMBOL(netlink_run_queue);
-EXPORT_SYMBOL(netlink_queue_skip);
 EXPORT_SYMBOL(netlink_broadcast);
 EXPORT_SYMBOL(netlink_dump_start);
 EXPORT_SYMBOL(netlink_kernel_create);
 EXPORT_SYMBOL(netlink_register_notifier);
-EXPORT_SYMBOL(netlink_set_err);
 EXPORT_SYMBOL(netlink_set_nonroot);
 EXPORT_SYMBOL(netlink_unicast);
 EXPORT_SYMBOL(netlink_unregister_notifier);
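Taken together, the af_netlink.c changes above switch dump-callback protection from the old cb_lock spinlock to a per-protocol mutex, add a cb_mutex argument to netlink_kernel_create(), and make the per-message callback passed to netlink_run_queue() return an errno directly (with the -EINTR from netlink_dump_start() interrupting queue processing) instead of writing through an *errp pointer. A minimal sketch of a kernel user under the new API; the protocol number, names and handler body are invented:

/* Hypothetical module, not part of this patch: exercises the reworked
 * netlink_kernel_create() and netlink_run_queue() interfaces. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <net/sock.h>

#define NETLINK_EXAMPLE 31              /* assumed-free protocol number */

static struct sock *example_sock;

static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        /* Non-requests and control messages are already skipped by
         * netlink_rcv_skb(); return 0 or a negative errno.  -EINTR is
         * only returned when handing off to netlink_dump_start(). */
        printk(KERN_DEBUG "example: type %u, len %u\n",
               nlh->nlmsg_type, nlh->nlmsg_len);
        return 0;
}

static void example_rcv(struct sock *sk, int len)
{
        unsigned int qlen = 0;

        do {
                netlink_run_queue(sk, &qlen, example_rcv_msg);
        } while (qlen);
}

static int __init example_init(void)
{
        /* NULL cb_mutex: the socket falls back to its own cb_def_mutex. */
        example_sock = netlink_kernel_create(NETLINK_EXAMPLE, 0, example_rcv,
                                             NULL, THIS_MODULE);
        return example_sock ? 0 : -ENOMEM;
}
module_init(example_init);
MODULE_LICENSE("GPL");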
index 0041395..df5f820 100644 (file)
@@ -67,6 +67,11 @@ static int validate_nla(struct nlattr *nla, int maxtype,
                }
                break;
 
+       case NLA_BINARY:
+               if (pt->len && attrlen > pt->len)
+                       return -ERANGE;
+               break;
+
        default:
                if (pt->len)
                        minlen = pt->len;
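The nlattr.c hunk above adds validation for the NLA_BINARY attribute type: a non-zero policy .len now acts as an upper bound on the attribute payload. A hypothetical policy using it (the attribute name and 64-byte limit are invented for illustration):

/* Hypothetical nla_policy, not part of the patch: EXAMPLE_ATTR_COOKIE
 * accepts at most 64 bytes; longer attributes make validate_nla()
 * return -ERANGE during nlmsg_parse()/nla_parse(). */
#include <net/netlink.h>

#define EXAMPLE_ATTR_COOKIE     1
#define EXAMPLE_ATTR_MAX        1

static struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
        [EXAMPLE_ATTR_COOKIE] = { .type = NLA_BINARY, .len = 64 },
};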
index c299679..6e31234 100644 (file)
@@ -295,66 +295,46 @@ int genl_unregister_family(struct genl_family *family)
        return -ENOENT;
 }
 
-static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
-                              int *errp)
+static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct genl_ops *ops;
        struct genl_family *family;
        struct genl_info info;
        struct genlmsghdr *hdr = nlmsg_data(nlh);
-       int hdrlen, err = -EINVAL;
-
-       if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
-               goto ignore;
-
-       if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
-               goto ignore;
+       int hdrlen, err;
 
        family = genl_family_find_byid(nlh->nlmsg_type);
-       if (family == NULL) {
-               err = -ENOENT;
-               goto errout;
-       }
+       if (family == NULL)
+               return -ENOENT;
 
        hdrlen = GENL_HDRLEN + family->hdrsize;
        if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
-               goto errout;
+               return -EINVAL;
 
        ops = genl_get_cmd(hdr->cmd, family);
-       if (ops == NULL) {
-               err = -EOPNOTSUPP;
-               goto errout;
-       }
+       if (ops == NULL)
+               return -EOPNOTSUPP;
 
-       if ((ops->flags & GENL_ADMIN_PERM) && security_netlink_recv(skb, CAP_NET_ADMIN)) {
-               err = -EPERM;
-               goto errout;
-       }
+       if ((ops->flags & GENL_ADMIN_PERM) &&
+           security_netlink_recv(skb, CAP_NET_ADMIN))
+               return -EPERM;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
-               if (ops->dumpit == NULL) {
-                       err = -EOPNOTSUPP;
-                       goto errout;
-               }
+               if (ops->dumpit == NULL)
+                       return -EOPNOTSUPP;
 
-               *errp = err = netlink_dump_start(genl_sock, skb, nlh,
-                                                ops->dumpit, ops->done);
-               if (err == 0)
-                       skb_pull(skb, min(NLMSG_ALIGN(nlh->nlmsg_len),
-                                         skb->len));
-               return -1;
+               return netlink_dump_start(genl_sock, skb, nlh,
+                                         ops->dumpit, ops->done);
        }
 
-       if (ops->doit == NULL) {
-               err = -EOPNOTSUPP;
-               goto errout;
-       }
+       if (ops->doit == NULL)
+               return -EOPNOTSUPP;
 
        if (family->attrbuf) {
                err = nlmsg_parse(nlh, hdrlen, family->attrbuf, family->maxattr,
                                  ops->policy);
                if (err < 0)
-                       goto errout;
+                       return err;
        }
 
        info.snd_seq = nlh->nlmsg_seq;
@@ -364,15 +344,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
        info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
        info.attrs = family->attrbuf;
 
-       *errp = err = ops->doit(skb, &info);
-       return err;
-
-ignore:
-       return 0;
-
-errout:
-       *errp = err;
-       return -1;
+       return ops->doit(skb, &info);
 }
 
 static void genl_rcv(struct sock *sk, int len)
@@ -586,7 +558,7 @@ static int __init genl_init(void)
 
        netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
        genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID,
-                                         genl_rcv, THIS_MODULE);
+                                         genl_rcv, NULL, THIS_MODULE);
        if (genl_sock == NULL)
                panic("GENL: Cannot initialize generic netlink\n");
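With genl_rcv_msg() simplified as above, generic netlink handlers now hand their return value straight back to the netlink queue code (netlink_dump_start() supplies the -EINTR for dump requests). A hypothetical family/op pair written against that model; the family name, command number and version are invented:

/* Hypothetical generic netlink family, not part of the patch. */
#include <net/genetlink.h>

static struct genl_family example_family = {
        .id      = GENL_ID_GENERATE,
        .name    = "example",
        .version = 1,
        .maxattr = 0,
};

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
        return 0;       /* propagated as-is by genl_rcv_msg() */
}

static struct genl_ops example_ops = {
        .cmd  = 1,
        .doit = example_doit,
};

/* Registered via genl_register_family() and genl_register_ops(). */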
 
index bf9837d..5d4a26c 100644 (file)
@@ -625,42 +625,42 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
        ax25_address *source = NULL;
        ax25_uid_assoc *user;
        struct net_device *dev;
+       int err = 0;
 
        lock_sock(sk);
        if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
                sock->state = SS_CONNECTED;
-               release_sock(sk);
-               return 0;       /* Connect completed during a ERESTARTSYS event */
+               goto out_release;       /* Connect completed during a ERESTARTSYS event */
        }
 
        if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
                sock->state = SS_UNCONNECTED;
-               release_sock(sk);
-               return -ECONNREFUSED;
+               err = -ECONNREFUSED;
+               goto out_release;
        }
 
        if (sk->sk_state == TCP_ESTABLISHED) {
-               release_sock(sk);
-               return -EISCONN;        /* No reconnect on a seqpacket socket */
+               err = -EISCONN; /* No reconnect on a seqpacket socket */
+               goto out_release;
        }
 
        sk->sk_state   = TCP_CLOSE;
        sock->state = SS_UNCONNECTED;
 
        if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) {
-               release_sock(sk);
-               return -EINVAL;
+               err = -EINVAL;
+               goto out_release;
        }
        if (addr->sax25_family != AF_NETROM) {
-               release_sock(sk);
-               return -EINVAL;
+               err = -EINVAL;
+               goto out_release;
        }
        if (sock_flag(sk, SOCK_ZAPPED)) {       /* Must bind first - autobinding in this may or may not work */
                sock_reset_flag(sk, SOCK_ZAPPED);
 
                if ((dev = nr_dev_first()) == NULL) {
-                       release_sock(sk);
-                       return -ENETUNREACH;
+                       err = -ENETUNREACH;
+                       goto out_release;
                }
                source = (ax25_address *)dev->dev_addr;
 
@@ -671,8 +671,8 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
                } else {
                        if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) {
                                dev_put(dev);
-                               release_sock(sk);
-                               return -EPERM;
+                               err = -EPERM;
+                               goto out_release;
                        }
                        nr->user_addr   = *source;
                }
@@ -707,8 +707,8 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
 
        /* Now the loop */
        if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
-               release_sock(sk);
-               return -EINPROGRESS;
+               err = -EINPROGRESS;
+               goto out_release;
        }
 
        /*
@@ -716,46 +716,46 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
         * closed.
         */
        if (sk->sk_state == TCP_SYN_SENT) {
-               struct task_struct *tsk = current;
-               DECLARE_WAITQUEUE(wait, tsk);
+               DEFINE_WAIT(wait);
 
-               add_wait_queue(sk->sk_sleep, &wait);
                for (;;) {
-                       set_current_state(TASK_INTERRUPTIBLE);
+                       prepare_to_wait(sk->sk_sleep, &wait,
+                                       TASK_INTERRUPTIBLE);
                        if (sk->sk_state != TCP_SYN_SENT)
                                break;
-                       release_sock(sk);
-                       if (!signal_pending(tsk)) {
+                       if (!signal_pending(current)) {
+                               release_sock(sk);
                                schedule();
                                lock_sock(sk);
                                continue;
                        }
-                       current->state = TASK_RUNNING;
-                       remove_wait_queue(sk->sk_sleep, &wait);
-                       return -ERESTARTSYS;
+                       err = -ERESTARTSYS;
+                       break;
                }
-               current->state = TASK_RUNNING;
-               remove_wait_queue(sk->sk_sleep, &wait);
+               finish_wait(sk->sk_sleep, &wait);
+               if (err)
+                       goto out_release;
        }
 
        if (sk->sk_state != TCP_ESTABLISHED) {
                sock->state = SS_UNCONNECTED;
-               release_sock(sk);
-               return sock_error(sk);  /* Always set at this point */
+               err = sock_error(sk);   /* Always set at this point */
+               goto out_release;
        }
 
        sock->state = SS_CONNECTED;
+
+out_release:
        release_sock(sk);
 
-       return 0;
+       return err;
 }
 
 static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
 {
-       struct task_struct *tsk = current;
-       DECLARE_WAITQUEUE(wait, tsk);
        struct sk_buff *skb;
        struct sock *newsk;
+       DEFINE_WAIT(wait);
        struct sock *sk;
        int err = 0;
 
@@ -765,42 +765,40 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
        lock_sock(sk);
        if (sk->sk_type != SOCK_SEQPACKET) {
                err = -EOPNOTSUPP;
-               goto out;
+               goto out_release;
        }
 
        if (sk->sk_state != TCP_LISTEN) {
                err = -EINVAL;
-               goto out;
+               goto out_release;
        }
 
        /*
         *      The write queue this time is holding sockets ready to use
         *      hooked into the SABM we saved
         */
-       add_wait_queue(sk->sk_sleep, &wait);
        for (;;) {
+               prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
                skb = skb_dequeue(&sk->sk_receive_queue);
                if (skb)
                        break;
 
-               current->state = TASK_INTERRUPTIBLE;
-               release_sock(sk);
                if (flags & O_NONBLOCK) {
-                       current->state = TASK_RUNNING;
-                       remove_wait_queue(sk->sk_sleep, &wait);
-                       return -EWOULDBLOCK;
+                       err = -EWOULDBLOCK;
+                       break;
                }
-               if (!signal_pending(tsk)) {
+               if (!signal_pending(current)) {
+                       release_sock(sk);
                        schedule();
                        lock_sock(sk);
                        continue;
                }
-               current->state = TASK_RUNNING;
-               remove_wait_queue(sk->sk_sleep, &wait);
-               return -ERESTARTSYS;
+               err = -ERESTARTSYS;
+               break;
        }
-       current->state = TASK_RUNNING;
-       remove_wait_queue(sk->sk_sleep, &wait);
+       finish_wait(sk->sk_sleep, &wait);
+       if (err)
+               goto out_release;
 
        newsk = skb->sk;
        newsk->sk_socket = newsock;
@@ -811,8 +809,9 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
        sk_acceptq_removed(sk);
        newsock->sk = newsk;
 
-out:
+out_release:
        release_sock(sk);
+
        return err;
 }
 
@@ -878,7 +877,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
        if (frametype == NR_PROTOEXT &&
            circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) {
                skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
-               skb->h.raw = skb->data;
+               skb_reset_transport_header(skb);
 
                return nr_rx_ip(skb, dev);
        }
@@ -904,7 +903,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
        }
 
        if (sk != NULL) {
-               skb->h.raw = skb->data;
+               skb_reset_transport_header(skb);
 
                if (frametype == NR_CONNACK && skb->len == 22)
                        nr_sk(sk)->bpqext = 1;
@@ -1074,6 +1073,7 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
                goto out;
 
        skb_reserve(skb, size - len);
+       skb_reset_transport_header(skb);
 
        /*
         *      Push down the NET/ROM header
@@ -1094,14 +1094,12 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
        /*
         *      Put the data on the end
         */
+       skb_put(skb, len);
 
-       skb->h.raw = skb_put(skb, len);
-
-       asmptr = skb->h.raw;
        SOCK_DEBUG(sk, "NET/ROM: Appending user data\n");
 
        /* User data follows immediately after the NET/ROM transport header */
-       if (memcpy_fromiovec(asmptr, msg->msg_iov, len)) {
+       if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) {
                kfree_skb(skb);
                err = -EFAULT;
                goto out;
@@ -1149,7 +1147,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
                return er;
        }
 
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        copied     = skb->len;
 
        if (copied > size) {
@@ -1161,7 +1159,8 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        if (sax != NULL) {
                sax->sax25_family = AF_NETROM;
-               memcpy(sax->sax25_call.ax25_call, skb->data + 7, AX25_ADDR_LEN);
+               skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
+                             AX25_ADDR_LEN);
        }
 
        msg->msg_namelen = sizeof(*sax);
@@ -1209,6 +1208,12 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                release_sock(sk);
                return ret;
 
+       case SIOCGSTAMPNS:
+               lock_sock(sk);
+               ret = sock_get_timestampns(sk, argp);
+               release_sock(sk);
+               return ret;
+
        case SIOCGIFADDR:
        case SIOCSIFADDR:
        case SIOCGIFDSTADDR:
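The connect/accept rework in af_netrom.c above (mirrored in af_rose.c further down) replaces the hand-rolled add_wait_queue()/set_current_state() loops with DEFINE_WAIT() plus prepare_to_wait()/finish_wait(), and funnels every exit through a single out_release label. The pattern, condensed into a hypothetical helper (assumed to be called with the socket locked):

/* Hypothetical condensed form of the converted wait loop. */
#include <linux/sched.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/tcp_states.h>

static int example_wait_while_syn_sent(struct sock *sk)
{
        DEFINE_WAIT(wait);
        int err = 0;

        for (;;) {
                prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
                if (sk->sk_state != TCP_SYN_SENT)
                        break;                  /* condition met */
                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        break;
                }
                release_sock(sk);
                schedule();
                lock_sock(sk);
        }
        finish_wait(sk->sk_sleep, &wait);
        return err;
}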
index 9a97ed6..c7b5d93 100644 (file)
@@ -56,8 +56,8 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
 
        /* Spoof incoming device */
        skb->dev      = dev;
-       skb->mac.raw  = skb->nh.raw;
-       skb->nh.raw   = skb->data;
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
        skb->pkt_type = PACKET_HOST;
 
        netif_rx(skb);
index 5560acb..6817648 100644 (file)
@@ -51,10 +51,12 @@ static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
                if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL)
                        return 1;
 
-               skbn->h.raw = skbn->data;
+               skb_reset_transport_header(skbn);
 
                while ((skbo = skb_dequeue(&nr->frag_queue)) != NULL) {
-                       memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);
+                       skb_copy_from_linear_data(skbo,
+                                                 skb_put(skbn, skbo->len),
+                                                 skbo->len);
                        kfree_skb(skbo);
                }
 
index e856ae1..f324d5d 100644 (file)
@@ -34,8 +34,8 @@ int nr_loopback_queue(struct sk_buff *skb)
        struct sk_buff *skbn;
 
        if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) {
-               memcpy(skb_put(skbn, skb->len), skb->data, skb->len);
-               skbn->h.raw = skbn->data;
+               skb_copy_from_linear_data(skb, skb_put(skbn, skb->len), skb->len);
+               skb_reset_transport_header(skbn);
 
                skb_queue_tail(&loopback_queue, skbn);
 
index 0cbfb61..e3e6c44 100644 (file)
@@ -40,7 +40,7 @@ void nr_output(struct sock *sk, struct sk_buff *skb)
 
        if (skb->len - NR_TRANSPORT_LEN > NR_MAX_PACKET_SIZE) {
                /* Save a copy of the Transport Header */
-               memcpy(transport, skb->data, NR_TRANSPORT_LEN);
+               skb_copy_from_linear_data(skb, transport, NR_TRANSPORT_LEN);
                skb_pull(skb, NR_TRANSPORT_LEN);
 
                frontlen = skb_headroom(skb);
@@ -54,13 +54,13 @@ void nr_output(struct sock *sk, struct sk_buff *skb)
                        len = (NR_MAX_PACKET_SIZE > skb->len) ? skb->len : NR_MAX_PACKET_SIZE;
 
                        /* Copy the user data */
-                       memcpy(skb_put(skbn, len), skb->data, len);
+                       skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                        skb_pull(skb, len);
 
                        /* Duplicate the Transport Header */
                        skb_push(skbn, NR_TRANSPORT_LEN);
-                       memcpy(skbn->data, transport, NR_TRANSPORT_LEN);
-
+                       skb_copy_to_linear_data(skbn, transport,
+                                               NR_TRANSPORT_LEN);
                        if (skb->len > 0)
                                skbn->data[4] |= NR_MORE_FLAG;
 
index 07b694d..04e7d0d 100644 (file)
@@ -226,13 +226,13 @@ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
 
        dptr = skb_put(skbn, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
 
-       memcpy(dptr, skb->data + 7, AX25_ADDR_LEN);
+       skb_copy_from_linear_data_offset(skb, 7, dptr, AX25_ADDR_LEN);
        dptr[6] &= ~AX25_CBIT;
        dptr[6] &= ~AX25_EBIT;
        dptr[6] |= AX25_SSSID_SPARE;
        dptr += AX25_ADDR_LEN;
 
-       memcpy(dptr, skb->data + 0, AX25_ADDR_LEN);
+       skb_copy_from_linear_data(skb, dptr, AX25_ADDR_LEN);
        dptr[6] &= ~AX25_CBIT;
        dptr[6] |= AX25_EBIT;
        dptr[6] |= AX25_SSSID_SPARE;
index 28d47e8..02e401c 100644 (file)
@@ -114,22 +114,22 @@ On receive:
 -----------
 
 Incoming, dev->hard_header!=NULL
-   mac.raw -> ll header
-   data    -> data
+   mac_header -> ll header
+   data       -> data
 
 Outgoing, dev->hard_header!=NULL
-   mac.raw -> ll header
-   data    -> ll header
+   mac_header -> ll header
+   data       -> ll header
 
 Incoming, dev->hard_header==NULL
-   mac.raw -> UNKNOWN position. It is very likely, that it points to ll header.
-             PPP makes it, that is wrong, because introduce assymetry
-             between rx and tx paths.
-   data    -> data
+   mac_header -> UNKNOWN position. It is very likely that it points to the ll
+                 header.  PPP does this, which is wrong because it introduces
+                 asymmetry between the rx and tx paths.
+   data       -> data
 
 Outgoing, dev->hard_header==NULL
-   mac.raw -> data. ll header is still not built!
-   data    -> data
+   mac_header -> data. ll header is still not built!
+   data       -> data
 
 Resume
   If dev->hard_header==NULL we are unlikely to restore sensible ll header.
@@ -139,12 +139,12 @@ On transmit:
 ------------
 
 dev->hard_header != NULL
-   mac.raw -> ll header
-   data    -> ll header
+   mac_header -> ll header
+   data       -> ll header
 
 dev->hard_header == NULL (ll header is added by device, we cannot control it)
-   mac.raw -> data
-   data -> data
+   mac_header -> data
+   data       -> data
 
    We should set nh.raw on output to correct position,
    packet classifier depends on it.
@@ -201,7 +201,8 @@ struct packet_sock {
        struct packet_type      prot_hook;
        spinlock_t              bind_lock;
        unsigned int            running:1,      /* prot_hook is attached*/
-                               auxdata:1;
+                               auxdata:1,
+                               origdev:1;
        int                     ifindex;        /* bound device         */
        __be16                  num;
 #ifdef CONFIG_PACKET_MULTICAST
@@ -284,7 +285,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,  struct
         *      Incoming packets have ll header pulled,
         *      push it back.
         *
-        *      For outgoing ones skb->data == skb->mac.raw
+        *      For outgoing ones skb->data == skb_mac_header(skb)
         *      so that this procedure is noop.
         */
 
@@ -303,7 +304,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,  struct
 
        spkt = &PACKET_SKB_CB(skb)->sa.pkt;
 
-       skb_push(skb, skb->data-skb->mac.raw);
+       skb_push(skb, skb->data - skb_mac_header(skb));
 
        /*
         *      The SOCK_PACKET socket receives _all_ frames.
@@ -401,14 +402,14 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
         * notable one here. This should really be fixed at the driver level.
         */
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        /* Try to align data part correctly */
        if (dev->hard_header) {
                skb->data -= dev->hard_header_len;
                skb->tail -= dev->hard_header_len;
                if (len < dev->hard_header_len)
-                       skb->nh.raw = skb->data;
+                       skb_reset_network_header(skb);
        }
 
        /* Returns -EFAULT on error */
@@ -488,10 +489,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
                   never delivered to user.
                 */
                if (sk->sk_type != SOCK_DGRAM)
-                       skb_push(skb, skb->data - skb->mac.raw);
+                       skb_push(skb, skb->data - skb_mac_header(skb));
                else if (skb->pkt_type == PACKET_OUTGOING) {
                        /* Special case: outgoing packets have ll header at head */
-                       skb_pull(skb, skb->nh.raw - skb->data);
+                       skb_pull(skb, skb_network_offset(skb));
                }
        }
 
@@ -528,7 +529,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
        sll->sll_hatype = dev->type;
        sll->sll_protocol = skb->protocol;
        sll->sll_pkttype = skb->pkt_type;
-       sll->sll_ifindex = dev->ifindex;
+       if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST)
+               sll->sll_ifindex = orig_dev->ifindex;
+       else
+               sll->sll_ifindex = dev->ifindex;
        sll->sll_halen = 0;
 
        if (dev->hard_header_parse)
@@ -582,6 +586,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
        unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
        unsigned short macoff, netoff;
        struct sk_buff *copy_skb = NULL;
+       struct timeval tv;
 
        if (skb->pkt_type == PACKET_LOOPBACK)
                goto drop;
@@ -591,10 +596,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
 
        if (dev->hard_header) {
                if (sk->sk_type != SOCK_DGRAM)
-                       skb_push(skb, skb->data - skb->mac.raw);
+                       skb_push(skb, skb->data - skb_mac_header(skb));
                else if (skb->pkt_type == PACKET_OUTGOING) {
                        /* Special case: outgoing packets have ll header at head */
-                       skb_pull(skb, skb->nh.raw - skb->data);
+                       skb_pull(skb, skb_network_offset(skb));
                }
        }
 
@@ -612,7 +617,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
        if (sk->sk_type == SOCK_DGRAM) {
                macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
        } else {
-               unsigned maclen = skb->nh.raw - skb->data;
+               unsigned maclen = skb_network_offset(skb);
                netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
                macoff = netoff - maclen;
        }
@@ -656,12 +661,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
        h->tp_snaplen = snaplen;
        h->tp_mac = macoff;
        h->tp_net = netoff;
-       if (skb->tstamp.off_sec == 0) {
+       if (skb->tstamp.tv64 == 0) {
                __net_timestamp(skb);
                sock_enable_timestamp(sk);
        }
-       h->tp_sec = skb->tstamp.off_sec;
-       h->tp_usec = skb->tstamp.off_usec;
+       tv = ktime_to_timeval(skb->tstamp);
+       h->tp_sec = tv.tv_sec;
+       h->tp_usec = tv.tv_usec;
 
        sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
        sll->sll_halen = 0;
@@ -671,7 +677,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
        sll->sll_hatype = dev->type;
        sll->sll_protocol = skb->protocol;
        sll->sll_pkttype = skb->pkt_type;
-       sll->sll_ifindex = dev->ifindex;
+       if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST)
+               sll->sll_ifindex = orig_dev->ifindex;
+       else
+               sll->sll_ifindex = dev->ifindex;
 
        h->tp_status = status;
        smp_mb();
@@ -766,14 +775,14 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
                goto out_unlock;
 
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        if (dev->hard_header) {
                int res;
                err = -EINVAL;
                res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
                if (sock->type != SOCK_DGRAM) {
-                       skb->tail = skb->data;
+                       skb_reset_tail_pointer(skb);
                        skb->len = 0;
                } else if (res < 0)
                        goto out_free;
@@ -1143,7 +1152,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
                aux.tp_len = PACKET_SKB_CB(skb)->origlen;
                aux.tp_snaplen = skb->len;
                aux.tp_mac = 0;
-               aux.tp_net = skb->nh.raw - skb->data;
+               aux.tp_net = skb_network_offset(skb);
 
                put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
        }
@@ -1411,6 +1420,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
                po->auxdata = !!val;
                return 0;
        }
+       case PACKET_ORIGDEV:
+       {
+               int val;
+
+               if (optlen < sizeof(val))
+                       return -EINVAL;
+               if (copy_from_user(&val, optval, sizeof(val)))
+                       return -EFAULT;
+
+               po->origdev = !!val;
+               return 0;
+       }
        default:
                return -ENOPROTOOPT;
        }
@@ -1452,6 +1473,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                        len = sizeof(int);
                val = po->auxdata;
 
+               data = &val;
+               break;
+       case PACKET_ORIGDEV:
+               if (len > sizeof(int))
+                       len = sizeof(int);
+               val = po->origdev;
+
                data = &val;
                break;
        default:
@@ -1543,6 +1571,8 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
                }
                case SIOCGSTAMP:
                        return sock_get_timestamp(sk, (struct timeval __user *)arg);
+               case SIOCGSTAMPNS:
+                       return sock_get_timestampns(sk, (struct timespec __user *)arg);
 
 #ifdef CONFIG_INET
                case SIOCADDRT:
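Two user-visible packet-socket additions appear in the af_packet.c hunks above: the PACKET_ORIGDEV socket option, which reports the original ingress device index for PACKET_HOST frames (e.g. a bonding slave rather than the master), and SIOCGSTAMPNS for nanosecond receive timestamps. A hypothetical userspace snippet enabling the former:

/* Hypothetical userspace sketch, not part of the patch: enable
 * PACKET_ORIGDEV so sockaddr_ll.sll_ifindex refers to the original
 * receiving device for PACKET_HOST frames. */
#include <linux/if_packet.h>
#include <sys/socket.h>

static int enable_origdev(int fd)
{
        int one = 1;

        return setsockopt(fd, SOL_PACKET, PACKET_ORIGDEV, &one, sizeof(one));
}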
index f92d531..d476c43 100644 (file)
@@ -812,26 +812,26 @@ rose_try_next_neigh:
         * closed.
         */
        if (sk->sk_state == TCP_SYN_SENT) {
-               struct task_struct *tsk = current;
-               DECLARE_WAITQUEUE(wait, tsk);
+               DEFINE_WAIT(wait);
 
-               add_wait_queue(sk->sk_sleep, &wait);
                for (;;) {
-                       set_current_state(TASK_INTERRUPTIBLE);
+                       prepare_to_wait(sk->sk_sleep, &wait,
+                                       TASK_INTERRUPTIBLE);
                        if (sk->sk_state != TCP_SYN_SENT)
                                break;
-                       release_sock(sk);
-                       if (!signal_pending(tsk)) {
+                       if (!signal_pending(current)) {
+                               release_sock(sk);
                                schedule();
                                lock_sock(sk);
                                continue;
                        }
-                       current->state = TASK_RUNNING;
-                       remove_wait_queue(sk->sk_sleep, &wait);
-                       return -ERESTARTSYS;
+                       err = -ERESTARTSYS;
+                       break;
                }
-               current->state = TASK_RUNNING;
-               remove_wait_queue(sk->sk_sleep, &wait);
+               finish_wait(sk->sk_sleep, &wait);
+
+               if (err)
+                       goto out_release;
        }
 
        if (sk->sk_state != TCP_ESTABLISHED) {
@@ -856,10 +856,9 @@ out_release:
 
 static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
 {
-       struct task_struct *tsk = current;
-       DECLARE_WAITQUEUE(wait, tsk);
        struct sk_buff *skb;
        struct sock *newsk;
+       DEFINE_WAIT(wait);
        struct sock *sk;
        int err = 0;
 
@@ -869,42 +868,41 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
        lock_sock(sk);
        if (sk->sk_type != SOCK_SEQPACKET) {
                err = -EOPNOTSUPP;
-               goto out;
+               goto out_release;
        }
 
        if (sk->sk_state != TCP_LISTEN) {
                err = -EINVAL;
-               goto out;
+               goto out_release;
        }
 
        /*
         *      The write queue this time is holding sockets ready to use
         *      hooked into the SABM we saved
         */
-       add_wait_queue(sk->sk_sleep, &wait);
        for (;;) {
+               prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+
                skb = skb_dequeue(&sk->sk_receive_queue);
                if (skb)
                        break;
 
-               current->state = TASK_INTERRUPTIBLE;
-               release_sock(sk);
                if (flags & O_NONBLOCK) {
-                       current->state = TASK_RUNNING;
-                       remove_wait_queue(sk->sk_sleep, &wait);
-                       return -EWOULDBLOCK;
+                       err = -EWOULDBLOCK;
+                       break;
                }
-               if (!signal_pending(tsk)) {
+               if (!signal_pending(current)) {
+                       release_sock(sk);
                        schedule();
                        lock_sock(sk);
                        continue;
                }
-               current->state = TASK_RUNNING;
-               remove_wait_queue(sk->sk_sleep, &wait);
-               return -ERESTARTSYS;
+               err = -ERESTARTSYS;
+               break;
        }
-       current->state = TASK_RUNNING;
-       remove_wait_queue(sk->sk_sleep, &wait);
+       finish_wait(sk->sk_sleep, &wait);
+       if (err)
+               goto out_release;
 
        newsk = skb->sk;
        newsk->sk_socket = newsock;
@@ -916,7 +914,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
        sk->sk_ack_backlog--;
        newsock->sk = newsk;
 
-out:
+out_release:
        release_sock(sk);
 
        return err;
@@ -1105,9 +1103,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
         */
        SOCK_DEBUG(sk, "ROSE: Appending user data\n");
 
-       asmptr = skb->h.raw = skb_put(skb, len);
+       skb_reset_transport_header(skb);
+       skb_put(skb, len);
 
-       err = memcpy_fromiovec(asmptr, msg->msg_iov, len);
+       err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
        if (err) {
                kfree_skb(skb);
                return err;
@@ -1155,7 +1154,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
                int lg;
 
                /* Save a copy of the Header */
-               memcpy(header, skb->data, ROSE_MIN_LEN);
+               skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
                skb_pull(skb, ROSE_MIN_LEN);
 
                frontlen = skb_headroom(skb);
@@ -1175,12 +1174,12 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
                        lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
 
                        /* Copy the user data */
-                       memcpy(skb_put(skbn, lg), skb->data, lg);
+                       skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
                        skb_pull(skb, lg);
 
                        /* Duplicate the Header */
                        skb_push(skbn, ROSE_MIN_LEN);
-                       memcpy(skbn->data, header, ROSE_MIN_LEN);
+                       skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
 
                        if (skb->len > 0)
                                skbn->data[2] |= M_BIT;
@@ -1234,7 +1233,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
                *asmptr = qbit;
        }
 
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        copied     = skb->len;
 
        if (copied > size) {
@@ -1296,6 +1295,9 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        case SIOCGSTAMP:
                return sock_get_timestamp(sk, (struct timeval __user *) argp);
 
+       case SIOCGSTAMPNS:
+               return sock_get_timestampns(sk, (struct timespec __user *) argp);
+
        case SIOCGIFADDR:
        case SIOCSIFADDR:
        case SIOCGIFDSTADDR:
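Like NET/ROM and the packet sockets above, ROSE now answers SIOCGSTAMPNS with a nanosecond-resolution receive timestamp (SIOCGSTAMP keeps returning a struct timeval). A hypothetical userspace caller, with error handling kept minimal:

/* Hypothetical userspace sketch, not part of the patch. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>
#include <time.h>

static void print_last_rx_stamp(int fd)
{
        struct timespec ts;

        if (ioctl(fd, SIOCGSTAMPNS, &ts) == 0)
                printf("last packet: %ld.%09ld\n",
                       (long)ts.tv_sec, ts.tv_nsec);
        else
                perror("SIOCGSTAMPNS");
}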
index 3e41bd9..cd01642 100644 (file)
@@ -77,7 +77,7 @@ static void rose_loopback_timer(unsigned long param)
                dest      = (rose_address *)(skb->data + 4);
                lci_o     = 0xFFF - lci_i;
 
-               skb->h.raw = skb->data;
+               skb_reset_transport_header(skb);
 
                sk = rose_find_socket(lci_o, &rose_loopback_neigh);
                if (sk) {
index a1233e1..1f9aefd 100644 (file)
@@ -906,7 +906,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
                        }
                }
                else {
-                       skb->h.raw = skb->data;
+                       skb_reset_transport_header(skb);
                        res = rose_process_rx_frame(sk, skb);
                        goto out;
                }
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
new file mode 100644 (file)
index 0000000..d72380e
--- /dev/null
@@ -0,0 +1,37 @@
+#
+# RxRPC session sockets
+#
+
+config AF_RXRPC
+       tristate "RxRPC session sockets"
+       depends on EXPERIMENTAL
+       help
+         Say Y or M here to include support for RxRPC session sockets (just
+         the transport part, not the presentation part: (un)marshalling is
+         left to the application).
+
+         These are used for AFS kernel filesystem and userspace utilities.
+
+         This module currently supports only client operations and is
+         incomplete.
+
+         See Documentation/networking/rxrpc.txt.
+
+
+config AF_RXRPC_DEBUG
+       bool "RxRPC dynamic debugging"
+       depends on AF_RXRPC
+       help
+         Say Y here to make runtime controllable debugging messages appear.
+
+         See Documentation/networking/rxrpc.txt.
+
+
+config RXKAD
+       tristate "RxRPC Kerberos security"
+       depends on AF_RXRPC && KEYS
+       help
+         Provide Kerberos 4 and AFS kaserver security handling for AF_RXRPC
+         through the use of the key retention service.
+
+         See Documentation/networking/rxrpc.txt.
index 6efcb6f..c46867c 100644 (file)
@@ -1,25 +1,29 @@
 #
-# Makefile for Linux kernel Rx RPC
+# Makefile for Linux kernel RxRPC
 #
 
-#CFLAGS += -finstrument-functions
-
-rxrpc-objs := \
-       call.o \
-       connection.o \
-       krxiod.o \
-       krxsecd.o \
-       krxtimod.o \
-       main.o \
-       peer.o \
-       rxrpc_syms.o \
-       transport.o
+af-rxrpc-objs := \
+       af_rxrpc.o \
+       ar-accept.o \
+       ar-ack.o \
+       ar-call.o \
+       ar-connection.o \
+       ar-connevent.o \
+       ar-error.o \
+       ar-input.o \
+       ar-key.o \
+       ar-local.o \
+       ar-output.o \
+       ar-peer.o \
+       ar-recvmsg.o \
+       ar-security.o \
+       ar-skbuff.o \
+       ar-transport.o
 
 ifeq ($(CONFIG_PROC_FS),y)
-rxrpc-objs += proc.o
-endif
-ifeq ($(CONFIG_SYSCTL),y)
-rxrpc-objs += sysctl.o
+af-rxrpc-objs += ar-proc.o
 endif
 
-obj-$(CONFIG_RXRPC) := rxrpc.o
+obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
+
+obj-$(CONFIG_RXKAD) += rxkad.o
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
new file mode 100644 (file)
index 0000000..2c57df9
--- /dev/null
@@ -0,0 +1,879 @@
+/* AF_RXRPC implementation
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+MODULE_DESCRIPTION("RxRPC network protocol");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_RXRPC);
+
+unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
+module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(rxrpc_debug, "RxRPC debugging mask");
+
+static int sysctl_rxrpc_max_qlen __read_mostly = 10;
+
+static struct proto rxrpc_proto;
+static const struct proto_ops rxrpc_rpc_ops;
+
+/* local epoch for detecting local-end reset */
+__be32 rxrpc_epoch;
+
+/* current debugging ID */
+atomic_t rxrpc_debug_id;
+
+/* count of skbs currently in use */
+atomic_t rxrpc_n_skbs;
+
+struct workqueue_struct *rxrpc_workqueue;
+
+static void rxrpc_sock_destructor(struct sock *);
+
+/*
+ * see if an RxRPC socket is currently writable
+ */
+static inline int rxrpc_writable(struct sock *sk)
+{
+       return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
+}
+
+/*
+ * wait for write bufferage to become available
+ */
+static void rxrpc_write_space(struct sock *sk)
+{
+       _enter("%p", sk);
+       read_lock(&sk->sk_callback_lock);
+       if (rxrpc_writable(sk)) {
+               if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+                       wake_up_interruptible(sk->sk_sleep);
+               sk_wake_async(sk, 2, POLL_OUT);
+       }
+       read_unlock(&sk->sk_callback_lock);
+}
+
+/*
+ * validate an RxRPC address
+ */
+static int rxrpc_validate_address(struct rxrpc_sock *rx,
+                                 struct sockaddr_rxrpc *srx,
+                                 int len)
+{
+       if (len < sizeof(struct sockaddr_rxrpc))
+               return -EINVAL;
+
+       if (srx->srx_family != AF_RXRPC)
+               return -EAFNOSUPPORT;
+
+       if (srx->transport_type != SOCK_DGRAM)
+               return -ESOCKTNOSUPPORT;
+
+       len -= offsetof(struct sockaddr_rxrpc, transport);
+       if (srx->transport_len < sizeof(sa_family_t) ||
+           srx->transport_len > len)
+               return -EINVAL;
+
+       if (srx->transport.family != rx->proto)
+               return -EAFNOSUPPORT;
+
+       switch (srx->transport.family) {
+       case AF_INET:
+               _debug("INET: %x @ %u.%u.%u.%u",
+                      ntohs(srx->transport.sin.sin_port),
+                      NIPQUAD(srx->transport.sin.sin_addr));
+               if (srx->transport_len > 8)
+                       memset((void *)&srx->transport + 8, 0,
+                              srx->transport_len - 8);
+               break;
+
+       case AF_INET6:
+       default:
+               return -EAFNOSUPPORT;
+       }
+
+       return 0;
+}
+
+/*
+ * bind a local address to an RxRPC socket
+ */
+static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
+{
+       struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
+       struct sock *sk = sock->sk;
+       struct rxrpc_local *local;
+       struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
+       __be16 service_id;
+       int ret;
+
+       _enter("%p,%p,%d", rx, saddr, len);
+
+       ret = rxrpc_validate_address(rx, srx, len);
+       if (ret < 0)
+               goto error;
+
+       lock_sock(&rx->sk);
+
+       if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
+               ret = -EINVAL;
+               goto error_unlock;
+       }
+
+       memcpy(&rx->srx, srx, sizeof(rx->srx));
+
+       /* find a local transport endpoint if we don't have one already */
+       local = rxrpc_lookup_local(&rx->srx);
+       if (IS_ERR(local)) {
+               ret = PTR_ERR(local);
+               goto error_unlock;
+       }
+
+       rx->local = local;
+       if (srx->srx_service) {
+               service_id = htons(srx->srx_service);
+               write_lock_bh(&local->services_lock);
+               list_for_each_entry(prx, &local->services, listen_link) {
+                       if (prx->service_id == service_id)
+                               goto service_in_use;
+               }
+
+               rx->service_id = service_id;
+               list_add_tail(&rx->listen_link, &local->services);
+               write_unlock_bh(&local->services_lock);
+
+               rx->sk.sk_state = RXRPC_SERVER_BOUND;
+       } else {
+               rx->sk.sk_state = RXRPC_CLIENT_BOUND;
+       }
+
+       release_sock(&rx->sk);
+       _leave(" = 0");
+       return 0;
+
+service_in_use:
+       ret = -EADDRINUSE;
+       write_unlock_bh(&local->services_lock);
+error_unlock:
+       release_sock(&rx->sk);
+error:
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * set the number of pending calls permitted on a listening socket
+ */
+static int rxrpc_listen(struct socket *sock, int backlog)
+{
+       struct sock *sk = sock->sk;
+       struct rxrpc_sock *rx = rxrpc_sk(sk);
+       int ret;
+
+       _enter("%p,%d", rx, backlog);
+
+       lock_sock(&rx->sk);
+
+       switch (rx->sk.sk_state) {
+       case RXRPC_UNCONNECTED:
+               ret = -EADDRNOTAVAIL;
+               break;
+       case RXRPC_CLIENT_BOUND:
+       case RXRPC_CLIENT_CONNECTED:
+       default:
+               ret = -EBUSY;
+               break;
+       case RXRPC_SERVER_BOUND:
+               ASSERT(rx->local != NULL);
+               sk->sk_max_ack_backlog = backlog;
+               rx->sk.sk_state = RXRPC_SERVER_LISTENING;
+               ret = 0;
+               break;
+       }
+
+       release_sock(&rx->sk);
+       _leave(" = %d", ret);
+       return ret;
+}
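+
+/*
+ * By way of example, a userspace server would fill in a sockaddr_rxrpc much
+ * as illustrated above, with srx_service set to the service it wants to
+ * offer, and then do roughly:
+ *
+ *        int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
+ *
+ *        bind(fd, (struct sockaddr *) &srx, sizeof(srx));
+ *        listen(fd, 10);
+ *
+ * after which incoming calls on that service queue up waiting to be accepted
+ * (the backlog of 10 is arbitrary).
+ */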
+
+/*
+ * find a transport by address
+ */
+static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
+                                                      struct sockaddr *addr,
+                                                      int addr_len, int flags,
+                                                      gfp_t gfp)
+{
+       struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
+       struct rxrpc_transport *trans;
+       struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+       struct rxrpc_peer *peer;
+
+       _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
+
+       ASSERT(rx->local != NULL);
+       ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);
+
+       if (rx->srx.transport_type != srx->transport_type)
+               return ERR_PTR(-ESOCKTNOSUPPORT);
+       if (rx->srx.transport.family != srx->transport.family)
+               return ERR_PTR(-EAFNOSUPPORT);
+
+       /* find a remote transport endpoint from the local one */
+       peer = rxrpc_get_peer(srx, gfp);
+       if (IS_ERR(peer))
+               return ERR_PTR(PTR_ERR(peer));
+
+       /* find a transport */
+       trans = rxrpc_get_transport(rx->local, peer, gfp);
+       rxrpc_put_peer(peer);
+       _leave(" = %p", trans);
+       return trans;
+}
+
+/**
+ * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
+ * @sock: The socket on which to make the call
+ * @srx: The address of the peer to contact (defaults to socket setting)
+ * @key: The security context to use (defaults to socket setting)
+ * @user_call_ID: The ID to use
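+ * @gfp: The allocation flags to use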
+ *
+ * Allow a kernel service to begin a call on the nominated socket.  This just
+ * sets up all the internal tracking structures and allocates connection and
+ * call IDs as appropriate.  The call to be used is returned.
+ *
+ * The default socket destination address and security may be overridden by
+ * supplying @srx and @key.
+ */
+struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+                                          struct sockaddr_rxrpc *srx,
+                                          struct key *key,
+                                          unsigned long user_call_ID,
+                                          gfp_t gfp)
+{
+       struct rxrpc_conn_bundle *bundle;
+       struct rxrpc_transport *trans;
+       struct rxrpc_call *call;
+       struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+       __be16 service_id;
+
+       _enter(",,%x,%lx", key_serial(key), user_call_ID);
+
+       lock_sock(&rx->sk);
+
+       if (srx) {
+               trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
+                                               sizeof(*srx), 0, gfp);
+               if (IS_ERR(trans)) {
+                       call = ERR_PTR(PTR_ERR(trans));
+                       trans = NULL;
+                       goto out;
+               }
+       } else {
+               trans = rx->trans;
+               if (!trans) {
+                       call = ERR_PTR(-ENOTCONN);
+                       goto out;
+               }
+               atomic_inc(&trans->usage);
+       }
+
+       service_id = rx->service_id;
+       if (srx)
+               service_id = htons(srx->srx_service);
+
+       if (!key)
+               key = rx->key;
+       if (key && !key->payload.data)
+               key = NULL; /* a no-security key */
+
+       bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp);
+       if (IS_ERR(bundle)) {
+               call = ERR_PTR(PTR_ERR(bundle));
+               goto out;
+       }
+
+       call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true,
+                                    gfp);
+       rxrpc_put_bundle(trans, bundle);
+out:
+       rxrpc_put_transport(trans);
+       release_sock(&rx->sk);
+       _leave(" = %p", call);
+       return call;
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_begin_call);
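+
+/*
+ * As a rough sketch, a kernel service might drive this interface as follows,
+ * with error handling omitted, local_srx and peer_srx being sockaddr_rxrpc
+ * values filled in as illustrated earlier, and the call ID of 1 just an
+ * arbitrary tag:
+ *
+ *        struct rxrpc_call *call;
+ *        struct socket *sock;
+ *
+ *        sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET, &sock);
+ *        kernel_bind(sock, (struct sockaddr *) &local_srx, sizeof(local_srx));
+ *
+ *        call = rxrpc_kernel_begin_call(sock, &peer_srx, NULL, 1, GFP_KERNEL);
+ *        if (!IS_ERR(call)) {
+ *                ... exchange data on the call ...
+ *                rxrpc_kernel_end_call(call);
+ *        }
+ *
+ * A NULL key here means the socket's default security settings, if any, are
+ * applied to the call.
+ */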
+
+/**
+ * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
+ * @call: The call to end
+ *
+ * Allow a kernel service to end a call it was using.  The call must be
+ * complete before this is called (the call should be aborted if necessary).
+ */
+void rxrpc_kernel_end_call(struct rxrpc_call *call)
+{
+       _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
+       rxrpc_remove_user_ID(call->socket, call);
+       rxrpc_put_call(call);
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_end_call);
+
+/**
+ * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages
+ * @sock: The socket to intercept received messages on
+ * @interceptor: The function to pass the messages to
+ *
+ * Allow a kernel service to intercept messages heading for the Rx queue on an
+ * RxRPC socket.  They get passed to the specified function instead.
+ * @interceptor should free the socket buffers it is given.  @interceptor is
+ * called with the socket receive queue spinlock held and softirqs disabled -
+ * this ensures that the messages will be delivered in the right order.
+ */
+void rxrpc_kernel_intercept_rx_messages(struct socket *sock,
+                                       rxrpc_interceptor_t interceptor)
+{
+       struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+
+       _enter("");
+       rx->interceptor = interceptor;
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);
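+
+/*
+ * A sketch of an interceptor, assuming the rxrpc_interceptor_t prototype in
+ * the new af_rxrpc header takes the socket, the user call ID and the socket
+ * buffer (the function name below is just an example):
+ *
+ *        static void example_interceptor(struct sock *sk,
+ *                                        unsigned long user_call_ID,
+ *                                        struct sk_buff *skb)
+ *        {
+ *                ... consume skb; the interceptor now owns it ...
+ *        }
+ *
+ *        rxrpc_kernel_intercept_rx_messages(sock, example_interceptor);
+ */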
+
+/*
+ * connect an RxRPC socket
+ * - this just targets it at a specific destination; no actual connection
+ *   negotiation takes place
+ */
+static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
+                        int addr_len, int flags)
+{
+       struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
+       struct sock *sk = sock->sk;
+       struct rxrpc_transport *trans;
+       struct rxrpc_local *local;
+       struct rxrpc_sock *rx = rxrpc_sk(sk);
+       int ret;
+
+       _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
+
+       ret = rxrpc_validate_address(rx, srx, addr_len);
+       if (ret < 0) {
+               _leave(" = %d [bad addr]", ret);
+               return ret;
+       }
+
+       lock_sock(&rx->sk);
+
+       switch (rx->sk.sk_state) {
+       case RXRPC_UNCONNECTED:
+               /* find a local transport endpoint if we don't have one already */
+               ASSERTCMP(rx->local, ==, NULL);
+               rx->srx.srx_family = AF_RXRPC;
+               rx->srx.srx_service = 0;
+               rx->srx.transport_type = srx->transport_type;
+               rx->srx.transport_len = sizeof(sa_family_t);
+               rx->srx.transport.family = srx->transport.family;
+               local = rxrpc_lookup_local(&rx->srx);
+               if (IS_ERR(local)) {
+                       release_sock(&rx->sk);
+                       return PTR_ERR(local);
+               }
+               rx->local = local;
+               rx->sk.sk_state = RXRPC_CLIENT_BOUND;
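+               /* fall through */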
+       case RXRPC_CLIENT_BOUND:
+               break;
+       case RXRPC_CLIENT_CONNECTED:
+               release_sock(&rx->sk);
+               return -EISCONN;
+       default:
+               release_sock(&rx->sk);
+               return -EBUSY; /* server sockets can't also connect */
+       }
+
+       trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
+                                       GFP_KERNEL);
+       if (IS_ERR(trans)) {
+               release_sock(&rx->sk);
+               _leave(" = %ld", PTR_ERR(trans));
+               return PTR_ERR(trans);
+       }
+
+       rx->trans = trans;
+       rx->service_id = htons(srx->srx_service);
+       rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;
+
+       release_sock(&rx->sk);
+       return 0;
+}
+
+/*
+ * send a message through an RxRPC socket
+ * - in a client this does a number of things:
+ *   - finds/sets up a connection for the security specified (if any)
+ *   - initiates a call (ID in control data)
+ *   - ends the request phase of a call (if MSG_MORE is not set)
+ *   - sends a call data packet
+ *   - may send an abort (abort code in control data)
+ */
+static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
+                        struct msghdr *m, size_t len)
+{
+       struct rxrpc_transport *trans;
+       struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+       int ret;
+
+       _enter(",{%d},,%zu", rx->sk.sk_state, len);
+
+       if (m->msg_flags & MSG_OOB)
+               return -EOPNOTSUPP;
+
+       if (m->msg_name) {
+               ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
+               if (ret < 0) {
+                       _leave(" = %d [bad addr]", ret);
+                       return ret;
+               }
+       }
+
+       trans = NULL;
+       lock_sock(&rx->sk);
+
+       if (m->msg_name) {
+               ret = -EISCONN;
+               trans = rxrpc_name_to_transport(sock, m->msg_name,
+                                               m->msg_namelen, 0, GFP_KERNEL);
+               if (IS_ERR(trans)) {
+                       ret = PTR_ERR(trans);
+                       trans = NULL;
+                       goto out;
+               }
+       } else {
+               trans = rx->trans;
+               if (trans)
+                       atomic_inc(&trans->usage);
+       }
+
+       switch (rx->sk.sk_state) {
+       case RXRPC_SERVER_LISTENING:
+               if (!m->msg_name) {
+                       ret = rxrpc_server_sendmsg(iocb, rx, m, len);
+                       break;
+               }
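+               /* fall through - an address was given, so make a client call */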
+       case RXRPC_SERVER_BOUND:
+       case RXRPC_CLIENT_BOUND:
+               if (!m->msg_name) {
+                       ret = -ENOTCONN;
+                       break;
+               }
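+               /* fall through */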
+       case RXRPC_CLIENT_CONNECTED:
+               ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
+               break;
+       default:
+               ret = -ENOTCONN;
+               break;
+       }
+
+out:
+       release_sock(&rx->sk);
+       if (trans)
+               rxrpc_put_transport(trans);
+       _leave(" = %d", ret);
+       return ret;
+}
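+
+/*
+ * From userspace, then, a client call is started by a sendmsg() that names
+ * the peer and carries the caller's chosen call ID in a control message,
+ * roughly like the sketch below.  Here srx and iov describe the target and
+ * the request data, and RXRPC_USER_CALL_ID is the control message type
+ * provided for this purpose in the new rxrpc header:
+ *
+ *        unsigned long call_id = 1;
+ *        char ctrl[CMSG_SPACE(sizeof(call_id))];
+ *        struct msghdr msg = {
+ *                .msg_name       = &srx,
+ *                .msg_namelen    = sizeof(srx),
+ *                .msg_iov        = &iov,
+ *                .msg_iovlen     = 1,
+ *                .msg_control    = ctrl,
+ *                .msg_controllen = sizeof(ctrl),
+ *        };
+ *        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ *
+ *        cmsg->cmsg_level = SOL_RXRPC;
+ *        cmsg->cmsg_type = RXRPC_USER_CALL_ID;
+ *        cmsg->cmsg_len = CMSG_LEN(sizeof(call_id));
+ *        memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
+ *
+ *        sendmsg(fd, &msg, 0);
+ *
+ * Leaving MSG_MORE clear, as here, also ends the request phase of the call.
+ */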
+
+/*
+ * set RxRPC socket options
+ */
+static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
+                           char __user *optval, int optlen)
+{
+       struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+       unsigned min_sec_level;
+       int ret;
+
+       _enter(",%d,%d,,%d", level, optname, optlen);
+
+       lock_sock(&rx->sk);
+       ret = -EOPNOTSUPP;
+
+       if (level == SOL_RXRPC) {
+               switch (optname) {
+               case RXRPC_EXCLUSIVE_CONNECTION:
+                       ret = -EINVAL;
+                       if (optlen != 0)
+                               goto error;
+                       ret = -EISCONN;
+                       if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+                               goto error;
+                       set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
+                       goto success;
+
+               case RXRPC_SECURITY_KEY:
+                       ret = -EINVAL;
+                       if (rx->key)
+                               goto error;
+                       ret = -EISCONN;
+                       if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+                               goto error;
+                       ret = rxrpc_request_key(rx, optval, optlen);
+                       goto error;
+
+               case RXRPC_SECURITY_KEYRING:
+                       ret = -EINVAL;
+                       if (rx->key)
+                               goto error;
+                       ret = -EISCONN;
+                       if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+                               goto error;
+                       ret = rxrpc_server_keyring(rx, optval, optlen);
+                       goto error;
+
+               case RXRPC_MIN_SECURITY_LEVEL:
+                       ret = -EINVAL;
+                       if (optlen != sizeof(unsigned))
+                               goto error;
+                       ret = -EISCONN;
+                       if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+                               goto error;
+                       ret = get_user(min_sec_level,
+                                      (unsigned __user *) optval);
+                       if (ret < 0)
+                               goto error;
+                       ret = -EINVAL;
+                       if (min_sec_level > RXRPC_SECURITY_MAX)
+                               goto error;
+                       rx->min_sec_level = min_sec_level;
+                       goto success;
+
+               default:
+                       break;
+               }
+       }
+
+success:
+       ret = 0;
+error:
+       release_sock(&rx->sk);
+       return ret;
+}
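+
+/*
+ * For example, a userspace program could raise the minimum security level it
+ * will use before binding or connecting the socket, along these lines (the
+ * usable level values, up to RXRPC_SECURITY_MAX, come from the new rxrpc
+ * header):
+ *
+ *        unsigned int level = RXRPC_SECURITY_MAX;
+ *
+ *        setsockopt(fd, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
+ *                   &level, sizeof(level));
+ */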
+
+/*
+ * permit an RxRPC socket to be polled
+ */
+static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
+{
+       unsigned int mask;
+       struct sock *sk = sock->sk;
+
+       poll_wait(file, sk->sk_sleep, wait);
+       mask = 0;
+
+       /* the socket is readable if there are any messages waiting on the Rx
+        * queue */
+       if (!skb_queue_empty(&sk->sk_receive_queue))
+               mask |= POLLIN | POLLRDNORM;
+
+       /* the socket is writable if there is space to add new data to the
+        * socket; there is no guarantee, though, that any particular call in
+        * progress on the socket will have space in its Tx ACK window */
+       if (rxrpc_writable(sk))
+               mask |= POLLOUT | POLLWRNORM;
+
+       return mask;
+}
+
+/*
+ * create an RxRPC socket
+ */
+static int rxrpc_create(struct socket *sock, int protocol)
+{
+       struct rxrpc_sock *rx;
+       struct sock *sk;
+
+       _enter("%p,%d", sock, protocol);
+
+       /* we support transport protocol UDP only */
+       if (protocol != PF_INET)
+               return -EPROTONOSUPPORT;
+
+       if (sock->type != SOCK_DGRAM)
+               return -ESOCKTNOSUPPORT;
+
+       sock->ops = &rxrpc_rpc_ops;
+       sock->state = SS_UNCONNECTED;
+
+       sk = sk_alloc(PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1);
+       if (!sk)
+               return -ENOMEM;
+
+       sock_init_data(sock, sk);
+       sk->sk_state            = RXRPC_UNCONNECTED;
+       sk->sk_write_space      = rxrpc_write_space;
+       sk->sk_max_ack_backlog  = sysctl_rxrpc_max_qlen;
+       sk->sk_destruct         = rxrpc_sock_destructor;
+
+       rx = rxrpc_sk(sk);
+       rx->proto = protocol;
+       rx->calls = RB_ROOT;
+
+       INIT_LIST_HEAD(&rx->listen_link);
+       INIT_LIST_HEAD(&rx->secureq);
+       INIT_LIST_HEAD(&rx->acceptq);
+       rwlock_init(&rx->call_lock);
+       memset(&rx->srx, 0, sizeof(rx->srx));
+
+       _leave(" = 0 [%p]", rx);
+       return 0;
+}
+
+/*
+ * RxRPC socket destructor
+ */
+static void rxrpc_sock_destructor(struct sock *sk)
+{
+       _enter("%p", sk);
+
+       rxrpc_purge_queue(&sk->sk_receive_queue);
+
+       BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
+       BUG_TRAP(sk_unhashed(sk));
+       BUG_TRAP(!sk->sk_socket);
+
+       if (!sock_flag(sk, SOCK_DEAD)) {
+               printk("Attempt to release alive rxrpc socket: %p\n", sk);
+               return;
+       }
+}
+
+/*
+ * release an RxRPC socket
+ */
+static int rxrpc_release_sock(struct sock *sk)
+{
+       struct rxrpc_sock *rx = rxrpc_sk(sk);
+
+       _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
+
+       /* declare the socket closed for business */
+       sock_orphan(sk);
+       sk->sk_shutdown = SHUTDOWN_MASK;
+
+       spin_lock_bh(&sk->sk_receive_queue.lock);
+       sk->sk_state = RXRPC_CLOSE;
+       spin_unlock_bh(&sk->sk_receive_queue.lock);
+
+       ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);
+
+       if (!list_empty(&rx->listen_link)) {
+               write_lock_bh(&rx->local->services_lock);
+               list_del(&rx->listen_link);
+               write_unlock_bh(&rx->local->services_lock);
+       }
+
+       /* try to flush out this socket */
+       rxrpc_release_calls_on_socket(rx);
+       flush_workqueue(rxrpc_workqueue);
+       rxrpc_purge_queue(&sk->sk_receive_queue);
+
+       if (rx->conn) {
+               rxrpc_put_connection(rx->conn);
+               rx->conn = NULL;
+       }
+
+       if (rx->bundle) {
+               rxrpc_put_bundle(rx->trans, rx->bundle);
+               rx->bundle = NULL;
+       }
+       if (rx->trans) {
+               rxrpc_put_transport(rx->trans);
+               rx->trans = NULL;
+       }
+       if (rx->local) {
+               rxrpc_put_local(rx->local);
+               rx->local = NULL;
+       }
+
+       key_put(rx->key);
+       rx->key = NULL;
+       key_put(rx->securities);
+       rx->securities = NULL;
+       sock_put(sk);
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * release an RxRPC BSD socket on close() or equivalent
+ */
+static int rxrpc_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+
+       _enter("%p{%p}", sock, sk);
+
+       if (!sk)
+               return 0;
+
+       sock->sk = NULL;
+
+       return rxrpc_release_sock(sk);
+}
+
+/*
+ * RxRPC network protocol
+ */
+static const struct proto_ops rxrpc_rpc_ops = {
+       .family         = PF_RXRPC,
+       .owner          = THIS_MODULE,
+       .release        = rxrpc_release,
+       .bind           = rxrpc_bind,
+       .connect        = rxrpc_connect,
+       .socketpair     = sock_no_socketpair,
+       .accept         = sock_no_accept,
+       .getname        = sock_no_getname,
+       .poll           = rxrpc_poll,
+       .ioctl          = sock_no_ioctl,
+       .listen         = rxrpc_listen,
+       .shutdown       = sock_no_shutdown,
+       .setsockopt     = rxrpc_setsockopt,
+       .getsockopt     = sock_no_getsockopt,
+       .sendmsg        = rxrpc_sendmsg,
+       .recvmsg        = rxrpc_recvmsg,
+       .mmap           = sock_no_mmap,
+       .sendpage       = sock_no_sendpage,
+};
+
+static struct proto rxrpc_proto = {
+       .name           = "RXRPC",
+       .owner          = THIS_MODULE,
+       .obj_size       = sizeof(struct rxrpc_sock),
+       .max_header     = sizeof(struct rxrpc_header),
+};
+
+static struct net_proto_family rxrpc_family_ops = {
+       .family = PF_RXRPC,
+       .create = rxrpc_create,
+       .owner  = THIS_MODULE,
+};
+
+/*
+ * initialise and register the RxRPC protocol
+ */
+static int __init af_rxrpc_init(void)
+{
+       struct sk_buff *dummy_skb;
+       int ret = -1;
+
+       BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));
+
+       rxrpc_epoch = htonl(xtime.tv_sec);
+
+       ret = -ENOMEM;
+       rxrpc_call_jar = kmem_cache_create(
+               "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
+               SLAB_HWCACHE_ALIGN, NULL, NULL);
+       if (!rxrpc_call_jar) {
+               printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
+               goto error_call_jar;
+       }
+
+       rxrpc_workqueue = create_workqueue("krxrpcd");
+       if (!rxrpc_workqueue) {
+               printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
+               goto error_work_queue;
+       }
+
+       ret = proto_register(&rxrpc_proto, 1);
+       if (ret < 0) {
+               printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
+               goto error_proto;
+       }
+
+       ret = sock_register(&rxrpc_family_ops);
+       if (ret < 0) {
+               printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
+               goto error_sock;
+       }
+
+       ret = register_key_type(&key_type_rxrpc);
+       if (ret < 0) {
+               printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
+               goto error_key_type;
+       }
+
+       ret = register_key_type(&key_type_rxrpc_s);
+       if (ret < 0) {
+               printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
+               goto error_key_type_s;
+       }
+
+#ifdef CONFIG_PROC_FS
+       proc_net_fops_create("rxrpc_calls", 0, &rxrpc_call_seq_fops);
+       proc_net_fops_create("rxrpc_conns", 0, &rxrpc_connection_seq_fops);
+#endif
+       return 0;
+
+error_key_type_s:
+       unregister_key_type(&key_type_rxrpc);
+error_key_type:
+       sock_unregister(PF_RXRPC);
+error_sock:
+       proto_unregister(&rxrpc_proto);
+error_proto:
+       destroy_workqueue(rxrpc_workqueue);
+error_work_queue:
+       kmem_cache_destroy(rxrpc_call_jar);
+error_call_jar:
+       return ret;
+}
+
+/*
+ * unregister the RxRPC protocol
+ */
+static void __exit af_rxrpc_exit(void)
+{
+       _enter("");
+       unregister_key_type(&key_type_rxrpc_s);
+       unregister_key_type(&key_type_rxrpc);
+       sock_unregister(PF_RXRPC);
+       proto_unregister(&rxrpc_proto);
+       rxrpc_destroy_all_calls();
+       rxrpc_destroy_all_connections();
+       rxrpc_destroy_all_transports();
+       rxrpc_destroy_all_peers();
+       rxrpc_destroy_all_locals();
+
+       ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);
+
+       _debug("flush scheduled work");
+       flush_workqueue(rxrpc_workqueue);
+       proc_net_remove("rxrpc_conns");
+       proc_net_remove("rxrpc_calls");
+       destroy_workqueue(rxrpc_workqueue);
+       kmem_cache_destroy(rxrpc_call_jar);
+       _leave("");
+}
+
+module_init(af_rxrpc_init);
+module_exit(af_rxrpc_exit);
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
new file mode 100644 (file)
index 0000000..92a87fd
--- /dev/null
@@ -0,0 +1,504 @@
+/* incoming call handling
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/errqueue.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/icmp.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include <net/ip.h>
+#include "ar-internal.h"
+
+/*
+ * respond to an incoming call with a BUSY packet
+ */
+static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
+                     struct rxrpc_header *hdr)
+{
+       struct msghdr msg;
+       struct kvec iov[1];
+       size_t len;
+       int ret;
+
+       _enter("%d,,", local->debug_id);
+
+       msg.msg_name    = &srx->transport.sin;
+       msg.msg_namelen = sizeof(srx->transport.sin);
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_flags   = 0;
+
+       hdr->seq        = 0;
+       hdr->type       = RXRPC_PACKET_TYPE_BUSY;
+       hdr->flags      = 0;
+       hdr->userStatus = 0;
+       hdr->_rsvd      = 0;
+
+       iov[0].iov_base = hdr;
+       iov[0].iov_len  = sizeof(*hdr);
+
+       len = iov[0].iov_len;
+
+       hdr->serial = htonl(1);
+       _proto("Tx BUSY %%%u", ntohl(hdr->serial));
+
+       ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
+       if (ret < 0) {
+               _leave(" = -EAGAIN [sendmsg failed: %d]", ret);
+               return -EAGAIN;
+       }
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * accept an incoming call that needs peer, transport and/or connection setting
+ * up
+ */
+static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
+                                     struct rxrpc_sock *rx,
+                                     struct sk_buff *skb,
+                                     struct sockaddr_rxrpc *srx)
+{
+       struct rxrpc_connection *conn;
+       struct rxrpc_transport *trans;
+       struct rxrpc_skb_priv *sp, *nsp;
+       struct rxrpc_peer *peer;
+       struct rxrpc_call *call;
+       struct sk_buff *notification;
+       int ret;
+
+       _enter("");
+
+       sp = rxrpc_skb(skb);
+
+       /* get a notification message to send to the server app */
+       notification = alloc_skb(0, GFP_NOFS);
+       if (!notification) {
+               ret = -EBUSY; /* the caller will reply with a BUSY packet */
+               goto error;
+       }
+       rxrpc_new_skb(notification);
+       notification->mark = RXRPC_SKB_MARK_NEW_CALL;
+
+       peer = rxrpc_get_peer(srx, GFP_NOIO);
+       if (IS_ERR(peer)) {
+               _debug("no peer");
+               ret = -EBUSY;
+               goto error;
+       }
+
+       trans = rxrpc_get_transport(local, peer, GFP_NOIO);
+       rxrpc_put_peer(peer);
+       if (!trans) {
+               _debug("no trans");
+               ret = -EBUSY;
+               goto error;
+       }
+
+       conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO);
+       rxrpc_put_transport(trans);
+       if (IS_ERR(conn)) {
+               _debug("no conn");
+               ret = PTR_ERR(conn);
+               goto error;
+       }
+
+       call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO);
+       rxrpc_put_connection(conn);
+       if (IS_ERR(call)) {
+               _debug("no call");
+               ret = PTR_ERR(call);
+               goto error;
+       }
+
+       /* attach the call to the socket */
+       read_lock_bh(&local->services_lock);
+       if (rx->sk.sk_state == RXRPC_CLOSE)
+               goto invalid_service;
+
+       write_lock(&rx->call_lock);
+       if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
+               rxrpc_get_call(call);
+
+               spin_lock(&call->conn->state_lock);
+               if (sp->hdr.securityIndex > 0 &&
+                   call->conn->state == RXRPC_CONN_SERVER_UNSECURED) {
+                       _debug("await conn sec");
+                       list_add_tail(&call->accept_link, &rx->secureq);
+                       call->conn->state = RXRPC_CONN_SERVER_CHALLENGING;
+                       atomic_inc(&call->conn->usage);
+                       set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events);
+                       rxrpc_queue_conn(call->conn);
+               } else {
+                       _debug("conn ready");
+                       call->state = RXRPC_CALL_SERVER_ACCEPTING;
+                       list_add_tail(&call->accept_link, &rx->acceptq);
+                       rxrpc_get_call(call);
+                       nsp = rxrpc_skb(notification);
+                       nsp->call = call;
+
+                       ASSERTCMP(atomic_read(&call->usage), >=, 3);
+
+                       _debug("notify");
+                       spin_lock(&call->lock);
+                       ret = rxrpc_queue_rcv_skb(call, notification, true,
+                                                 false);
+                       spin_unlock(&call->lock);
+                       notification = NULL;
+                       if (ret < 0)
+                               BUG();
+               }
+               spin_unlock(&call->conn->state_lock);
+
+               _debug("queued");
+       }
+       write_unlock(&rx->call_lock);
+
+       _debug("process");
+       rxrpc_fast_process_packet(call, skb);
+
+       _debug("done");
+       read_unlock_bh(&local->services_lock);
+       rxrpc_free_skb(notification);
+       rxrpc_put_call(call);
+       _leave(" = 0");
+       return 0;
+
+invalid_service:
+       _debug("invalid");
+       read_unlock_bh(&local->services_lock);
+
+       read_lock_bh(&call->state_lock);
+       if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
+           !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) {
+               rxrpc_get_call(call);
+               rxrpc_queue_call(call);
+       }
+       read_unlock_bh(&call->state_lock);
+       rxrpc_put_call(call);
+       ret = -ECONNREFUSED;
+error:
+       rxrpc_free_skb(notification);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * accept incoming calls that need peer, transport and/or connection setting up
+ * - the packets we get are all incoming client DATA packets that have seq == 1
+ */
+void rxrpc_accept_incoming_calls(struct work_struct *work)
+{
+       struct rxrpc_local *local =
+               container_of(work, struct rxrpc_local, acceptor);
+       struct rxrpc_skb_priv *sp;
+       struct sockaddr_rxrpc srx;
+       struct rxrpc_sock *rx;
+       struct sk_buff *skb;
+       __be16 service_id;
+       int ret;
+
+       _enter("%d", local->debug_id);
+
+       read_lock_bh(&rxrpc_local_lock);
+       if (atomic_read(&local->usage) > 0)
+               rxrpc_get_local(local);
+       else
+               local = NULL;
+       read_unlock_bh(&rxrpc_local_lock);
+       if (!local) {
+               _leave(" [local dead]");
+               return;
+       }
+
+process_next_packet:
+       skb = skb_dequeue(&local->accept_queue);
+       if (!skb) {
+               rxrpc_put_local(local);
+               _leave("\n");
+               return;
+       }
+
+       _net("incoming call skb %p", skb);
+
+       sp = rxrpc_skb(skb);
+
+       /* determine the remote address */
+       memset(&srx, 0, sizeof(srx));
+       srx.srx_family = AF_RXRPC;
+       srx.transport.family = local->srx.transport.family;
+       srx.transport_type = local->srx.transport_type;
+       switch (srx.transport.family) {
+       case AF_INET:
+               srx.transport_len = sizeof(struct sockaddr_in);
+               srx.transport.sin.sin_port = udp_hdr(skb)->source;
+               srx.transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+               break;
+       default:
+               goto busy;
+       }
+
+       /* get the socket providing the service */
+       service_id = sp->hdr.serviceId;
+       read_lock_bh(&local->services_lock);
+       list_for_each_entry(rx, &local->services, listen_link) {
+               if (rx->service_id == service_id &&
+                   rx->sk.sk_state != RXRPC_CLOSE)
+                       goto found_service;
+       }
+       read_unlock_bh(&local->services_lock);
+       goto invalid_service;
+
+found_service:
+       _debug("found service %hd", ntohs(rx->service_id));
+       if (sk_acceptq_is_full(&rx->sk))
+               goto backlog_full;
+       sk_acceptq_added(&rx->sk);
+       sock_hold(&rx->sk);
+       read_unlock_bh(&local->services_lock);
+
+       ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
+       if (ret < 0)
+               sk_acceptq_removed(&rx->sk);
+       sock_put(&rx->sk);
+       switch (ret) {
+       case -ECONNRESET: /* old calls are ignored */
+       case -ECONNABORTED: /* aborted calls are reaborted or ignored */
+       case 0:
+               goto process_next_packet;
+       case -ECONNREFUSED:
+               goto invalid_service;
+       case -EBUSY:
+               goto busy;
+       case -EKEYREJECTED:
+               goto security_mismatch;
+       default:
+               BUG();
+       }
+
+backlog_full:
+       read_unlock_bh(&local->services_lock);
+busy:
+       rxrpc_busy(local, &srx, &sp->hdr);
+       rxrpc_free_skb(skb);
+       goto process_next_packet;
+
+invalid_service:
+       skb->priority = RX_INVALID_OPERATION;
+       rxrpc_reject_packet(local, skb);
+       goto process_next_packet;
+
+       /* can't change connection security type mid-flow */
+security_mismatch:
+       skb->priority = RX_PROTOCOL_ERROR;
+       rxrpc_reject_packet(local, skb);
+       goto process_next_packet;
+}
+
+/*
+ * handle acceptance of a call by userspace
+ * - assign the user call ID to the call at the front of the queue
+ */
+struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
+                                    unsigned long user_call_ID)
+{
+       struct rxrpc_call *call;
+       struct rb_node *parent, **pp;
+       int ret;
+
+       _enter(",%lx", user_call_ID);
+
+       ASSERT(!irqs_disabled());
+
+       write_lock(&rx->call_lock);
+
+       ret = -ENODATA;
+       if (list_empty(&rx->acceptq))
+               goto out;
+
+       /* check the user ID isn't already in use */
+       ret = -EBADSLT;
+       pp = &rx->calls.rb_node;
+       parent = NULL;
+       while (*pp) {
+               parent = *pp;
+               call = rb_entry(parent, struct rxrpc_call, sock_node);
+
+               if (user_call_ID < call->user_call_ID)
+                       pp = &(*pp)->rb_left;
+               else if (user_call_ID > call->user_call_ID)
+                       pp = &(*pp)->rb_right;
+               else
+                       goto out;
+       }
+
+       /* dequeue the first call and check it's still valid */
+       call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
+       list_del_init(&call->accept_link);
+       sk_acceptq_removed(&rx->sk);
+
+       write_lock_bh(&call->state_lock);
+       switch (call->state) {
+       case RXRPC_CALL_SERVER_ACCEPTING:
+               call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+               break;
+       case RXRPC_CALL_REMOTELY_ABORTED:
+       case RXRPC_CALL_LOCALLY_ABORTED:
+               ret = -ECONNABORTED;
+               goto out_release;
+       case RXRPC_CALL_NETWORK_ERROR:
+               ret = call->conn->error;
+               goto out_release;
+       case RXRPC_CALL_DEAD:
+               ret = -ETIME;
+               goto out_discard;
+       default:
+               BUG();
+       }
+
+       /* formalise the acceptance */
+       call->user_call_ID = user_call_ID;
+       rb_link_node(&call->sock_node, parent, pp);
+       rb_insert_color(&call->sock_node, &rx->calls);
+       if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
+               BUG();
+       if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events))
+               BUG();
+       rxrpc_queue_call(call);
+
+       rxrpc_get_call(call);
+       write_unlock_bh(&call->state_lock);
+       write_unlock(&rx->call_lock);
+       _leave(" = %p{%d}", call, call->debug_id);
+       return call;
+
+       /* if the call is already dying or dead, then we leave the socket's ref
+        * on it to be released by rxrpc_dead_call_expired() as induced by
+        * rxrpc_release_call() */
+out_release:
+       _debug("release %p", call);
+       if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
+           !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+               rxrpc_queue_call(call);
+out_discard:
+       write_unlock_bh(&call->state_lock);
+       _debug("discard %p", call);
+out:
+       write_unlock(&rx->call_lock);
+       _leave(" = %d", ret);
+       return ERR_PTR(ret);
+}
+
+/*
+ * handle rejection of a call by userspace
+ * - reject the call at the front of the queue
+ */
+int rxrpc_reject_call(struct rxrpc_sock *rx)
+{
+       struct rxrpc_call *call;
+       int ret;
+
+       _enter("");
+
+       ASSERT(!irqs_disabled());
+
+       write_lock(&rx->call_lock);
+
+       ret = -ENODATA;
+       if (list_empty(&rx->acceptq))
+               goto out;
+
+       /* dequeue the first call and check it's still valid */
+       call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
+       list_del_init(&call->accept_link);
+       sk_acceptq_removed(&rx->sk);
+
+       write_lock_bh(&call->state_lock);
+       switch (call->state) {
+       case RXRPC_CALL_SERVER_ACCEPTING:
+               call->state = RXRPC_CALL_SERVER_BUSY;
+               if (test_and_set_bit(RXRPC_CALL_REJECT_BUSY, &call->events))
+                       rxrpc_queue_call(call);
+               ret = 0;
+               goto out_release;
+       case RXRPC_CALL_REMOTELY_ABORTED:
+       case RXRPC_CALL_LOCALLY_ABORTED:
+               ret = -ECONNABORTED;
+               goto out_release;
+       case RXRPC_CALL_NETWORK_ERROR:
+               ret = call->conn->error;
+               goto out_release;
+       case RXRPC_CALL_DEAD:
+               ret = -ETIME;
+               goto out_discard;
+       default:
+               BUG();
+       }
+
+       /* if the call is already dying or dead, then we leave the socket's ref
+        * on it to be released by rxrpc_dead_call_expired() as induced by
+        * rxrpc_release_call() */
+out_release:
+       _debug("release %p", call);
+       if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
+           !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+               rxrpc_queue_call(call);
+out_discard:
+       write_unlock_bh(&call->state_lock);
+       _debug("discard %p", call);
+out:
+       write_unlock(&rx->call_lock);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/**
+ * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call
+ * @sock: The socket on which the impending call is waiting
+ * @user_call_ID: The tag to attach to the call
+ *
+ * Allow a kernel service to accept an incoming call, assuming the incoming
+ * call is still valid.
+ */
+struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
+                                           unsigned long user_call_ID)
+{
+       struct rxrpc_call *call;
+
+       _enter(",%lx", user_call_ID);
+       call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID);
+       _leave(" = %p", call);
+       return call;
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_accept_call);
+
+/**
+ * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call
+ * @sock: The socket on which the impending call is waiting
+ *
+ * Allow a kernel service to reject an incoming call with a BUSY message,
+ * assuming the incoming call is still valid.
+ */
+int rxrpc_kernel_reject_call(struct socket *sock)
+{
+       int ret;
+
+       _enter("");
+       ret = rxrpc_reject_call(rxrpc_sk(sock->sk));
+       _leave(" = %d", ret);
+       return ret;
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_reject_call);
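+
+/*
+ * Putting these together: a kernel service that has been told of a new
+ * incoming call (an intercepted message marked RXRPC_SKB_MARK_NEW_CALL)
+ * decides what to do with the call at the front of the accept queue and then
+ * does one of, for instance:
+ *
+ *        call = rxrpc_kernel_accept_call(sock, my_tag);
+ *
+ * to take the call, tagging it with an arbitrary value of its own choosing
+ * (my_tag here), or:
+ *
+ *        rxrpc_kernel_reject_call(sock);
+ *
+ * to turn the call away with a BUSY indication.
+ */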
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
new file mode 100644 (file)
index 0000000..fc07a92
--- /dev/null
@@ -0,0 +1,1250 @@
+/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/circ_buf.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static unsigned rxrpc_ack_defer = 1;
+
+static const char *rxrpc_acks[] = {
+       "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
+       "-?-"
+};
+
+static const s8 rxrpc_ack_priority[] = {
+       [0]                             = 0,
+       [RXRPC_ACK_DELAY]               = 1,
+       [RXRPC_ACK_REQUESTED]           = 2,
+       [RXRPC_ACK_IDLE]                = 3,
+       [RXRPC_ACK_PING_RESPONSE]       = 4,
+       [RXRPC_ACK_DUPLICATE]           = 5,
+       [RXRPC_ACK_OUT_OF_SEQUENCE]     = 6,
+       [RXRPC_ACK_EXCEEDS_WINDOW]      = 7,
+       [RXRPC_ACK_NOSPACE]             = 8,
+};
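+
+/*
+ * A newly proposed ACK only displaces the one currently pending on a call if
+ * its priority in the table above is strictly greater (see
+ * __rxrpc_propose_ACK() below).
+ */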
+
+/*
+ * propose an ACK be sent
+ */
+void __rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason,
+                        __be32 serial, bool immediate)
+{
+       unsigned long expiry;
+       s8 prior = rxrpc_ack_priority[ack_reason];
+
+       ASSERTCMP(prior, >, 0);
+
+       _enter("{%d},%s,%%%x,%u",
+              call->debug_id, rxrpc_acks[ack_reason], ntohl(serial),
+              immediate);
+
+       if (prior < rxrpc_ack_priority[call->ackr_reason]) {
+               if (immediate)
+                       goto cancel_timer;
+               return;
+       }
+
+       /* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
+        * numbers */
+       if (prior == rxrpc_ack_priority[call->ackr_reason]) {
+               if (prior <= 4)
+                       call->ackr_serial = serial;
+               if (immediate)
+                       goto cancel_timer;
+               return;
+       }
+
+       call->ackr_reason = ack_reason;
+       call->ackr_serial = serial;
+
+       switch (ack_reason) {
+       case RXRPC_ACK_DELAY:
+               _debug("run delay timer");
+               call->ack_timer.expires = jiffies + rxrpc_ack_timeout * HZ;
+               add_timer(&call->ack_timer);
+               return;
+
+       case RXRPC_ACK_IDLE:
+               if (!immediate) {
+                       _debug("run defer timer");
+                       expiry = 1;
+                       goto run_timer;
+               }
+               goto cancel_timer;
+
+       case RXRPC_ACK_REQUESTED:
+               if (!rxrpc_ack_defer)
+                       goto cancel_timer;
+               if (!immediate || serial == cpu_to_be32(1)) {
+                       _debug("run defer timer");
+                       expiry = rxrpc_ack_defer;
+                       goto run_timer;
+               }
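+               /* fall through */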
+
+       default:
+               _debug("immediate ACK");
+               goto cancel_timer;
+       }
+
+run_timer:
+       expiry += jiffies;
+       if (!timer_pending(&call->ack_timer) ||
+           time_after(call->ack_timer.expires, expiry))
+               mod_timer(&call->ack_timer, expiry);
+       return;
+
+cancel_timer:
+       _debug("cancel timer %%%u", ntohl(serial));
+       try_to_del_timer_sync(&call->ack_timer);
+       read_lock_bh(&call->state_lock);
+       if (call->state <= RXRPC_CALL_COMPLETE &&
+           !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
+               rxrpc_queue_call(call);
+       read_unlock_bh(&call->state_lock);
+}
+
+/*
+ * propose an ACK be sent, locking the call structure
+ */
+void rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason,
+                      __be32 serial, bool immediate)
+{
+       s8 prior = rxrpc_ack_priority[ack_reason];
+
+       if (prior > rxrpc_ack_priority[call->ackr_reason]) {
+               spin_lock_bh(&call->lock);
+               __rxrpc_propose_ACK(call, ack_reason, serial, immediate);
+               spin_unlock_bh(&call->lock);
+       }
+}
+
+/*
+ * set the resend timer
+ */
+static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
+                            unsigned long resend_at)
+{
+       read_lock_bh(&call->state_lock);
+       if (call->state >= RXRPC_CALL_COMPLETE)
+               resend = 0;
+
+       if (resend & 1) {
+               _debug("SET RESEND");
+               set_bit(RXRPC_CALL_RESEND, &call->events);
+       }
+
+       if (resend & 2) {
+               _debug("MODIFY RESEND TIMER");
+               set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+               mod_timer(&call->resend_timer, resend_at);
+       } else {
+               _debug("KILL RESEND TIMER");
+               del_timer_sync(&call->resend_timer);
+               clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+               clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+       }
+       read_unlock_bh(&call->state_lock);
+}
+
+/*
+ * resend packets
+ */
+static void rxrpc_resend(struct rxrpc_call *call)
+{
+       struct rxrpc_skb_priv *sp;
+       struct rxrpc_header *hdr;
+       struct sk_buff *txb;
+       unsigned long *p_txb, resend_at;
+       int loop, stop;
+       u8 resend;
+
+       _enter("{%d,%d,%d,%d},",
+              call->acks_hard, call->acks_unacked,
+              atomic_read(&call->sequence),
+              CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
+
+       stop = 0;
+       resend = 0;
+       resend_at = 0;
+
+       for (loop = call->acks_tail;
+            loop != call->acks_head || stop;
+            loop = (loop + 1) &  (call->acks_winsz - 1)
+            ) {
+               p_txb = call->acks_window + loop;
+               smp_read_barrier_depends();
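+               /* bit 0 of a Tx window slot is set once that packet has been
+                * soft-ACK'd (see rxrpc_process_soft_ACKs()), so skip it */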
+               if (*p_txb & 1)
+                       continue;
+
+               txb = (struct sk_buff *) *p_txb;
+               sp = rxrpc_skb(txb);
+
+               if (sp->need_resend) {
+                       sp->need_resend = 0;
+
+                       /* each Tx packet has a new serial number */
+                       sp->hdr.serial =
+                               htonl(atomic_inc_return(&call->conn->serial));
+
+                       hdr = (struct rxrpc_header *) txb->head;
+                       hdr->serial = sp->hdr.serial;
+
+                       _proto("Tx DATA %%%u { #%d }",
+                              ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
+                       if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
+                               stop = 0;
+                               sp->resend_at = jiffies + 3;
+                       } else {
+                               sp->resend_at =
+                                       jiffies + rxrpc_resend_timeout * HZ;
+                       }
+               }
+
+               if (time_after_eq(jiffies + 1, sp->resend_at)) {
+                       sp->need_resend = 1;
+                       resend |= 1;
+               } else if (resend & 2) {
+                       if (time_before(sp->resend_at, resend_at))
+                               resend_at = sp->resend_at;
+               } else {
+                       resend_at = sp->resend_at;
+                       resend |= 2;
+               }
+       }
+
+       rxrpc_set_resend(call, resend, resend_at);
+       _leave("");
+}
+
+/*
+ * handle resend timer expiry
+ */
+static void rxrpc_resend_timer(struct rxrpc_call *call)
+{
+       struct rxrpc_skb_priv *sp;
+       struct sk_buff *txb;
+       unsigned long *p_txb, resend_at;
+       int loop;
+       u8 resend;
+
+       _enter("%d,%d,%d",
+              call->acks_tail, call->acks_unacked, call->acks_head);
+
+       resend = 0;
+       resend_at = 0;
+
+       for (loop = call->acks_unacked;
+            loop != call->acks_head;
+            loop = (loop + 1) &  (call->acks_winsz - 1)
+            ) {
+               p_txb = call->acks_window + loop;
+               smp_read_barrier_depends();
+               txb = (struct sk_buff *) (*p_txb & ~1);
+               sp = rxrpc_skb(txb);
+
+               ASSERT(!(*p_txb & 1));
+
+               if (sp->need_resend) {
+                       ;
+               } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
+                       sp->need_resend = 1;
+                       resend |= 1;
+               } else if (resend & 2) {
+                       if (time_before(sp->resend_at, resend_at))
+                               resend_at = sp->resend_at;
+               } else {
+                       resend_at = sp->resend_at;
+                       resend |= 2;
+               }
+       }
+
+       rxrpc_set_resend(call, resend, resend_at);
+       _leave("");
+}
+
+/*
+ * process soft ACKs of our transmitted packets
+ * - these indicate packets the peer has or has not received, but hasn't yet
+ *   given to the consumer, and so can still be discarded and re-requested
+ */
+static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
+                                  struct rxrpc_ackpacket *ack,
+                                  struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp;
+       struct sk_buff *txb;
+       unsigned long *p_txb, resend_at;
+       int loop;
+       u8 sacks[RXRPC_MAXACKS], resend;
+
+       _enter("{%d,%d},{%d},",
+              call->acks_hard,
+              CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
+              ack->nAcks);
+
+       if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
+               goto protocol_error;
+
+       resend = 0;
+       resend_at = 0;
+       for (loop = 0; loop < ack->nAcks; loop++) {
+               p_txb = call->acks_window;
+               p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
+               smp_read_barrier_depends();
+               txb = (struct sk_buff *) (*p_txb & ~1);
+               sp = rxrpc_skb(txb);
+
+               switch (sacks[loop]) {
+               case RXRPC_ACK_TYPE_ACK:
+                       sp->need_resend = 0;
+                       *p_txb |= 1;
+                       break;
+               case RXRPC_ACK_TYPE_NACK:
+                       sp->need_resend = 1;
+                       *p_txb &= ~1;
+                       resend = 1;
+                       break;
+               default:
+                       _debug("Unsupported ACK type %d", sacks[loop]);
+                       goto protocol_error;
+               }
+       }
+
+       smp_mb();
+       call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);
+
+       /* anything not explicitly ACK'd is implicitly NACK'd, but may just not
+        * have been received or processed yet by the far end */
+       for (loop = call->acks_unacked;
+            loop != call->acks_head;
+            loop = (loop + 1) &  (call->acks_winsz - 1)
+            ) {
+               p_txb = call->acks_window + loop;
+               smp_read_barrier_depends();
+               txb = (struct sk_buff *) (*p_txb & ~1);
+               sp = rxrpc_skb(txb);
+
+               if (*p_txb & 1) {
+                       /* packet must have been discarded */
+                       sp->need_resend = 1;
+                       *p_txb &= ~1;
+                       resend |= 1;
+               } else if (sp->need_resend) {
+                       ;
+               } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
+                       sp->need_resend = 1;
+                       resend |= 1;
+               } else if (resend & 2) {
+                       if (time_before(sp->resend_at, resend_at))
+                               resend_at = sp->resend_at;
+               } else {
+                       resend_at = sp->resend_at;
+                       resend |= 2;
+               }
+       }
+
+       rxrpc_set_resend(call, resend, resend_at);
+       _leave(" = 0");
+       return 0;
+
+protocol_error:
+       _leave(" = -EPROTO");
+       return -EPROTO;
+}
+
+/*
+ * discard hard-ACK'd packets from the Tx window
+ */
+static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
+{
+       struct rxrpc_skb_priv *sp;
+       unsigned long _skb;
+       int tail = call->acks_tail, old_tail;
+       int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
+
+       _enter("{%u,%u},%u", call->acks_hard, win, hard);
+
+       ASSERTCMP(hard - call->acks_hard, <=, win);
+
+       while (call->acks_hard < hard) {
+               smp_read_barrier_depends();
+               _skb = call->acks_window[tail] & ~1;
+               sp = rxrpc_skb((struct sk_buff *) _skb);
+               rxrpc_free_skb((struct sk_buff *) _skb);
+               old_tail = tail;
+               tail = (tail + 1) & (call->acks_winsz - 1);
+               call->acks_tail = tail;
+               if (call->acks_unacked == old_tail)
+                       call->acks_unacked = tail;
+               call->acks_hard++;
+       }
+
+       wake_up(&call->tx_waitq);
+}
+
+/*
+ * clear the Tx window in the event of a failure
+ */
+static void rxrpc_clear_tx_window(struct rxrpc_call *call)
+{
+       rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
+}
+
+/*
+ * drain the out of sequence received packet queue into the packet Rx queue
+ */
+static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
+{
+       struct rxrpc_skb_priv *sp;
+       struct sk_buff *skb;
+       bool terminal;
+       int ret;
+
+       _enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);
+
+       spin_lock_bh(&call->lock);
+
+       ret = -ECONNRESET;
+       if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
+               goto socket_unavailable;
+
+       skb = skb_dequeue(&call->rx_oos_queue);
+       if (skb) {
+               sp = rxrpc_skb(skb);
+
+               _debug("drain OOS packet %d [%d]",
+                      ntohl(sp->hdr.seq), call->rx_first_oos);
+
+               if (ntohl(sp->hdr.seq) != call->rx_first_oos) {
+                       skb_queue_head(&call->rx_oos_queue, skb);
+                       call->rx_first_oos = ntohl(rxrpc_skb(skb)->hdr.seq);
+                       _debug("requeue %p {%u}", skb, call->rx_first_oos);
+               } else {
+                       skb->mark = RXRPC_SKB_MARK_DATA;
+                       terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
+                               !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
+                       ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
+                       BUG_ON(ret < 0);
+                       _debug("drain #%u", call->rx_data_post);
+                       call->rx_data_post++;
+
+                       /* find out what the next packet is */
+                       skb = skb_peek(&call->rx_oos_queue);
+                       if (skb)
+                               call->rx_first_oos =
+                                       ntohl(rxrpc_skb(skb)->hdr.seq);
+                       else
+                               call->rx_first_oos = 0;
+                       _debug("peek %p {%u}", skb, call->rx_first_oos);
+               }
+       }
+
+       ret = 0;
+socket_unavailable:
+       spin_unlock_bh(&call->lock);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * insert an out of sequence packet into the buffer
+ */
+static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
+                                   struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp, *psp;
+       struct sk_buff *p;
+       u32 seq;
+
+       sp = rxrpc_skb(skb);
+       seq = ntohl(sp->hdr.seq);
+       _enter(",,{%u}", seq);
+
+       skb->destructor = rxrpc_packet_destructor;
+       ASSERTCMP(sp->call, ==, NULL);
+       sp->call = call;
+       rxrpc_get_call(call);
+
+       /* insert into the buffer in sequence order */
+       spin_lock_bh(&call->lock);
+
+       skb_queue_walk(&call->rx_oos_queue, p) {
+               psp = rxrpc_skb(p);
+               if (ntohl(psp->hdr.seq) > seq) {
+                       _debug("insert oos #%u before #%u",
+                              seq, ntohl(psp->hdr.seq));
+                       skb_insert(p, skb, &call->rx_oos_queue);
+                       goto inserted;
+               }
+       }
+
+       _debug("append oos #%u", seq);
+       skb_queue_tail(&call->rx_oos_queue, skb);
+inserted:
+
+       /* we might now have a new front to the queue */
+       if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
+               call->rx_first_oos = seq;
+
+       read_lock(&call->state_lock);
+       if (call->state < RXRPC_CALL_COMPLETE &&
+           call->rx_data_post == call->rx_first_oos) {
+               _debug("drain rx oos now");
+               set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
+       }
+       read_unlock(&call->state_lock);
+
+       spin_unlock_bh(&call->lock);
+       _leave(" [stored #%u]", call->rx_first_oos);
+}
+
+/*
+ * clear the Tx window on final ACK reception
+ */
+static void rxrpc_zap_tx_window(struct rxrpc_call *call)
+{
+       struct rxrpc_skb_priv *sp;
+       struct sk_buff *skb;
+       unsigned long _skb, *acks_window;
+       uint8_t winsz = call->acks_winsz;
+       int tail;
+
+       acks_window = call->acks_window;
+       call->acks_window = NULL;
+
+       while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
+               tail = call->acks_tail;
+               smp_read_barrier_depends();
+               _skb = acks_window[tail] & ~1;
+               smp_mb();
+               call->acks_tail = (call->acks_tail + 1) & (winsz - 1);
+
+               skb = (struct sk_buff *) _skb;
+               sp = rxrpc_skb(skb);
+               _debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
+               rxrpc_free_skb(skb);
+       }
+
+       kfree(acks_window);
+}
+
+/*
+ * process packets in the reception queue
+ */
+static int rxrpc_process_rx_queue(struct rxrpc_call *call,
+                                 u32 *_abort_code)
+{
+       struct rxrpc_ackpacket ack;
+       struct rxrpc_skb_priv *sp;
+       struct sk_buff *skb;
+       bool post_ACK;
+       int latest;
+       u32 hard, tx;
+
+       _enter("");
+
+process_further:
+       skb = skb_dequeue(&call->rx_queue);
+       if (!skb)
+               return -EAGAIN;
+
+       _net("deferred skb %p", skb);
+
+       sp = rxrpc_skb(skb);
+
+       _debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);
+
+       post_ACK = false;
+
+       switch (sp->hdr.type) {
+               /* data packets that wind up here have been received out of
+                * order, need security processing or are jumbo packets */
+       case RXRPC_PACKET_TYPE_DATA:
+               _proto("OOSQ DATA %%%u { #%u }",
+                      ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
+
+               /* secured packets must be verified and possibly decrypted */
+               if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
+                       goto protocol_error;
+
+               rxrpc_insert_oos_packet(call, skb);
+               goto process_further;
+
+               /* partial ACK to process */
+       case RXRPC_PACKET_TYPE_ACK:
+               if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
+                       _debug("extraction failure");
+                       goto protocol_error;
+               }
+               if (!skb_pull(skb, sizeof(ack)))
+                       BUG();
+
+               latest = ntohl(sp->hdr.serial);
+               hard = ntohl(ack.firstPacket);
+               tx = atomic_read(&call->sequence);
+
+               _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+                      latest,
+                      ntohs(ack.maxSkew),
+                      hard,
+                      ntohl(ack.previousPacket),
+                      ntohl(ack.serial),
+                      rxrpc_acks[ack.reason],
+                      ack.nAcks);
+
+               if (ack.reason == RXRPC_ACK_PING) {
+                       _proto("Rx ACK %%%u PING Request", latest);
+                       rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
+                                         sp->hdr.serial, true);
+               }
+
+               /* discard any out-of-order or duplicate ACKs */
+               if (latest - call->acks_latest <= 0) {
+                       _debug("discard ACK %d <= %d",
+                              latest, call->acks_latest);
+                       goto discard;
+               }
+               call->acks_latest = latest;
+
+               if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
+                   call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
+                   call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
+                   call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
+                       goto discard;
+
+               _debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);
+
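+               /* everything before firstPacket has been hard-ACK'd by the
+                * peer, so those packets can be dropped from the Tx window */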
+               if (hard > 0) {
+                       if (hard - 1 > tx) {
+                               _debug("hard-ACK'd packet %d not transmitted"
+                                      " (%d top)",
+                                      hard - 1, tx);
+                               goto protocol_error;
+                       }
+
+                       if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
+                            call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
+                           hard > tx)
+                               goto all_acked;
+
+                       smp_rmb();
+                       rxrpc_rotate_tx_window(call, hard - 1);
+               }
+
+               if (ack.nAcks > 0) {
+                       if (hard - 1 + ack.nAcks > tx) {
+                               _debug("soft-ACK'd packet %d+%d not"
+                                      " transmitted (%d top)",
+                                      hard - 1, ack.nAcks, tx);
+                               goto protocol_error;
+                       }
+
+                       if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
+                               goto protocol_error;
+               }
+               goto discard;
+
+               /* complete ACK to process */
+       case RXRPC_PACKET_TYPE_ACKALL:
+               goto all_acked;
+
+               /* abort and busy are handled elsewhere */
+       case RXRPC_PACKET_TYPE_BUSY:
+       case RXRPC_PACKET_TYPE_ABORT:
+               BUG();
+
+               /* connection level events - also handled elsewhere */
+       case RXRPC_PACKET_TYPE_CHALLENGE:
+       case RXRPC_PACKET_TYPE_RESPONSE:
+       case RXRPC_PACKET_TYPE_DEBUG:
+               BUG();
+       }
+
+       /* if we've had a hard ACK that covers all the packets we've sent, then
+        * that ends that phase of the operation */
+all_acked:
+       write_lock_bh(&call->state_lock);
+       _debug("ack all %d", call->state);
+
+       switch (call->state) {
+       case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+               call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
+               break;
+       case RXRPC_CALL_SERVER_AWAIT_ACK:
+               _debug("srv complete");
+               call->state = RXRPC_CALL_COMPLETE;
+               post_ACK = true;
+               break;
+       case RXRPC_CALL_CLIENT_SEND_REQUEST:
+       case RXRPC_CALL_SERVER_RECV_REQUEST:
+               goto protocol_error_unlock; /* can't occur yet */
+       default:
+               write_unlock_bh(&call->state_lock);
+               goto discard; /* assume packet left over from earlier phase */
+       }
+
+       write_unlock_bh(&call->state_lock);
+
+       /* if all the packets we sent are hard-ACK'd, then we can discard
+        * whatever we've got left */
+       _debug("clear Tx %d",
+              CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
+
+       del_timer_sync(&call->resend_timer);
+       clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+       clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+
+       if (call->acks_window)
+               rxrpc_zap_tx_window(call);
+
+       if (post_ACK) {
+               /* post the final ACK message for userspace to pick up */
+               _debug("post ACK");
+               skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
+               sp->call = call;
+               rxrpc_get_call(call);
+               spin_lock_bh(&call->lock);
+               if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
+                       BUG();
+               spin_unlock_bh(&call->lock);
+               goto process_further;
+       }
+
+discard:
+       rxrpc_free_skb(skb);
+       goto process_further;
+
+protocol_error_unlock:
+       write_unlock_bh(&call->state_lock);
+protocol_error:
+       rxrpc_free_skb(skb);
+       _leave(" = -EPROTO");
+       return -EPROTO;
+}
+
+/*
+ * post a message to the socket Rx queue for recvmsg() to pick up
+ */
+static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
+                             bool fatal)
+{
+       struct rxrpc_skb_priv *sp;
+       struct sk_buff *skb;
+       int ret;
+
+       _enter("{%d,%lx},%u,%u,%d",
+              call->debug_id, call->flags, mark, error, fatal);
+
+       /* remove timers and things for fatal messages */
+       if (fatal) {
+               del_timer_sync(&call->resend_timer);
+               del_timer_sync(&call->ack_timer);
+               clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+       }
+
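+       /* unless this is a new-call notification, there's no point posting a
+        * message on a call that userspace has no ID for */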
+       if (mark != RXRPC_SKB_MARK_NEW_CALL &&
+           !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
+               _leave("[no userid]");
+               return 0;
+       }
+
+       if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
+               skb = alloc_skb(0, GFP_NOFS);
+               if (!skb)
+                       return -ENOMEM;
+
+               rxrpc_new_skb(skb);
+
+               skb->mark = mark;
+
+               sp = rxrpc_skb(skb);
+               memset(sp, 0, sizeof(*sp));
+               sp->error = error;
+               sp->call = call;
+               rxrpc_get_call(call);
+
+               spin_lock_bh(&call->lock);
+               ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
+               spin_unlock_bh(&call->lock);
+               if (ret < 0)
+                       BUG();
+       }
+
+       return 0;
+}
+
+/*
+ * handle background processing of incoming call packets and ACK / abort
+ * generation
+ */
+void rxrpc_process_call(struct work_struct *work)
+{
+       struct rxrpc_call *call =
+               container_of(work, struct rxrpc_call, processor);
+       struct rxrpc_ackpacket ack;
+       struct rxrpc_ackinfo ackinfo;
+       struct rxrpc_header hdr;
+       struct msghdr msg;
+       struct kvec iov[5];
+       unsigned long bits;
+       __be32 data;
+       size_t len;
+       int genbit, loop, nbit, ioc, ret;
+       u32 abort_code = RX_PROTOCOL_ERROR;
+       u8 *acks = NULL;
+
+       //printk("\n--------------------\n");
+       _enter("{%d,%s,%lx} [%lu]",
+              call->debug_id, rxrpc_call_states[call->state], call->events,
+              (jiffies - call->creation_jif) / (HZ / 10));
+
+       if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
+               _debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
+               return;
+       }
+
+       /* there's a good chance we're going to have to send a message, so set
+        * one up in advance */
+       msg.msg_name    = &call->conn->trans->peer->srx.transport.sin;
+       msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_flags   = 0;
+
+       hdr.epoch       = call->conn->epoch;
+       hdr.cid         = call->cid;
+       hdr.callNumber  = call->call_id;
+       hdr.seq         = 0;
+       hdr.type        = RXRPC_PACKET_TYPE_ACK;
+       hdr.flags       = call->conn->out_clientflag;
+       hdr.userStatus  = 0;
+       hdr.securityIndex = call->conn->security_ix;
+       hdr._rsvd       = 0;
+       hdr.serviceId   = call->conn->service_id;
+
+       memset(iov, 0, sizeof(iov));
+       iov[0].iov_base = &hdr;
+       iov[0].iov_len  = sizeof(hdr);
+
+       /* deal with events of a final nature */
+       if (test_bit(RXRPC_CALL_RELEASE, &call->events)) {
+               rxrpc_release_call(call);
+               clear_bit(RXRPC_CALL_RELEASE, &call->events);
+       }
+
+       if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) {
+               int error;
+
+               clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
+               clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
+               clear_bit(RXRPC_CALL_ABORT, &call->events);
+
+               error = call->conn->trans->peer->net_error;
+               _debug("post net error %d", error);
+
+               if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
+                                      error, true) < 0)
+                       goto no_mem;
+               clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
+               goto kill_ACKs;
+       }
+
+       if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) {
+               ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
+
+               clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
+               clear_bit(RXRPC_CALL_ABORT, &call->events);
+
+               _debug("post conn abort");
+
+               if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
+                                      call->conn->error, true) < 0)
+                       goto no_mem;
+               clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
+               goto kill_ACKs;
+       }
+
+       if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) {
+               hdr.type = RXRPC_PACKET_TYPE_BUSY;
+               genbit = RXRPC_CALL_REJECT_BUSY;
+               goto send_message;
+       }
+
+       if (test_bit(RXRPC_CALL_ABORT, &call->events)) {
+               ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
+
+               if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
+                                      ECONNABORTED, true) < 0)
+                       goto no_mem;
+               hdr.type = RXRPC_PACKET_TYPE_ABORT;
+               data = htonl(call->abort_code);
+               iov[1].iov_base = &data;
+               iov[1].iov_len = sizeof(data);
+               genbit = RXRPC_CALL_ABORT;
+               goto send_message;
+       }
+
+       if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
+               hdr.type = RXRPC_PACKET_TYPE_ACKALL;
+               genbit = RXRPC_CALL_ACK_FINAL;
+               goto send_message;
+       }
+
+       if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
+                           (1 << RXRPC_CALL_RCVD_ABORT))
+           ) {
+               u32 mark;
+
+               if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events))
+                       mark = RXRPC_SKB_MARK_REMOTE_ABORT;
+               else
+                       mark = RXRPC_SKB_MARK_BUSY;
+
+               _debug("post abort/busy");
+               rxrpc_clear_tx_window(call);
+               if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
+                       goto no_mem;
+
+               clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
+               clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+               goto kill_ACKs;
+       }
+
+       if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) {
+               _debug("do implicit ackall");
+               rxrpc_clear_tx_window(call);
+       }
+
+       if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) {
+               write_lock_bh(&call->state_lock);
+               if (call->state <= RXRPC_CALL_COMPLETE) {
+                       call->state = RXRPC_CALL_LOCALLY_ABORTED;
+                       call->abort_code = RX_CALL_TIMEOUT;
+                       set_bit(RXRPC_CALL_ABORT, &call->events);
+               }
+               write_unlock_bh(&call->state_lock);
+
+               _debug("post timeout");
+               if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
+                                      ETIME, true) < 0)
+                       goto no_mem;
+
+               clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
+               goto kill_ACKs;
+       }
+
+       /* deal with assorted inbound messages */
+       if (!skb_queue_empty(&call->rx_queue)) {
+               switch (rxrpc_process_rx_queue(call, &abort_code)) {
+               case 0:
+               case -EAGAIN:
+                       break;
+               case -ENOMEM:
+                       goto no_mem;
+               case -EKEYEXPIRED:
+               case -EKEYREJECTED:
+               case -EPROTO:
+                       rxrpc_abort_call(call, abort_code);
+                       goto kill_ACKs;
+               }
+       }
+
+       /* handle resending */
+       if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+               rxrpc_resend_timer(call);
+       if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events))
+               rxrpc_resend(call);
+
+       /* consider sending an ordinary ACK */
+       if (test_bit(RXRPC_CALL_ACK, &call->events)) {
+               __be32 pad;
+
+               _debug("send ACK: window: %d - %d { %lx }",
+                      call->rx_data_eaten, call->ackr_win_top,
+                      call->ackr_window[0]);
+
+               if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
+                   call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
+                       /* ACK by sending reply DATA packet in this state */
+                       clear_bit(RXRPC_CALL_ACK, &call->events);
+                       goto maybe_reschedule;
+               }
+
+               genbit = RXRPC_CALL_ACK;
+
+               acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
+                              GFP_NOFS);
+               if (!acks)
+                       goto no_mem;
+
+               //hdr.flags     = RXRPC_SLOW_START_OK;
+               ack.bufferSpace = htons(8);
+               ack.maxSkew     = 0;
+               ack.serial      = 0;
+               ack.reason      = 0;
+
+               ackinfo.rxMTU   = htonl(5692);
+//             ackinfo.rxMTU   = htonl(call->conn->trans->peer->maxdata);
+               ackinfo.maxMTU  = htonl(call->conn->trans->peer->maxdata);
+               ackinfo.rwind   = htonl(32);
+               ackinfo.jumbo_max = htonl(4);
+
+               spin_lock_bh(&call->lock);
+               ack.reason = call->ackr_reason;
+               ack.serial = call->ackr_serial;
+               ack.previousPacket = call->ackr_prev_seq;
+               ack.firstPacket = htonl(call->rx_data_eaten + 1);
+
+               ack.nAcks = 0;
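+               /* translate the receive window bitmap into the
+                * byte-per-packet soft-ACK list that goes out on the wire */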
+               for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
+                       nbit = loop * BITS_PER_LONG;
+                       for (bits = call->ackr_window[loop]; bits; bits >>= 1
+                            ) {
+                               _debug("- l=%d n=%d b=%lx", loop, nbit, bits);
+                               if (bits & 1) {
+                                       acks[nbit] = RXRPC_ACK_TYPE_ACK;
+                                       ack.nAcks = nbit + 1;
+                               }
+                               nbit++;
+                       }
+               }
+               call->ackr_reason = 0;
+               spin_unlock_bh(&call->lock);
+
+               pad = 0;
+
+               iov[1].iov_base = &ack;
+               iov[1].iov_len  = sizeof(ack);
+               iov[2].iov_base = acks;
+               iov[2].iov_len  = ack.nAcks;
+               iov[3].iov_base = &pad;
+               iov[3].iov_len  = 3;
+               iov[4].iov_base = &ackinfo;
+               iov[4].iov_len  = sizeof(ackinfo);
+
+               switch (ack.reason) {
+               case RXRPC_ACK_REQUESTED:
+               case RXRPC_ACK_DUPLICATE:
+               case RXRPC_ACK_OUT_OF_SEQUENCE:
+               case RXRPC_ACK_EXCEEDS_WINDOW:
+               case RXRPC_ACK_NOSPACE:
+               case RXRPC_ACK_PING:
+               case RXRPC_ACK_PING_RESPONSE:
+                       goto send_ACK_with_skew;
+               case RXRPC_ACK_DELAY:
+               case RXRPC_ACK_IDLE:
+                       goto send_ACK;
+               }
+       }
+
+       /* handle completion of security negotiations on an incoming
+        * connection */
+       if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) {
+               _debug("secured");
+               spin_lock_bh(&call->lock);
+
+               if (call->state == RXRPC_CALL_SERVER_SECURING) {
+                       _debug("securing");
+                       write_lock(&call->conn->lock);
+                       if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
+                           !test_bit(RXRPC_CALL_RELEASE, &call->events)) {
+                               _debug("not released");
+                               call->state = RXRPC_CALL_SERVER_ACCEPTING;
+                               list_move_tail(&call->accept_link,
+                                              &call->socket->acceptq);
+                       }
+                       write_unlock(&call->conn->lock);
+                       read_lock(&call->state_lock);
+                       if (call->state < RXRPC_CALL_COMPLETE)
+                               set_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
+                       read_unlock(&call->state_lock);
+               }
+
+               spin_unlock_bh(&call->lock);
+               if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events))
+                       goto maybe_reschedule;
+       }
+
+       /* post a notification of an acceptable connection to the app */
+       if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) {
+               _debug("post accept");
+               if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
+                                      0, false) < 0)
+                       goto no_mem;
+               clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
+               goto maybe_reschedule;
+       }
+
+       /* handle incoming call acceptance */
+       if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) {
+               _debug("accepted");
+               ASSERTCMP(call->rx_data_post, ==, 0);
+               call->rx_data_post = 1;
+               read_lock_bh(&call->state_lock);
+               if (call->state < RXRPC_CALL_COMPLETE)
+                       set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
+               read_unlock_bh(&call->state_lock);
+       }
+
+       /* drain the out of sequence received packet queue into the packet Rx
+        * queue */
+       if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) {
+               while (call->rx_data_post == call->rx_first_oos)
+                       if (rxrpc_drain_rx_oos_queue(call) < 0)
+                               break;
+               goto maybe_reschedule;
+       }
+
+       /* other events may have been raised since we started checking */
+       goto maybe_reschedule;
+
+send_ACK_with_skew:
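+       /* note how far the serial being ACK'd lags behind the highest serial
+        * yet seen on this connection */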
+       ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
+                           ntohl(ack.serial));
+send_ACK:
+       hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
+       _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+              ntohl(hdr.serial),
+              ntohs(ack.maxSkew),
+              ntohl(ack.firstPacket),
+              ntohl(ack.previousPacket),
+              ntohl(ack.serial),
+              rxrpc_acks[ack.reason],
+              ack.nAcks);
+
+       del_timer_sync(&call->ack_timer);
+       if (ack.nAcks > 0)
+               set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
+       goto send_message_2;
+
+send_message:
+       _debug("send message");
+
+       hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
+       _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
+send_message_2:
+
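+       /* work out how many iovecs are actually in use and the total length
+        * to hand to sendmsg */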
+       len = iov[0].iov_len;
+       ioc = 1;
+       if (iov[4].iov_len) {
+               ioc = 5;
+               len += iov[4].iov_len;
+               len += iov[3].iov_len;
+               len += iov[2].iov_len;
+               len += iov[1].iov_len;
+       } else if (iov[3].iov_len) {
+               ioc = 4;
+               len += iov[3].iov_len;
+               len += iov[2].iov_len;
+               len += iov[1].iov_len;
+       } else if (iov[2].iov_len) {
+               ioc = 3;
+               len += iov[2].iov_len;
+               len += iov[1].iov_len;
+       } else if (iov[1].iov_len) {
+               ioc = 2;
+               len += iov[1].iov_len;
+       }
+
+       ret = kernel_sendmsg(call->conn->trans->local->socket,
+                            &msg, iov, ioc, len);
+       if (ret < 0) {
+               _debug("sendmsg failed: %d", ret);
+               read_lock_bh(&call->state_lock);
+               if (call->state < RXRPC_CALL_DEAD)
+                       rxrpc_queue_call(call);
+               read_unlock_bh(&call->state_lock);
+               goto error;
+       }
+
+       switch (genbit) {
+       case RXRPC_CALL_ABORT:
+               clear_bit(genbit, &call->events);
+               clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+               goto kill_ACKs;
+
+       case RXRPC_CALL_ACK_FINAL:
+               write_lock_bh(&call->state_lock);
+               if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
+                       call->state = RXRPC_CALL_COMPLETE;
+               write_unlock_bh(&call->state_lock);
+               goto kill_ACKs;
+
+       default:
+               clear_bit(genbit, &call->events);
+               switch (call->state) {
+               case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+               case RXRPC_CALL_CLIENT_RECV_REPLY:
+               case RXRPC_CALL_SERVER_RECV_REQUEST:
+               case RXRPC_CALL_SERVER_ACK_REQUEST:
+                       _debug("start ACK timer");
+                       rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
+                                         call->ackr_serial, false);
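+                       /* fall through */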
+               default:
+                       break;
+               }
+               goto maybe_reschedule;
+       }
+
+kill_ACKs:
+       del_timer_sync(&call->ack_timer);
+       if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events))
+               rxrpc_put_call(call);
+       clear_bit(RXRPC_CALL_ACK, &call->events);
+
+maybe_reschedule:
+       if (call->events || !skb_queue_empty(&call->rx_queue)) {
+               read_lock_bh(&call->state_lock);
+               if (call->state < RXRPC_CALL_DEAD)
+                       rxrpc_queue_call(call);
+               read_unlock_bh(&call->state_lock);
+       }
+
+       /* don't leave aborted connections on the accept queue */
+       if (call->state >= RXRPC_CALL_COMPLETE &&
+           !list_empty(&call->accept_link)) {
+               _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
+                      call, call->events, call->flags,
+                      ntohl(call->conn->cid));
+
+               read_lock_bh(&call->state_lock);
+               if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
+                   !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+                       rxrpc_queue_call(call);
+               read_unlock_bh(&call->state_lock);
+       }
+
+error:
+       clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
+       kfree(acks);
+
+       /* because we don't want two CPUs both processing the work item for one
+        * call at the same time, we use a flag to note when it's busy; however
+        * this means there's a race between clearing the flag and setting the
+        * work pending bit and the work item being processed again */
+       if (call->events && !work_pending(&call->processor)) {
+               _debug("jumpstart %x", ntohl(call->conn->cid));
+               rxrpc_queue_call(call);
+       }
+
+       _leave("");
+       return;
+
+no_mem:
+       _debug("out of memory");
+       goto maybe_reschedule;
+}
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
new file mode 100644 (file)
index 0000000..4d92d88
--- /dev/null
@@ -0,0 +1,804 @@
+/* RxRPC individual remote procedure call handling
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/circ_buf.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+struct kmem_cache *rxrpc_call_jar;
+LIST_HEAD(rxrpc_calls);
+DEFINE_RWLOCK(rxrpc_call_lock);
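+/* maximum lifespan of a call and the time a released call lingers before
+ * being marked dead, both in seconds */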
+static unsigned rxrpc_call_max_lifetime = 60;
+static unsigned rxrpc_dead_call_timeout = 2;
+
+static void rxrpc_destroy_call(struct work_struct *work);
+static void rxrpc_call_life_expired(unsigned long _call);
+static void rxrpc_dead_call_expired(unsigned long _call);
+static void rxrpc_ack_time_expired(unsigned long _call);
+static void rxrpc_resend_time_expired(unsigned long _call);
+
+/*
+ * allocate a new call
+ */
+static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+{
+       struct rxrpc_call *call;
+
+       call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
+       if (!call)
+               return NULL;
+
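+       /* the window of transmitted packets awaiting ACK is a power-of-two
+        * sized ring of skb pointers */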
+       call->acks_winsz = 16;
+       call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
+                                   gfp);
+       if (!call->acks_window) {
+               kmem_cache_free(rxrpc_call_jar, call);
+               return NULL;
+       }
+
+       setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
+                   (unsigned long) call);
+       setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
+                   (unsigned long) call);
+       setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
+                   (unsigned long) call);
+       setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
+                   (unsigned long) call);
+       INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
+       INIT_WORK(&call->processor, &rxrpc_process_call);
+       INIT_LIST_HEAD(&call->accept_link);
+       skb_queue_head_init(&call->rx_queue);
+       skb_queue_head_init(&call->rx_oos_queue);
+       init_waitqueue_head(&call->tx_waitq);
+       spin_lock_init(&call->lock);
+       rwlock_init(&call->state_lock);
+       atomic_set(&call->usage, 1);
+       call->debug_id = atomic_inc_return(&rxrpc_debug_id);
+       call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
+
+       memset(&call->sock_node, 0xed, sizeof(call->sock_node));
+
+       call->rx_data_expect = 1;
+       call->rx_data_eaten = 0;
+       call->rx_first_oos = 0;
+       call->ackr_win_top = call->rx_data_eaten + 1 + RXRPC_MAXACKS;
+       call->creation_jif = jiffies;
+       return call;
+}
+
+/*
+ * allocate a new client call and attempt to get a connection slot for it
+ */
+static struct rxrpc_call *rxrpc_alloc_client_call(
+       struct rxrpc_sock *rx,
+       struct rxrpc_transport *trans,
+       struct rxrpc_conn_bundle *bundle,
+       gfp_t gfp)
+{
+       struct rxrpc_call *call;
+       int ret;
+
+       _enter("");
+
+       ASSERT(rx != NULL);
+       ASSERT(trans != NULL);
+       ASSERT(bundle != NULL);
+
+       call = rxrpc_alloc_call(gfp);
+       if (!call)
+               return ERR_PTR(-ENOMEM);
+
+       sock_hold(&rx->sk);
+       call->socket = rx;
+       call->rx_data_post = 1;
+
+       ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
+       if (ret < 0) {
+               /* undo the socket ref and ACK window acquired above */
+               sock_put(&rx->sk);
+               kfree(call->acks_window);
+               kmem_cache_free(rxrpc_call_jar, call);
+               return ERR_PTR(ret);
+       }
+
+       spin_lock(&call->conn->trans->peer->lock);
+       list_add(&call->error_link, &call->conn->trans->peer->error_targets);
+       spin_unlock(&call->conn->trans->peer->lock);
+
+       call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
+       add_timer(&call->lifetimer);
+
+       _leave(" = %p", call);
+       return call;
+}
+
+/*
+ * set up a call for the given data
+ * - called in process context with IRQs enabled
+ */
+struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
+                                        struct rxrpc_transport *trans,
+                                        struct rxrpc_conn_bundle *bundle,
+                                        unsigned long user_call_ID,
+                                        int create,
+                                        gfp_t gfp)
+{
+       struct rxrpc_call *call, *candidate;
+       struct rb_node *p, *parent, **pp;
+
+       _enter("%p,%d,%d,%lx,%d",
+              rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
+              user_call_ID, create);
+
+       /* search the extant calls first for one that matches the specified
+        * user ID */
+       read_lock(&rx->call_lock);
+
+       p = rx->calls.rb_node;
+       while (p) {
+               call = rb_entry(p, struct rxrpc_call, sock_node);
+
+               if (user_call_ID < call->user_call_ID)
+                       p = p->rb_left;
+               else if (user_call_ID > call->user_call_ID)
+                       p = p->rb_right;
+               else
+                       goto found_extant_call;
+       }
+
+       read_unlock(&rx->call_lock);
+
+       if (!create || !trans)
+               return ERR_PTR(-EBADSLT);
+
+       /* not yet present - create a candidate for a new record and then
+        * redo the search */
+       candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
+       if (IS_ERR(candidate)) {
+               _leave(" = %ld", PTR_ERR(candidate));
+               return candidate;
+       }
+
+       candidate->user_call_ID = user_call_ID;
+       __set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);
+
+       write_lock(&rx->call_lock);
+
+       pp = &rx->calls.rb_node;
+       parent = NULL;
+       while (*pp) {
+               parent = *pp;
+               call = rb_entry(parent, struct rxrpc_call, sock_node);
+
+               if (user_call_ID < call->user_call_ID)
+                       pp = &(*pp)->rb_left;
+               else if (user_call_ID > call->user_call_ID)
+                       pp = &(*pp)->rb_right;
+               else
+                       goto found_extant_second;
+       }
+
+       /* second search also failed; add the new call */
+       call = candidate;
+       candidate = NULL;
+       rxrpc_get_call(call);
+
+       rb_link_node(&call->sock_node, parent, pp);
+       rb_insert_color(&call->sock_node, &rx->calls);
+       write_unlock(&rx->call_lock);
+
+       write_lock_bh(&rxrpc_call_lock);
+       list_add_tail(&call->link, &rxrpc_calls);
+       write_unlock_bh(&rxrpc_call_lock);
+
+       _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
+
+       _leave(" = %p [new]", call);
+       return call;
+
+       /* we found the call in the list immediately */
+found_extant_call:
+       rxrpc_get_call(call);
+       read_unlock(&rx->call_lock);
+       _leave(" = %p [extant %d]", call, atomic_read(&call->usage));
+       return call;
+
+       /* we found the call on the second time through the list */
+found_extant_second:
+       rxrpc_get_call(call);
+       write_unlock(&rx->call_lock);
+       rxrpc_put_call(candidate);
+       _leave(" = %p [second %d]", call, atomic_read(&call->usage));
+       return call;
+}
+
+/*
+ * set up an incoming call
+ * - called in process context with IRQs enabled
+ */
+struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
+                                      struct rxrpc_connection *conn,
+                                      struct rxrpc_header *hdr,
+                                      gfp_t gfp)
+{
+       struct rxrpc_call *call, *candidate;
+       struct rb_node **p, *parent;
+       __be32 call_id;
+
+       _enter(",%d,,%x", conn->debug_id, gfp);
+
+       ASSERT(rx != NULL);
+
+       candidate = rxrpc_alloc_call(gfp);
+       if (!candidate)
+               return ERR_PTR(-EBUSY);
+
+       candidate->socket = rx;
+       candidate->conn = conn;
+       candidate->cid = hdr->cid;
+       candidate->call_id = hdr->callNumber;
+       candidate->channel = ntohl(hdr->cid) & RXRPC_CHANNELMASK;
+       candidate->rx_data_post = 0;
+       candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
+       if (conn->security_ix > 0)
+               candidate->state = RXRPC_CALL_SERVER_SECURING;
+
+       write_lock_bh(&conn->lock);
+
+       /* set the channel for this call */
+       call = conn->channels[candidate->channel];
+       _debug("channel[%u] is %p", candidate->channel, call);
+       if (call && call->call_id == hdr->callNumber) {
+               /* already set; must've been a duplicate packet */
+               _debug("extant call [%d]", call->state);
+               ASSERTCMP(call->conn, ==, conn);
+
+               read_lock(&call->state_lock);
+               switch (call->state) {
+               case RXRPC_CALL_LOCALLY_ABORTED:
+                       if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
+                               rxrpc_queue_call(call);
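+                       /* fall through */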
+               case RXRPC_CALL_REMOTELY_ABORTED:
+                       read_unlock(&call->state_lock);
+                       goto aborted_call;
+               default:
+                       rxrpc_get_call(call);
+                       read_unlock(&call->state_lock);
+                       goto extant_call;
+               }
+       }
+
+       if (call) {
+               /* it seems the channel is still in use from the previous call
+                * - ditch the old binding if its call is now complete */
+               _debug("CALL: %u { %s }",
+                      call->debug_id, rxrpc_call_states[call->state]);
+
+               if (call->state >= RXRPC_CALL_COMPLETE) {
+                       conn->channels[call->channel] = NULL;
+               } else {
+                       write_unlock_bh(&conn->lock);
+                       kmem_cache_free(rxrpc_call_jar, candidate);
+                       _leave(" = -EBUSY");
+                       return ERR_PTR(-EBUSY);
+               }
+       }
+
+       /* check the call number isn't duplicate */
+       _debug("check dup");
+       call_id = hdr->callNumber;
+       p = &conn->calls.rb_node;
+       parent = NULL;
+       while (*p) {
+               parent = *p;
+               call = rb_entry(parent, struct rxrpc_call, conn_node);
+
+               if (call_id < call->call_id)
+                       p = &(*p)->rb_left;
+               else if (call_id > call->call_id)
+                       p = &(*p)->rb_right;
+               else
+                       goto old_call;
+       }
+
+       /* make the call available */
+       _debug("new call");
+       call = candidate;
+       candidate = NULL;
+       rb_link_node(&call->conn_node, parent, p);
+       rb_insert_color(&call->conn_node, &conn->calls);
+       conn->channels[call->channel] = call;
+       sock_hold(&rx->sk);
+       atomic_inc(&conn->usage);
+       write_unlock_bh(&conn->lock);
+
+       spin_lock(&conn->trans->peer->lock);
+       list_add(&call->error_link, &conn->trans->peer->error_targets);
+       spin_unlock(&conn->trans->peer->lock);
+
+       write_lock_bh(&rxrpc_call_lock);
+       list_add_tail(&call->link, &rxrpc_calls);
+       write_unlock_bh(&rxrpc_call_lock);
+
+       _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
+
+       call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
+       add_timer(&call->lifetimer);
+       _leave(" = %p {%d} [new]", call, call->debug_id);
+       return call;
+
+extant_call:
+       write_unlock_bh(&conn->lock);
+       kmem_cache_free(rxrpc_call_jar, candidate);
+       _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
+       return call;
+
+aborted_call:
+       write_unlock_bh(&conn->lock);
+       kmem_cache_free(rxrpc_call_jar, candidate);
+       _leave(" = -ECONNABORTED");
+       return ERR_PTR(-ECONNABORTED);
+
+old_call:
+       write_unlock_bh(&conn->lock);
+       kmem_cache_free(rxrpc_call_jar, candidate);
+       _leave(" = -ECONNRESET [old]");
+       return ERR_PTR(-ECONNRESET);
+}
+
+/*
+ * find an extant server call
+ * - called in process context with IRQs enabled
+ */
+struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
+                                         unsigned long user_call_ID)
+{
+       struct rxrpc_call *call;
+       struct rb_node *p;
+
+       _enter("%p,%lx", rx, user_call_ID);
+
+       /* search the extant calls for one that matches the specified user
+        * ID */
+       read_lock(&rx->call_lock);
+
+       p = rx->calls.rb_node;
+       while (p) {
+               call = rb_entry(p, struct rxrpc_call, sock_node);
+
+               if (user_call_ID < call->user_call_ID)
+                       p = p->rb_left;
+               else if (user_call_ID > call->user_call_ID)
+                       p = p->rb_right;
+               else
+                       goto found_extant_call;
+       }
+
+       read_unlock(&rx->call_lock);
+       _leave(" = NULL");
+       return NULL;
+
+       /* we found the call in the list immediately */
+found_extant_call:
+       rxrpc_get_call(call);
+       read_unlock(&rx->call_lock);
+       _leave(" = %p [%d]", call, atomic_read(&call->usage));
+       return call;
+}
+
+/*
+ * detach a call from a socket and set up for release
+ */
+void rxrpc_release_call(struct rxrpc_call *call)
+{
+       struct rxrpc_connection *conn = call->conn;
+       struct rxrpc_sock *rx = call->socket;
+
+       _enter("{%d,%d,%d,%d}",
+              call->debug_id, atomic_read(&call->usage),
+              atomic_read(&call->ackr_not_idle),
+              call->rx_first_oos);
+
+       spin_lock_bh(&call->lock);
+       if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
+               BUG();
+       spin_unlock_bh(&call->lock);
+
+       /* dissociate from the socket
+        * - the socket's ref on the call is passed to the death timer
+        */
+       _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
+
+       write_lock_bh(&rx->call_lock);
+       if (!list_empty(&call->accept_link)) {
+               _debug("unlinking once-pending call %p { e=%lx f=%lx }",
+                      call, call->events, call->flags);
+               ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
+               list_del_init(&call->accept_link);
+               sk_acceptq_removed(&rx->sk);
+       } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
+               rb_erase(&call->sock_node, &rx->calls);
+               memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
+               clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+       }
+       write_unlock_bh(&rx->call_lock);
+
+       /* free up the channel for reuse */
+       spin_lock(&conn->trans->client_lock);
+       write_lock_bh(&conn->lock);
+       write_lock(&call->state_lock);
+
+       if (conn->channels[call->channel] == call)
+               conn->channels[call->channel] = NULL;
+
+       if (conn->out_clientflag && conn->bundle) {
+               conn->avail_calls++;
+               switch (conn->avail_calls) {
+               case 1:
+                       list_move_tail(&conn->bundle_link,
+                                      &conn->bundle->avail_conns);
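+                       /* fall through */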
+               case 2 ... RXRPC_MAXCALLS - 1:
+                       ASSERT(conn->channels[0] == NULL ||
+                              conn->channels[1] == NULL ||
+                              conn->channels[2] == NULL ||
+                              conn->channels[3] == NULL);
+                       break;
+               case RXRPC_MAXCALLS:
+                       list_move_tail(&conn->bundle_link,
+                                      &conn->bundle->unused_conns);
+                       ASSERT(conn->channels[0] == NULL &&
+                              conn->channels[1] == NULL &&
+                              conn->channels[2] == NULL &&
+                              conn->channels[3] == NULL);
+                       break;
+               default:
+                       printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
+                              conn->avail_calls);
+                       BUG();
+               }
+       }
+
+       spin_unlock(&conn->trans->client_lock);
+
+       if (call->state < RXRPC_CALL_COMPLETE &&
+           call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
+               _debug("+++ ABORTING STATE %d +++\n", call->state);
+               call->state = RXRPC_CALL_LOCALLY_ABORTED;
+               call->abort_code = RX_CALL_DEAD;
+               set_bit(RXRPC_CALL_ABORT, &call->events);
+               rxrpc_queue_call(call);
+       }
+       write_unlock(&call->state_lock);
+       write_unlock_bh(&conn->lock);
+
+       /* clean up the Rx queue */
+       if (!skb_queue_empty(&call->rx_queue) ||
+           !skb_queue_empty(&call->rx_oos_queue)) {
+               struct rxrpc_skb_priv *sp;
+               struct sk_buff *skb;
+
+               _debug("purge Rx queues");
+
+               spin_lock_bh(&call->lock);
+               while ((skb = skb_dequeue(&call->rx_queue)) ||
+                      (skb = skb_dequeue(&call->rx_oos_queue))) {
+                       sp = rxrpc_skb(skb);
+                       if (sp->call) {
+                               ASSERTCMP(sp->call, ==, call);
+                               rxrpc_put_call(call);
+                               sp->call = NULL;
+                       }
+                       skb->destructor = NULL;
+                       spin_unlock_bh(&call->lock);
+
+                       _debug("- zap %s %%%u #%u",
+                              rxrpc_pkts[sp->hdr.type],
+                              ntohl(sp->hdr.serial),
+                              ntohl(sp->hdr.seq));
+                       rxrpc_free_skb(skb);
+                       spin_lock_bh(&call->lock);
+               }
+               spin_unlock_bh(&call->lock);
+
+               ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
+       }
+
+       del_timer_sync(&call->resend_timer);
+       del_timer_sync(&call->ack_timer);
+       del_timer_sync(&call->lifetimer);
+       call->deadspan.expires = jiffies + rxrpc_dead_call_timeout * HZ;
+       add_timer(&call->deadspan);
+
+       _leave("");
+}
+
+/*
+ * handle a dead call being ready for reaping
+ */
+static void rxrpc_dead_call_expired(unsigned long _call)
+{
+       struct rxrpc_call *call = (struct rxrpc_call *) _call;
+
+       _enter("{%d}", call->debug_id);
+
+       write_lock_bh(&call->state_lock);
+       call->state = RXRPC_CALL_DEAD;
+       write_unlock_bh(&call->state_lock);
+       rxrpc_put_call(call);
+}
+
+/*
+ * mark a call as to be released, aborting it if it's still in progress
+ * - called with softirqs disabled
+ */
+static void rxrpc_mark_call_released(struct rxrpc_call *call)
+{
+       bool sched;
+
+       write_lock(&call->state_lock);
+       if (call->state < RXRPC_CALL_DEAD) {
+               sched = false;
+               if (call->state < RXRPC_CALL_COMPLETE) {
+                       _debug("abort call %p", call);
+                       call->state = RXRPC_CALL_LOCALLY_ABORTED;
+                       call->abort_code = RX_CALL_DEAD;
+                       if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
+                               sched = true;
+               }
+               if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+                       sched = true;
+               if (sched)
+                       rxrpc_queue_call(call);
+       }
+       write_unlock(&call->state_lock);
+}
+
+/*
+ * release all the calls associated with a socket
+ */
+void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
+{
+       struct rxrpc_call *call;
+       struct rb_node *p;
+
+       _enter("%p", rx);
+
+       read_lock_bh(&rx->call_lock);
+
+       /* mark all the calls as no longer wanting incoming packets */
+       for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
+               call = rb_entry(p, struct rxrpc_call, sock_node);
+               rxrpc_mark_call_released(call);
+       }
+
+       /* kill the not-yet-accepted incoming calls */
+       list_for_each_entry(call, &rx->secureq, accept_link) {
+               rxrpc_mark_call_released(call);
+       }
+
+       list_for_each_entry(call, &rx->acceptq, accept_link) {
+               rxrpc_mark_call_released(call);
+       }
+
+       read_unlock_bh(&rx->call_lock);
+       _leave("");
+}
+
+/*
+ * release a call
+ */
+void __rxrpc_put_call(struct rxrpc_call *call)
+{
+       ASSERT(call != NULL);
+
+       _enter("%p{u=%d}", call, atomic_read(&call->usage));
+
+       ASSERTCMP(atomic_read(&call->usage), >, 0);
+
+       if (atomic_dec_and_test(&call->usage)) {
+               _debug("call %d dead", call->debug_id);
+               ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
+               rxrpc_queue_work(&call->destroyer);
+       }
+       _leave("");
+}
+
+/*
+ * clean up a call
+ */
+static void rxrpc_cleanup_call(struct rxrpc_call *call)
+{
+       _net("DESTROY CALL %d", call->debug_id);
+
+       ASSERT(call->socket);
+
+       memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
+
+       del_timer_sync(&call->lifetimer);
+       del_timer_sync(&call->deadspan);
+       del_timer_sync(&call->ack_timer);
+       del_timer_sync(&call->resend_timer);
+
+       ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
+       ASSERTCMP(call->events, ==, 0);
+       if (work_pending(&call->processor)) {
+               _debug("defer destroy");
+               rxrpc_queue_work(&call->destroyer);
+               return;
+       }
+
+       if (call->conn) {
+               spin_lock(&call->conn->trans->peer->lock);
+               list_del(&call->error_link);
+               spin_unlock(&call->conn->trans->peer->lock);
+
+               write_lock_bh(&call->conn->lock);
+               rb_erase(&call->conn_node, &call->conn->calls);
+               write_unlock_bh(&call->conn->lock);
+               rxrpc_put_connection(call->conn);
+       }
+
+       if (call->acks_window) {
+               _debug("kill Tx window %d",
+                      CIRC_CNT(call->acks_head, call->acks_tail,
+                               call->acks_winsz));
+               smp_mb();
+               while (CIRC_CNT(call->acks_head, call->acks_tail,
+                               call->acks_winsz) > 0) {
+                       struct rxrpc_skb_priv *sp;
+                       unsigned long _skb;
+
+                       _skb = call->acks_window[call->acks_tail] & ~1;
+                       sp = rxrpc_skb((struct sk_buff *) _skb);
+                       _debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
+                       rxrpc_free_skb((struct sk_buff *) _skb);
+                       call->acks_tail =
+                               (call->acks_tail + 1) & (call->acks_winsz - 1);
+               }
+
+               kfree(call->acks_window);
+       }
+
+       rxrpc_free_skb(call->tx_pending);
+
+       rxrpc_purge_queue(&call->rx_queue);
+       ASSERT(skb_queue_empty(&call->rx_oos_queue));
+       sock_put(&call->socket->sk);
+       kmem_cache_free(rxrpc_call_jar, call);
+}
+
+/*
+ * destroy a call
+ */
+static void rxrpc_destroy_call(struct work_struct *work)
+{
+       struct rxrpc_call *call =
+               container_of(work, struct rxrpc_call, destroyer);
+
+       _enter("%p{%d,%d,%p}",
+              call, atomic_read(&call->usage), call->channel, call->conn);
+
+       ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
+
+       write_lock_bh(&rxrpc_call_lock);
+       list_del_init(&call->link);
+       write_unlock_bh(&rxrpc_call_lock);
+
+       rxrpc_cleanup_call(call);
+       _leave("");
+}
+
+/*
+ * preemptively destroy all the call records rather than waiting for them to
+ * time out
+ */
+void __exit rxrpc_destroy_all_calls(void)
+{
+       struct rxrpc_call *call;
+
+       _enter("");
+       write_lock_bh(&rxrpc_call_lock);
+
+       while (!list_empty(&rxrpc_calls)) {
+               call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
+               _debug("Zapping call %p", call);
+
+               list_del_init(&call->link);
+
+               switch (atomic_read(&call->usage)) {
+               case 0:
+                       ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
+                       break;
+               case 1:
+                       if (del_timer_sync(&call->deadspan) != 0 &&
+                           call->state != RXRPC_CALL_DEAD)
+                               rxrpc_dead_call_expired((unsigned long) call);
+                       if (call->state != RXRPC_CALL_DEAD)
+                               break;
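+                       /* fall through */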
+               default:
+                       printk(KERN_ERR "RXRPC:"
+                              " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
+                              call, atomic_read(&call->usage),
+                              atomic_read(&call->ackr_not_idle),
+                              rxrpc_call_states[call->state],
+                              call->flags, call->events);
+                       if (!skb_queue_empty(&call->rx_queue))
+                               printk(KERN_ERR"RXRPC: Rx queue occupied\n");
+                       if (!skb_queue_empty(&call->rx_oos_queue))
+                               printk(KERN_ERR"RXRPC: OOS queue occupied\n");
+                       break;
+               }
+
+               write_unlock_bh(&rxrpc_call_lock);
+               cond_resched();
+               write_lock_bh(&rxrpc_call_lock);
+       }
+
+       write_unlock_bh(&rxrpc_call_lock);
+       _leave("");
+}
+
+/*
+ * handle call lifetime being exceeded
+ */
+static void rxrpc_call_life_expired(unsigned long _call)
+{
+       struct rxrpc_call *call = (struct rxrpc_call *) _call;
+
+       if (call->state >= RXRPC_CALL_COMPLETE)
+               return;
+
+       _enter("{%d}", call->debug_id);
+       read_lock_bh(&call->state_lock);
+       if (call->state < RXRPC_CALL_COMPLETE) {
+               set_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
+               rxrpc_queue_call(call);
+       }
+       read_unlock_bh(&call->state_lock);
+}
+
+/*
+ * handle resend timer expiry
+ */
+static void rxrpc_resend_time_expired(unsigned long _call)
+{
+       struct rxrpc_call *call = (struct rxrpc_call *) _call;
+
+       _enter("{%d}", call->debug_id);
+
+       if (call->state >= RXRPC_CALL_COMPLETE)
+               return;
+
+       read_lock_bh(&call->state_lock);
+       clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+       if (call->state < RXRPC_CALL_COMPLETE &&
+           !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+               rxrpc_queue_call(call);
+       read_unlock_bh(&call->state_lock);
+}
+
+/*
+ * handle ACK timer expiry
+ */
+static void rxrpc_ack_time_expired(unsigned long _call)
+{
+       struct rxrpc_call *call = (struct rxrpc_call *) _call;
+
+       _enter("{%d}", call->debug_id);
+
+       if (call->state >= RXRPC_CALL_COMPLETE)
+               return;
+
+       read_lock_bh(&call->state_lock);
+       if (call->state < RXRPC_CALL_COMPLETE &&
+           !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
+               rxrpc_queue_call(call);
+       read_unlock_bh(&call->state_lock);
+}
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
new file mode 100644 (file)
index 0000000..43cb3e0
--- /dev/null
@@ -0,0 +1,911 @@
+/* RxRPC virtual connection handler
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/crypto.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static void rxrpc_connection_reaper(struct work_struct *work);
+
+LIST_HEAD(rxrpc_connections);
+DEFINE_RWLOCK(rxrpc_connection_lock);
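+/* how long an unused connection lingers before the reaper discards it */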
+static unsigned long rxrpc_connection_timeout = 10 * 60;
+static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
+
+/*
+ * allocate a new client connection bundle
+ */
+static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
+{
+       struct rxrpc_conn_bundle *bundle;
+
+       _enter("");
+
+       bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
+       if (bundle) {
+               INIT_LIST_HEAD(&bundle->unused_conns);
+               INIT_LIST_HEAD(&bundle->avail_conns);
+               INIT_LIST_HEAD(&bundle->busy_conns);
+               init_waitqueue_head(&bundle->chanwait);
+               atomic_set(&bundle->usage, 1);
+       }
+
+       _leave(" = %p", bundle);
+       return bundle;
+}
+
+/*
+ * compare bundle parameters with what we're looking for
+ * - return -ve, 0 or +ve
+ */
+static inline
+int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
+                    struct key *key, __be16 service_id)
+{
+       return (bundle->service_id - service_id) ?:
+               ((unsigned long) bundle->key - (unsigned long) key);
+}
+
+/*
+ * get bundle of client connections that a client socket can make use of
+ */
+struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
+                                          struct rxrpc_transport *trans,
+                                          struct key *key,
+                                          __be16 service_id,
+                                          gfp_t gfp)
+{
+       struct rxrpc_conn_bundle *bundle, *candidate;
+       struct rb_node *p, *parent, **pp;
+
+       _enter("%p{%x},%x,%hx,",
+              rx, key_serial(key), trans->debug_id, ntohs(service_id));
+
+       if (rx->trans == trans && rx->bundle) {
+               atomic_inc(&rx->bundle->usage);
+               return rx->bundle;
+       }
+
+       /* search the extant bundles first for one that matches the specified
+        * user ID */
+       spin_lock(&trans->client_lock);
+
+       p = trans->bundles.rb_node;
+       while (p) {
+               bundle = rb_entry(p, struct rxrpc_conn_bundle, node);
+
+               if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
+                       p = p->rb_left;
+               else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
+                       p = p->rb_right;
+               else
+                       goto found_extant_bundle;
+       }
+
+       spin_unlock(&trans->client_lock);
+
+       /* not yet present - create a candidate for a new record and then
+        * redo the search */
+       candidate = rxrpc_alloc_bundle(gfp);
+       if (!candidate) {
+               _leave(" = -ENOMEM");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       candidate->key = key_get(key);
+       candidate->service_id = service_id;
+
+       spin_lock(&trans->client_lock);
+
+       pp = &trans->bundles.rb_node;
+       parent = NULL;
+       while (*pp) {
+               parent = *pp;
+               bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);
+
+               if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
+                       pp = &(*pp)->rb_left;
+               else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
+                       pp = &(*pp)->rb_right;
+               else
+                       goto found_extant_second;
+       }
+
+       /* second search also failed; add the new bundle */
+       bundle = candidate;
+       candidate = NULL;
+
+       rb_link_node(&bundle->node, parent, pp);
+       rb_insert_color(&bundle->node, &trans->bundles);
+       spin_unlock(&trans->client_lock);
+       _net("BUNDLE new on trans %d", trans->debug_id);
+       if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
+               atomic_inc(&bundle->usage);
+               rx->bundle = bundle;
+       }
+       _leave(" = %p [new]", bundle);
+       return bundle;
+
+       /* we found the bundle in the list immediately */
+found_extant_bundle:
+       atomic_inc(&bundle->usage);
+       spin_unlock(&trans->client_lock);
+       _net("BUNDLE old on trans %d", trans->debug_id);
+       if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
+               atomic_inc(&bundle->usage);
+               rx->bundle = bundle;
+       }
+       _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
+       return bundle;
+
+       /* we found the bundle on the second time through the list */
+found_extant_second:
+       atomic_inc(&bundle->usage);
+       spin_unlock(&trans->client_lock);
+       kfree(candidate);
+       _net("BUNDLE old2 on trans %d", trans->debug_id);
+       if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
+               atomic_inc(&bundle->usage);
+               rx->bundle = bundle;
+       }
+       _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
+       return bundle;
+}
+
+/*
+ * release a bundle
+ */
+void rxrpc_put_bundle(struct rxrpc_transport *trans,
+                     struct rxrpc_conn_bundle *bundle)
+{
+       _enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage));
+
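+       /* drop the ref; if it was the last one, take the client lock so the
+        * bundle can be unlinked and freed atomically */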
+       if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
+               _debug("Destroy bundle");
+               rb_erase(&bundle->node, &trans->bundles);
+               spin_unlock(&trans->client_lock);
+               ASSERT(list_empty(&bundle->unused_conns));
+               ASSERT(list_empty(&bundle->avail_conns));
+               ASSERT(list_empty(&bundle->busy_conns));
+               ASSERTCMP(bundle->num_conns, ==, 0);
+               key_put(bundle->key);
+               kfree(bundle);
+       }
+
+       _leave("");
+}
+
+/*
+ * allocate a new connection
+ */
+static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
+{
+       struct rxrpc_connection *conn;
+
+       _enter("");
+
+       conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
+       if (conn) {
+               INIT_WORK(&conn->processor, &rxrpc_process_connection);
+               INIT_LIST_HEAD(&conn->bundle_link);
+               conn->calls = RB_ROOT;
+               skb_queue_head_init(&conn->rx_queue);
+               rwlock_init(&conn->lock);
+               spin_lock_init(&conn->state_lock);
+               atomic_set(&conn->usage, 1);
+               conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
+               conn->avail_calls = RXRPC_MAXCALLS;
+               conn->size_align = 4;
+               conn->header_size = sizeof(struct rxrpc_header);
+       }
+
+       _leave(" = %p{%d}", conn, conn->debug_id);
+       return conn;
+}
+
+/*
+ * assign a connection ID to a connection and add it to the transport's
+ * connection lookup tree
+ * - called with transport client lock held
+ */
+static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
+{
+       struct rxrpc_connection *xconn;
+       struct rb_node *parent, **p;
+       __be32 epoch;
+       u32 real_conn_id;
+
+       _enter("");
+
+       epoch = conn->epoch;
+
+       write_lock_bh(&conn->trans->conn_lock);
+
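+       /* step the ID counter in RXRPC_CID_INC-sized increments, wrapping
+        * back to RXRPC_CID_INC rather than zero, and propose the result as
+        * this connection's ID */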
+       conn->trans->conn_idcounter += RXRPC_CID_INC;
+       if (conn->trans->conn_idcounter < RXRPC_CID_INC)
+               conn->trans->conn_idcounter = RXRPC_CID_INC;
+       real_conn_id = conn->trans->conn_idcounter;
+
+attempt_insertion:
+       parent = NULL;
+       p = &conn->trans->client_conns.rb_node;
+
+       while (*p) {
+               parent = *p;
+               xconn = rb_entry(parent, struct rxrpc_connection, node);
+
+               if (epoch < xconn->epoch)
+                       p = &(*p)->rb_left;
+               else if (epoch > xconn->epoch)
+                       p = &(*p)->rb_right;
+               else if (real_conn_id < xconn->real_conn_id)
+                       p = &(*p)->rb_left;
+               else if (real_conn_id > xconn->real_conn_id)
+                       p = &(*p)->rb_right;
+               else
+                       goto id_exists;
+       }
+
+       /* we've found a suitable hole - arrange for this connection to occupy
+        * it */
+       rb_link_node(&conn->node, parent, p);
+       rb_insert_color(&conn->node, &conn->trans->client_conns);
+
+       conn->real_conn_id = real_conn_id;
+       conn->cid = htonl(real_conn_id);
+       write_unlock_bh(&conn->trans->conn_lock);
+       _leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
+       return;
+
+       /* we found a connection with the proposed ID - walk the tree from that
+        * point looking for the next unused ID */
+id_exists:
+       for (;;) {
+               real_conn_id += RXRPC_CID_INC;
+               if (real_conn_id < RXRPC_CID_INC) {
+                       real_conn_id = RXRPC_CID_INC;
+                       conn->trans->conn_idcounter = real_conn_id;
+                       goto attempt_insertion;
+               }
+
+               parent = rb_next(parent);
+               if (!parent)
+                       goto attempt_insertion;
+
+               xconn = rb_entry(parent, struct rxrpc_connection, node);
+               if (epoch < xconn->epoch ||
+                   real_conn_id < xconn->real_conn_id)
+                       goto attempt_insertion;
+       }
+}
+
+/*
+ * add a call to a connection's call-by-ID tree
+ */
+static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
+                                     struct rxrpc_call *call)
+{
+       struct rxrpc_call *xcall;
+       struct rb_node *parent, **p;
+       __be32 call_id;
+
+       write_lock_bh(&conn->lock);
+
+       call_id = call->call_id;
+       p = &conn->calls.rb_node;
+       parent = NULL;
+       while (*p) {
+               parent = *p;
+               xcall = rb_entry(parent, struct rxrpc_call, conn_node);
+
+               if (call_id < xcall->call_id)
+                       p = &(*p)->rb_left;
+               else if (call_id > xcall->call_id)
+                       p = &(*p)->rb_right;
+               else
+                       BUG();
+       }
+
+       rb_link_node(&call->conn_node, parent, p);
+       rb_insert_color(&call->conn_node, &conn->calls);
+
+       write_unlock_bh(&conn->lock);
+}
+
+/*
+ * connect a call on an exclusive connection
+ */
+static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
+                                  struct rxrpc_transport *trans,
+                                  __be16 service_id,
+                                  struct rxrpc_call *call,
+                                  gfp_t gfp)
+{
+       struct rxrpc_connection *conn;
+       int chan, ret;
+
+       _enter("");
+
+       conn = rx->conn;
+       if (!conn) {
+               /* not yet present - create a candidate for a new connection
+                * and then redo the check */
+               conn = rxrpc_alloc_connection(gfp);
+               if (!conn) {
+                       _leave(" = -ENOMEM");
+                       return -ENOMEM;
+               }
+
+               conn->trans = trans;
+               conn->bundle = NULL;
+               conn->service_id = service_id;
+               conn->epoch = rxrpc_epoch;
+               conn->in_clientflag = 0;
+               conn->out_clientflag = RXRPC_CLIENT_INITIATED;
+               conn->cid = 0;
+               conn->state = RXRPC_CONN_CLIENT;
+               conn->avail_calls = RXRPC_MAXCALLS - 1;
+               conn->security_level = rx->min_sec_level;
+               conn->key = key_get(rx->key);
+
+               ret = rxrpc_init_client_conn_security(conn);
+               if (ret < 0) {
+                       key_put(conn->key);
+                       kfree(conn);
+                       _leave(" = %d [key]", ret);
+                       return ret;
+               }
+
+               write_lock_bh(&rxrpc_connection_lock);
+               list_add_tail(&conn->link, &rxrpc_connections);
+               write_unlock_bh(&rxrpc_connection_lock);
+
+               spin_lock(&trans->client_lock);
+               atomic_inc(&trans->usage);
+
+               _net("CONNECT EXCL new %d on TRANS %d",
+                    conn->debug_id, conn->trans->debug_id);
+
+               rxrpc_assign_connection_id(conn);
+               rx->conn = conn;
+       }
+
+       /* we've got a connection with a free channel and we can now attach the
+        * call to it
+        * - we're holding the transport's client lock
+        * - we're holding a reference on the connection
+        */
+       for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
+               if (!conn->channels[chan])
+                       goto found_channel;
+       goto no_free_channels;
+
+found_channel:
+       atomic_inc(&conn->usage);
+       conn->channels[chan] = call;
+       call->conn = conn;
+       call->channel = chan;
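+       /* the channel number occupies the bottom bits of the CID; call
+        * numbers simply count upwards per connection */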
+       call->cid = conn->cid | htonl(chan);
+       call->call_id = htonl(++conn->call_counter);
+
+       _net("CONNECT client on conn %d chan %d as call %x",
+            conn->debug_id, chan, ntohl(call->call_id));
+
+       spin_unlock(&trans->client_lock);
+
+       rxrpc_add_call_ID_to_conn(conn, call);
+       _leave(" = 0");
+       return 0;
+
+no_free_channels:
+       spin_unlock(&trans->client_lock);
+       _leave(" = -ENOSR");
+       return -ENOSR;
+}
+
+/*
+ * find a connection for a call
+ * - called in process context with IRQs enabled
+ */
+int rxrpc_connect_call(struct rxrpc_sock *rx,
+                      struct rxrpc_transport *trans,
+                      struct rxrpc_conn_bundle *bundle,
+                      struct rxrpc_call *call,
+                      gfp_t gfp)
+{
+       struct rxrpc_connection *conn, *candidate;
+       int chan, ret;
+
+       DECLARE_WAITQUEUE(myself, current);
+
+       _enter("%p,%lx,", rx, call->user_call_ID);
+
+       if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
+               return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
+                                              call, gfp);
+
+       spin_lock(&trans->client_lock);
+       for (;;) {
+               /* see if the bundle has a call slot available */
+               if (!list_empty(&bundle->avail_conns)) {
+                       _debug("avail");
+                       conn = list_entry(bundle->avail_conns.next,
+                                         struct rxrpc_connection,
+                                         bundle_link);
+                       if (--conn->avail_calls == 0)
+                               list_move(&conn->bundle_link,
+                                         &bundle->busy_conns);
+                       ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
+                       ASSERT(conn->channels[0] == NULL ||
+                              conn->channels[1] == NULL ||
+                              conn->channels[2] == NULL ||
+                              conn->channels[3] == NULL);
+                       atomic_inc(&conn->usage);
+                       break;
+               }
+
+               if (!list_empty(&bundle->unused_conns)) {
+                       _debug("unused");
+                       conn = list_entry(bundle->unused_conns.next,
+                                         struct rxrpc_connection,
+                                         bundle_link);
+                       ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
+                       conn->avail_calls = RXRPC_MAXCALLS - 1;
+                       ASSERT(conn->channels[0] == NULL &&
+                              conn->channels[1] == NULL &&
+                              conn->channels[2] == NULL &&
+                              conn->channels[3] == NULL);
+                       atomic_inc(&conn->usage);
+                       list_move(&conn->bundle_link, &bundle->avail_conns);
+                       break;
+               }
+
+               /* need to allocate a new connection */
+               _debug("get new conn [%d]", bundle->num_conns);
+
+               spin_unlock(&trans->client_lock);
+
+               if (signal_pending(current))
+                       goto interrupted;
+
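+               /* cap a bundle at 20 connections; beyond that we sleep until
+                * a channel comes free (or give up if we can't wait) */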
+               if (bundle->num_conns >= 20) {
+                       _debug("too many conns");
+
+                       if (!(gfp & __GFP_WAIT)) {
+                               _leave(" = -EAGAIN");
+                               return -EAGAIN;
+                       }
+
+                       add_wait_queue(&bundle->chanwait, &myself);
+                       for (;;) {
+                               set_current_state(TASK_INTERRUPTIBLE);
+                               if (bundle->num_conns < 20 ||
+                                   !list_empty(&bundle->unused_conns) ||
+                                   !list_empty(&bundle->avail_conns))
+                                       break;
+                               if (signal_pending(current))
+                                       goto interrupted_dequeue;
+                               schedule();
+                       }
+                       remove_wait_queue(&bundle->chanwait, &myself);
+                       __set_current_state(TASK_RUNNING);
+                       spin_lock(&trans->client_lock);
+                       continue;
+               }
+
+               /* not yet present - create a candidate for a new connection
+                * and then redo the check */
+               candidate = rxrpc_alloc_connection(gfp);
+               if (!candidate) {
+                       _leave(" = -ENOMEM");
+                       return -ENOMEM;
+               }
+
+               candidate->trans = trans;
+               candidate->bundle = bundle;
+               candidate->service_id = bundle->service_id;
+               candidate->epoch = rxrpc_epoch;
+               candidate->in_clientflag = 0;
+               candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
+               candidate->cid = 0;
+               candidate->state = RXRPC_CONN_CLIENT;
+               candidate->avail_calls = RXRPC_MAXCALLS;
+               candidate->security_level = rx->min_sec_level;
+               candidate->key = key_get(bundle->key);
+
+               ret = rxrpc_init_client_conn_security(candidate);
+               if (ret < 0) {
+                       key_put(candidate->key);
+                       kfree(candidate);
+                       _leave(" = %d [key]", ret);
+                       return ret;
+               }
+
+               write_lock_bh(&rxrpc_connection_lock);
+               list_add_tail(&candidate->link, &rxrpc_connections);
+               write_unlock_bh(&rxrpc_connection_lock);
+
+               spin_lock(&trans->client_lock);
+
+               list_add(&candidate->bundle_link, &bundle->unused_conns);
+               bundle->num_conns++;
+               atomic_inc(&bundle->usage);
+               atomic_inc(&trans->usage);
+
+               _net("CONNECT new %d on TRANS %d",
+                    candidate->debug_id, candidate->trans->debug_id);
+
+               rxrpc_assign_connection_id(candidate);
+               if (candidate->security)
+                       candidate->security->prime_packet_security(candidate);
+
+               /* leave the candidate lurking in zombie mode attached to the
+                * bundle until we're ready for it */
+               rxrpc_put_connection(candidate);
+               candidate = NULL;
+       }
+
+       /* we've got a connection with a free channel and we can now attach the
+        * call to it
+        * - we're holding the transport's client lock
+        * - we're holding a reference on the connection
+        * - we're holding a reference on the bundle
+        */
+       for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
+               if (!conn->channels[chan])
+                       goto found_channel;
+       ASSERT(conn->channels[0] == NULL ||
+              conn->channels[1] == NULL ||
+              conn->channels[2] == NULL ||
+              conn->channels[3] == NULL);
+       BUG();
+
+found_channel:
+       conn->channels[chan] = call;
+       call->conn = conn;
+       call->channel = chan;
+       call->cid = conn->cid | htonl(chan);
+       call->call_id = htonl(++conn->call_counter);
+
+       _net("CONNECT client on conn %d chan %d as call %x",
+            conn->debug_id, chan, ntohl(call->call_id));
+
+       ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
+       spin_unlock(&trans->client_lock);
+
+       rxrpc_add_call_ID_to_conn(conn, call);
+
+       _leave(" = 0");
+       return 0;
+
+interrupted_dequeue:
+       remove_wait_queue(&bundle->chanwait, &myself);
+       __set_current_state(TASK_RUNNING);
+interrupted:
+       _leave(" = -ERESTARTSYS");
+       return -ERESTARTSYS;
+}
+
+/*
+ * get a record of an incoming connection
+ */
+struct rxrpc_connection *
+rxrpc_incoming_connection(struct rxrpc_transport *trans,
+                         struct rxrpc_header *hdr,
+                         gfp_t gfp)
+{
+       struct rxrpc_connection *conn, *candidate = NULL;
+       struct rb_node *p, **pp;
+       const char *new = "old";
+       __be32 epoch;
+       u32 conn_id;
+
+       _enter("");
+
+       ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);
+
+       epoch = hdr->epoch;
+       conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
+
+       /* search the connection list first */
+       read_lock_bh(&trans->conn_lock);
+
+       p = trans->server_conns.rb_node;
+       while (p) {
+               conn = rb_entry(p, struct rxrpc_connection, node);
+
+               _debug("maybe %x", conn->real_conn_id);
+
+               if (epoch < conn->epoch)
+                       p = p->rb_left;
+               else if (epoch > conn->epoch)
+                       p = p->rb_right;
+               else if (conn_id < conn->real_conn_id)
+                       p = p->rb_left;
+               else if (conn_id > conn->real_conn_id)
+                       p = p->rb_right;
+               else
+                       goto found_extant_connection;
+       }
+       read_unlock_bh(&trans->conn_lock);
+
+       /* not yet present - create a candidate for a new record and then
+        * redo the search */
+       candidate = rxrpc_alloc_connection(gfp);
+       if (!candidate) {
+               _leave(" = -ENOMEM");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       candidate->trans = trans;
+       candidate->epoch = hdr->epoch;
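+       /* keep only the connection portion of the CID; the bottom bits carry
+        * the channel number */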
+       candidate->cid = hdr->cid & __constant_cpu_to_be32(RXRPC_CIDMASK);
+       candidate->service_id = hdr->serviceId;
+       candidate->security_ix = hdr->securityIndex;
+       candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
+       candidate->out_clientflag = 0;
+       candidate->real_conn_id = conn_id;
+       candidate->state = RXRPC_CONN_SERVER;
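+       /* connections addressed to a service start out unsecured; a
+        * successful security exchange upgrades them later */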
+       if (candidate->service_id)
+               candidate->state = RXRPC_CONN_SERVER_UNSECURED;
+
+       write_lock_bh(&trans->conn_lock);
+
+       pp = &trans->server_conns.rb_node;
+       p = NULL;
+       while (*pp) {
+               p = *pp;
+               conn = rb_entry(p, struct rxrpc_connection, node);
+
+               if (epoch < conn->epoch)
+                       pp = &(*pp)->rb_left;
+               else if (epoch > conn->epoch)
+                       pp = &(*pp)->rb_right;
+               else if (conn_id < conn->real_conn_id)
+                       pp = &(*pp)->rb_left;
+               else if (conn_id > conn->real_conn_id)
+                       pp = &(*pp)->rb_right;
+               else
+                       goto found_extant_second;
+       }
+
+       /* we can now add the new candidate to the list */
+       conn = candidate;
+       candidate = NULL;
+       rb_link_node(&conn->node, p, pp);
+       rb_insert_color(&conn->node, &trans->server_conns);
+       atomic_inc(&conn->trans->usage);
+
+       write_unlock_bh(&trans->conn_lock);
+
+       write_lock_bh(&rxrpc_connection_lock);
+       list_add_tail(&conn->link, &rxrpc_connections);
+       write_unlock_bh(&rxrpc_connection_lock);
+
+       new = "new";
+
+success:
+       _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);
+
+       _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
+       return conn;
+
+       /* we found the connection in the list immediately */
+found_extant_connection:
+       if (hdr->securityIndex != conn->security_ix) {
+               read_unlock_bh(&trans->conn_lock);
+               goto security_mismatch;
+       }
+       atomic_inc(&conn->usage);
+       read_unlock_bh(&trans->conn_lock);
+       goto success;
+
+       /* we found the connection on the second time through the list */
+found_extant_second:
+       if (hdr->securityIndex != conn->security_ix) {
+               write_unlock_bh(&trans->conn_lock);
+               goto security_mismatch;
+       }
+       atomic_inc(&conn->usage);
+       write_unlock_bh(&trans->conn_lock);
+       kfree(candidate);
+       goto success;
+
+security_mismatch:
+       kfree(candidate);
+       _leave(" = -EKEYREJECTED");
+       return ERR_PTR(-EKEYREJECTED);
+}
+
+/*
+ * find a connection based on transport and RxRPC connection ID for an incoming
+ * packet
+ */
+struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
+                                              struct rxrpc_header *hdr)
+{
+       struct rxrpc_connection *conn;
+       struct rb_node *p;
+       __be32 epoch;
+       u32 conn_id;
+
+       _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);
+
+       read_lock_bh(&trans->conn_lock);
+
+       conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
+       epoch = hdr->epoch;
+
+       if (hdr->flags & RXRPC_CLIENT_INITIATED)
+               p = trans->server_conns.rb_node;
+       else
+               p = trans->client_conns.rb_node;
+
+       while (p) {
+               conn = rb_entry(p, struct rxrpc_connection, node);
+
+               _debug("maybe %x", conn->real_conn_id);
+
+               if (epoch < conn->epoch)
+                       p = p->rb_left;
+               else if (epoch > conn->epoch)
+                       p = p->rb_right;
+               else if (conn_id < conn->real_conn_id)
+                       p = p->rb_left;
+               else if (conn_id > conn->real_conn_id)
+                       p = p->rb_right;
+               else
+                       goto found;
+       }
+
+       read_unlock_bh(&trans->conn_lock);
+       _leave(" = NULL");
+       return NULL;
+
+found:
+       atomic_inc(&conn->usage);
+       read_unlock_bh(&trans->conn_lock);
+       _leave(" = %p", conn);
+       return conn;
+}
+
+/*
+ * release a virtual connection
+ */
+void rxrpc_put_connection(struct rxrpc_connection *conn)
+{
+       _enter("%p{u=%d,d=%d}",
+              conn, atomic_read(&conn->usage), conn->debug_id);
+
+       ASSERTCMP(atomic_read(&conn->usage), >, 0);
+
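+       /* stamp the time of this put; the reaper expires unused connections
+        * rxrpc_connection_timeout seconds after the last put */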
+       conn->put_time = xtime.tv_sec;
+       if (atomic_dec_and_test(&conn->usage)) {
+               _debug("zombie");
+               rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+       }
+
+       _leave("");
+}
+
+/*
+ * destroy a virtual connection
+ */
+static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
+{
+       _enter("%p{%d}", conn, atomic_read(&conn->usage));
+
+       ASSERTCMP(atomic_read(&conn->usage), ==, 0);
+
+       _net("DESTROY CONN %d", conn->debug_id);
+
+       if (conn->bundle)
+               rxrpc_put_bundle(conn->trans, conn->bundle);
+
+       ASSERT(RB_EMPTY_ROOT(&conn->calls));
+       rxrpc_purge_queue(&conn->rx_queue);
+
+       rxrpc_clear_conn_security(conn);
+       rxrpc_put_transport(conn->trans);
+       kfree(conn);
+       _leave("");
+}
+
+/*
+ * reap dead connections
+ */
+static void rxrpc_connection_reaper(struct work_struct *work)
+{
+       struct rxrpc_connection *conn, *_p;
+       unsigned long now, earliest, reap_time;
+
+       LIST_HEAD(graveyard);
+
+       _enter("");
+
+       now = xtime.tv_sec;
+       earliest = ULONG_MAX;
+
+       write_lock_bh(&rxrpc_connection_lock);
+       list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
+               _debug("reap CONN %d { u=%d,t=%ld }",
+                      conn->debug_id, atomic_read(&conn->usage),
+                      (long) now - (long) conn->put_time);
+
+               if (likely(atomic_read(&conn->usage) > 0))
+                       continue;
+
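+               /* recheck the usage count under the lookup locks so that a
+                * concurrent lookup can't revive a connection we're reaping */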
+               spin_lock(&conn->trans->client_lock);
+               write_lock(&conn->trans->conn_lock);
+               reap_time = conn->put_time + rxrpc_connection_timeout;
+
+               if (atomic_read(&conn->usage) > 0) {
+                       ;
+               } else if (reap_time <= now) {
+                       list_move_tail(&conn->link, &graveyard);
+                       if (conn->out_clientflag)
+                               rb_erase(&conn->node,
+                                        &conn->trans->client_conns);
+                       else
+                               rb_erase(&conn->node,
+                                        &conn->trans->server_conns);
+                       if (conn->bundle) {
+                               list_del_init(&conn->bundle_link);
+                               conn->bundle->num_conns--;
+                       }
+
+               } else if (reap_time < earliest) {
+                       earliest = reap_time;
+               }
+
+               write_unlock(&conn->trans->conn_lock);
+               spin_unlock(&conn->trans->client_lock);
+       }
+       write_unlock_bh(&rxrpc_connection_lock);
+
+       if (earliest != ULONG_MAX) {
+               _debug("reschedule reaper %ld", (long) earliest - now);
+               ASSERTCMP(earliest, >, now);
+               rxrpc_queue_delayed_work(&rxrpc_connection_reap,
+                                        (earliest - now) * HZ);
+       }
+
+       /* then destroy all those pulled out */
+       while (!list_empty(&graveyard)) {
+               conn = list_entry(graveyard.next, struct rxrpc_connection,
+                                 link);
+               list_del_init(&conn->link);
+
+               ASSERTCMP(atomic_read(&conn->usage), ==, 0);
+               rxrpc_destroy_connection(conn);
+       }
+
+       _leave("");
+}
+
+/*
+ * preemptively destroy all the connection records rather than waiting for them
+ * to time out
+ */
+void __exit rxrpc_destroy_all_connections(void)
+{
+       _enter("");
+
+       rxrpc_connection_timeout = 0;
+       cancel_delayed_work(&rxrpc_connection_reap);
+       rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+
+       _leave("");
+}
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
new file mode 100644 (file)
index 0000000..1ada43d
--- /dev/null
@@ -0,0 +1,403 @@
+/* connection-level event handling
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/errqueue.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/icmp.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include <net/ip.h>
+#include "ar-internal.h"
+
+/*
+ * pass a connection-level abort onto all calls on that connection
+ */
+static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state,
+                             u32 abort_code)
+{
+       struct rxrpc_call *call;
+       struct rb_node *p;
+
+       _enter("{%d},%x", conn->debug_id, abort_code);
+
+       read_lock_bh(&conn->lock);
+
+       for (p = rb_first(&conn->calls); p; p = rb_next(p)) {
+               call = rb_entry(p, struct rxrpc_call, conn_node);
+               write_lock(&call->state_lock);
+               if (call->state <= RXRPC_CALL_COMPLETE) {
+                       call->state = state;
+                       call->abort_code = abort_code;
+                       if (state == RXRPC_CALL_LOCALLY_ABORTED)
+                               set_bit(RXRPC_CALL_CONN_ABORT, &call->events);
+                       else
+                               set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+                       rxrpc_queue_call(call);
+               }
+               write_unlock(&call->state_lock);
+       }
+
+       read_unlock_bh(&conn->lock);
+       _leave("");
+}
+
+/*
+ * generate a connection-level abort
+ */
+static int rxrpc_abort_connection(struct rxrpc_connection *conn,
+                                 u32 error, u32 abort_code)
+{
+       struct rxrpc_header hdr;
+       struct msghdr msg;
+       struct kvec iov[2];
+       __be32 word;
+       size_t len;
+       int ret;
+
+       _enter("%d,,%u,%u", conn->debug_id, error, abort_code);
+
+       /* generate a connection-level abort */
+       spin_lock_bh(&conn->state_lock);
+       if (conn->state < RXRPC_CONN_REMOTELY_ABORTED) {
+               conn->state = RXRPC_CONN_LOCALLY_ABORTED;
+               conn->error = error;
+               spin_unlock_bh(&conn->state_lock);
+       } else {
+               spin_unlock_bh(&conn->state_lock);
+               _leave(" = 0 [already dead]");
+               return 0;
+       }
+
+       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code);
+
+       msg.msg_name    = &conn->trans->peer->srx.transport.sin;
+       msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_flags   = 0;
+
+       hdr.epoch       = conn->epoch;
+       hdr.cid         = conn->cid;
+       hdr.callNumber  = 0;
+       hdr.seq         = 0;
+       hdr.type        = RXRPC_PACKET_TYPE_ABORT;
+       hdr.flags       = conn->out_clientflag;
+       hdr.userStatus  = 0;
+       hdr.securityIndex = conn->security_ix;
+       hdr._rsvd       = 0;
+       hdr.serviceId   = conn->service_id;
+
+       word = htonl(abort_code);
+
+       iov[0].iov_base = &hdr;
+       iov[0].iov_len  = sizeof(hdr);
+       iov[1].iov_base = &word;
+       iov[1].iov_len  = sizeof(word);
+
+       len = iov[0].iov_len + iov[1].iov_len;
+
+       hdr.serial = htonl(atomic_inc_return(&conn->serial));
+       _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
+
+       ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
+       if (ret < 0) {
+               _debug("sendmsg failed: %d", ret);
+               return -EAGAIN;
+       }
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * mark a call as being on a now-secured channel
+ * - must be called with softirqs disabled
+ */
+void rxrpc_call_is_secure(struct rxrpc_call *call)
+{
+       _enter("%p", call);
+       if (call) {
+               read_lock(&call->state_lock);
+               if (call->state < RXRPC_CALL_COMPLETE &&
+                   !test_and_set_bit(RXRPC_CALL_SECURED, &call->events))
+                       rxrpc_queue_call(call);
+               read_unlock(&call->state_lock);
+       }
+}
+
+/*
+ * connection-level Rx packet processor
+ */
+static int rxrpc_process_event(struct rxrpc_connection *conn,
+                              struct sk_buff *skb,
+                              u32 *_abort_code)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       __be32 tmp;
+       u32 serial;
+       int loop, ret;
+
+       if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED)
+               return -ECONNABORTED;
+
+       serial = ntohl(sp->hdr.serial);
+
+       switch (sp->hdr.type) {
+       case RXRPC_PACKET_TYPE_ABORT:
+               if (skb_copy_bits(skb, 0, &tmp, sizeof(tmp)) < 0)
+                       return -EPROTO;
+               _proto("Rx ABORT %%%u { ac=%d }", serial, ntohl(tmp));
+
+               conn->state = RXRPC_CONN_REMOTELY_ABORTED;
+               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
+                                 ntohl(tmp));
+               return -ECONNABORTED;
+
+       case RXRPC_PACKET_TYPE_CHALLENGE:
+               if (conn->security)
+                       return conn->security->respond_to_challenge(
+                               conn, skb, _abort_code);
+               return -EPROTO;
+
+       case RXRPC_PACKET_TYPE_RESPONSE:
+               if (!conn->security)
+                       return -EPROTO;
+
+               ret = conn->security->verify_response(conn, skb, _abort_code);
+               if (ret < 0)
+                       return ret;
+
+               ret = conn->security->init_connection_security(conn);
+               if (ret < 0)
+                       return ret;
+
+               conn->security->prime_packet_security(conn);
+               read_lock_bh(&conn->lock);
+               spin_lock(&conn->state_lock);
+
+               if (conn->state == RXRPC_CONN_SERVER_CHALLENGING) {
+                       conn->state = RXRPC_CONN_SERVER;
+                       for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
+                               rxrpc_call_is_secure(conn->channels[loop]);
+               }
+
+               spin_unlock(&conn->state_lock);
+               read_unlock_bh(&conn->lock);
+               return 0;
+
+       default:
+               return -EPROTO;
+       }
+}
+
+/*
+ * set up security and issue a challenge
+ */
+static void rxrpc_secure_connection(struct rxrpc_connection *conn)
+{
+       u32 abort_code;
+       int ret;
+
+       _enter("{%d}", conn->debug_id);
+
+       ASSERT(conn->security_ix != 0);
+
+       if (!conn->key) {
+               _debug("set up security");
+               ret = rxrpc_init_server_conn_security(conn);
+               switch (ret) {
+               case 0:
+                       break;
+               case -ENOENT:
+                       abort_code = RX_CALL_DEAD;
+                       goto abort;
+               default:
+                       abort_code = RXKADNOAUTH;
+                       goto abort;
+               }
+       }
+
+       ASSERT(conn->security != NULL);
+
+       if (conn->security->issue_challenge(conn) < 0) {
+               abort_code = RX_CALL_DEAD;
+               ret = -ENOMEM;
+               goto abort;
+       }
+
+       _leave("");
+       return;
+
+abort:
+       _debug("abort %d, %d", ret, abort_code);
+       rxrpc_abort_connection(conn, -ret, abort_code);
+       _leave(" [aborted]");
+}
+
+/*
+ * connection-level event processor
+ */
+void rxrpc_process_connection(struct work_struct *work)
+{
+       struct rxrpc_connection *conn =
+               container_of(work, struct rxrpc_connection, processor);
+       struct rxrpc_skb_priv *sp;
+       struct sk_buff *skb;
+       u32 abort_code = RX_PROTOCOL_ERROR;
+       int ret;
+
+       _enter("{%d}", conn->debug_id);
+
+       atomic_inc(&conn->usage);
+
+       if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) {
+               rxrpc_secure_connection(conn);
+               rxrpc_put_connection(conn);
+       }
+
+       /* go through the conn-level event packets, releasing the ref on this
+        * connection that each one has when we've finished with it */
+       while ((skb = skb_dequeue(&conn->rx_queue))) {
+               sp = rxrpc_skb(skb);
+
+               ret = rxrpc_process_event(conn, skb, &abort_code);
+               switch (ret) {
+               case -EPROTO:
+               case -EKEYEXPIRED:
+               case -EKEYREJECTED:
+                       goto protocol_error;
+               case -EAGAIN:
+                       goto requeue_and_leave;
+               case -ECONNABORTED:
+               default:
+                       rxrpc_put_connection(conn);
+                       rxrpc_free_skb(skb);
+                       break;
+               }
+       }
+
+out:
+       rxrpc_put_connection(conn);
+       _leave("");
+       return;
+
+requeue_and_leave:
+       skb_queue_head(&conn->rx_queue, skb);
+       goto out;
+
+protocol_error:
+       if (rxrpc_abort_connection(conn, -ret, abort_code) < 0)
+               goto requeue_and_leave;
+       rxrpc_put_connection(conn);
+       rxrpc_free_skb(skb);
+       _leave(" [EPROTO]");
+       goto out;
+}
+
+/*
+ * put a packet up for transport-level abort
+ */
+void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
+{
+       CHECK_SLAB_OKAY(&local->usage);
+
+       if (!atomic_inc_not_zero(&local->usage)) {
+               printk("resurrected on reject\n");
+               BUG();
+       }
+
+       skb_queue_tail(&local->reject_queue, skb);
+       rxrpc_queue_work(&local->rejecter);
+}
+
+/*
+ * reject packets through the local endpoint
+ */
+void rxrpc_reject_packets(struct work_struct *work)
+{
+       union {
+               struct sockaddr sa;
+               struct sockaddr_in sin;
+       } sa;
+       struct rxrpc_skb_priv *sp;
+       struct rxrpc_header hdr;
+       struct rxrpc_local *local;
+       struct sk_buff *skb;
+       struct msghdr msg;
+       struct kvec iov[2];
+       size_t size;
+       __be32 code;
+
+       local = container_of(work, struct rxrpc_local, rejecter);
+       rxrpc_get_local(local);
+
+       _enter("%d", local->debug_id);
+
+       iov[0].iov_base = &hdr;
+       iov[0].iov_len = sizeof(hdr);
+       iov[1].iov_base = &code;
+       iov[1].iov_len = sizeof(code);
+       size = sizeof(hdr) + sizeof(code);
+
+       msg.msg_name = &sa;
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_flags = 0;
+
+       memset(&sa, 0, sizeof(sa));
+       sa.sa.sa_family = local->srx.transport.family;
+       switch (sa.sa.sa_family) {
+       case AF_INET:
+               msg.msg_namelen = sizeof(sa.sin);
+               break;
+       default:
+               msg.msg_namelen = 0;
+               break;
+       }
+
+       memset(&hdr, 0, sizeof(hdr));
+       hdr.type = RXRPC_PACKET_TYPE_ABORT;
+
+       while ((skb = skb_dequeue(&local->reject_queue))) {
+               sp = rxrpc_skb(skb);
+               switch (sa.sa.sa_family) {
+               case AF_INET:
+                       sa.sin.sin_port = udp_hdr(skb)->source;
+                       sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+                       code = htonl(skb->priority);
+
+                       hdr.epoch = sp->hdr.epoch;
+                       hdr.cid = sp->hdr.cid;
+                       hdr.callNumber = sp->hdr.callNumber;
+                       hdr.serviceId = sp->hdr.serviceId;
+                       hdr.flags = sp->hdr.flags;
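+                       /* bounce the abort back the way the packet came:
+                        * flip the client-initiated flag, clear the rest */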
+                       hdr.flags ^= RXRPC_CLIENT_INITIATED;
+                       hdr.flags &= RXRPC_CLIENT_INITIATED;
+
+                       kernel_sendmsg(local->socket, &msg, iov, 2, size);
+                       break;
+
+               default:
+                       break;
+               }
+
+               rxrpc_free_skb(skb);
+               rxrpc_put_local(local);
+       }
+
+       rxrpc_put_local(local);
+       _leave("");
+}
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
new file mode 100644 (file)
index 0000000..2c27df1
--- /dev/null
@@ -0,0 +1,253 @@
+/* Error message handling (ICMP)
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/errqueue.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/icmp.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include <net/ip.h>
+#include "ar-internal.h"
+
+/*
+ * handle an error received on the local endpoint
+ */
+void rxrpc_UDP_error_report(struct sock *sk)
+{
+       struct sock_exterr_skb *serr;
+       struct rxrpc_transport *trans;
+       struct rxrpc_local *local = sk->sk_user_data;
+       struct rxrpc_peer *peer;
+       struct sk_buff *skb;
+       __be32 addr;
+       __be16 port;
+
+       _enter("%p{%d}", sk, local->debug_id);
+
+       skb = skb_dequeue(&sk->sk_error_queue);
+       if (!skb) {
+               _leave("UDP socket errqueue empty");
+               return;
+       }
+
+       rxrpc_new_skb(skb);
+
+       serr = SKB_EXT_ERR(skb);
+       addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
+       port = serr->port;
+
+       _net("Rx UDP Error from "NIPQUAD_FMT":%hu",
+            NIPQUAD(addr), ntohs(port));
+       _debug("Msg l:%d d:%d", skb->len, skb->data_len);
+
+       peer = rxrpc_find_peer(local, addr, port);
+       if (IS_ERR(peer)) {
+               rxrpc_free_skb(skb);
+               _leave(" [no peer]");
+               return;
+       }
+
+       trans = rxrpc_find_transport(local, peer);
+       if (!trans) {
+               rxrpc_put_peer(peer);
+               rxrpc_free_skb(skb);
+               _leave(" [no trans]");
+               return;
+       }
+
+       if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
+           serr->ee.ee_type == ICMP_DEST_UNREACH &&
+           serr->ee.ee_code == ICMP_FRAG_NEEDED
+           ) {
+               u32 mtu = serr->ee.ee_info;
+
+               _net("Rx Received ICMP Fragmentation Needed (%d)", mtu);
+
+               /* wind down the local interface MTU */
+               if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
+                       peer->if_mtu = mtu;
+                       _net("I/F MTU %u", mtu);
+               }
+
+               /* ip_rt_frag_needed() may have eaten the info */
+               if (mtu == 0)
+                       mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);
+
+               if (mtu == 0) {
+                       /* they didn't give us a size, estimate one from the
+                        * interface MTU */
+                       mtu = peer->if_mtu;
+                       if (mtu > 1500) {
+                               mtu >>= 1;
+                               if (mtu < 1500)
+                                       mtu = 1500;
+                       } else {
+                               mtu -= 100;
+                               if (mtu < peer->hdrsize)
+                                       mtu = peer->hdrsize + 4;
+                       }
+               }
+
+               if (mtu < peer->mtu) {
+                       peer->mtu = mtu;
+                       peer->maxdata = peer->mtu - peer->hdrsize;
+                       _net("Net MTU %u (maxdata %u)",
+                            peer->mtu, peer->maxdata);
+               }
+       }
+
+       rxrpc_put_peer(peer);
+
+       /* pass the transport ref to error_handler to release */
+       skb_queue_tail(&trans->error_queue, skb);
+       rxrpc_queue_work(&trans->error_handler);
+
+       /* reset and regenerate socket error */
+       spin_lock_bh(&sk->sk_error_queue.lock);
+       sk->sk_err = 0;
+       skb = skb_peek(&sk->sk_error_queue);
+       if (skb) {
+               sk->sk_err = SKB_EXT_ERR(skb)->ee.ee_errno;
+               spin_unlock_bh(&sk->sk_error_queue.lock);
+               sk->sk_error_report(sk);
+       } else {
+               spin_unlock_bh(&sk->sk_error_queue.lock);
+       }
+
+       _leave("");
+}
+
+/*
+ * deal with UDP error messages
+ */
+void rxrpc_UDP_error_handler(struct work_struct *work)
+{
+       struct sock_extended_err *ee;
+       struct sock_exterr_skb *serr;
+       struct rxrpc_transport *trans =
+               container_of(work, struct rxrpc_transport, error_handler);
+       struct sk_buff *skb;
+       int local, err;
+
+       _enter("");
+
+       skb = skb_dequeue(&trans->error_queue);
+       if (!skb)
+               return;
+
+       serr = SKB_EXT_ERR(skb);
+       ee = &serr->ee;
+
+       _net("Rx Error o=%d t=%d c=%d e=%d",
+            ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno);
+
+       err = ee->ee_errno;
+
+       switch (ee->ee_origin) {
+       case SO_EE_ORIGIN_ICMP:
+               local = 0;
+               switch (ee->ee_type) {
+               case ICMP_DEST_UNREACH:
+                       switch (ee->ee_code) {
+                       case ICMP_NET_UNREACH:
+                               _net("Rx Received ICMP Network Unreachable");
+                               err = ENETUNREACH;
+                               break;
+                       case ICMP_HOST_UNREACH:
+                               _net("Rx Received ICMP Host Unreachable");
+                               err = EHOSTUNREACH;
+                               break;
+                       case ICMP_PORT_UNREACH:
+                               _net("Rx Received ICMP Port Unreachable");
+                               err = ECONNREFUSED;
+                               break;
+                       case ICMP_FRAG_NEEDED:
+                               _net("Rx Received ICMP Fragmentation Needed (%d)",
+                                    ee->ee_info);
+                               err = 0; /* dealt with elsewhere */
+                               break;
+                       case ICMP_NET_UNKNOWN:
+                               _net("Rx Received ICMP Unknown Network");
+                               err = ENETUNREACH;
+                               break;
+                       case ICMP_HOST_UNKNOWN:
+                               _net("Rx Received ICMP Unknown Host");
+                               err = EHOSTUNREACH;
+                               break;
+                       default:
+                               _net("Rx Received ICMP DestUnreach code=%u",
+                                    ee->ee_code);
+                               break;
+                       }
+                       break;
+
+               case ICMP_TIME_EXCEEDED:
+                       _net("Rx Received ICMP TTL Exceeded");
+                       break;
+
+               default:
+                       _proto("Rx Received ICMP error { type=%u code=%u }",
+                              ee->ee_type, ee->ee_code);
+                       break;
+               }
+               break;
+
+       case SO_EE_ORIGIN_LOCAL:
+               _proto("Rx Received local error { error=%d }",
+                      ee->ee_errno);
+               local = 1;
+               break;
+
+       case SO_EE_ORIGIN_NONE:
+       case SO_EE_ORIGIN_ICMP6:
+       default:
+               _proto("Rx Received error report { orig=%u }",
+                      ee->ee_origin);
+               local = 0;
+               break;
+       }
+
+       /* terminate all the affected calls if there's an unrecoverable
+        * error */
+       if (err) {
+               struct rxrpc_call *call, *_n;
+
+               _debug("ISSUE ERROR %d", err);
+
+               spin_lock_bh(&trans->peer->lock);
+               trans->peer->net_error = err;
+
+               list_for_each_entry_safe(call, _n, &trans->peer->error_targets,
+                                        error_link) {
+                       write_lock(&call->state_lock);
+                       if (call->state != RXRPC_CALL_COMPLETE &&
+                           call->state < RXRPC_CALL_NETWORK_ERROR) {
+                               call->state = RXRPC_CALL_NETWORK_ERROR;
+                               set_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
+                               rxrpc_queue_call(call);
+                       }
+                       write_unlock(&call->state_lock);
+                       list_del_init(&call->error_link);
+               }
+
+               spin_unlock_bh(&trans->peer->lock);
+       }
+
+       if (!skb_queue_empty(&trans->error_queue))
+               rxrpc_queue_work(&trans->error_handler);
+
+       rxrpc_free_skb(skb);
+       rxrpc_put_transport(trans);
+       _leave("");
+}
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
new file mode 100644 (file)
index 0000000..91b5bbb
--- /dev/null
@@ -0,0 +1,797 @@
+/* RxRPC packet reception
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/errqueue.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/icmp.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include <net/ip.h>
+#include "ar-internal.h"
+
+unsigned long rxrpc_ack_timeout = 1;
+
+const char *rxrpc_pkts[] = {
+       "?00",
+       "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
+       "?09", "?10", "?11", "?12", "?13", "?14", "?15"
+};
+
+/*
+ * queue a packet for recvmsg to pass to userspace
+ * - the caller must hold a lock on call->lock
+ * - must not be called with interrupts disabled (sk_filter() disables BH's)
+ * - eats the packet whether successful or not
+ * - there must be just one reference to the packet, which the caller passes to
+ *   this function
+ */
+int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
+                       bool force, bool terminal)
+{
+       struct rxrpc_skb_priv *sp;
+       struct rxrpc_sock *rx = call->socket;
+       struct sock *sk;
+       int skb_len, ret;
+
+       _enter(",,%d,%d", force, terminal);
+
+       ASSERT(!irqs_disabled());
+
+       sp = rxrpc_skb(skb);
+       ASSERTCMP(sp->call, ==, call);
+
+       /* if we've already posted the terminal message for a call, then we
+        * don't post any more */
+       if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
+               _debug("already terminated");
+               ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
+               skb->destructor = NULL;
+               sp->call = NULL;
+               rxrpc_put_call(call);
+               rxrpc_free_skb(skb);
+               return 0;
+       }
+
+       sk = &rx->sk;
+
+       if (!force) {
+               /* cast sk->sk_rcvbuf to unsigned...  It's pointless, but
+                * reduces number of warnings when compiling with -W
+                * --ANK */
+//             ret = -ENOBUFS;
+//             if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+//                 (unsigned) sk->sk_rcvbuf)
+//                     goto out;
+
+               ret = sk_filter(sk, skb);
+               if (ret < 0)
+                       goto out;
+       }
+
+       spin_lock_bh(&sk->sk_receive_queue.lock);
+       if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
+           !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
+           call->socket->sk.sk_state != RXRPC_CLOSE) {
+               skb->destructor = rxrpc_packet_destructor;
+               skb->dev = NULL;
+               skb->sk = sk;
+               atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+               if (terminal) {
+                       _debug("<<<< TERMINAL MESSAGE >>>>");
+                       set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
+               }
+
+               /* allow interception by a kernel service */
+               if (rx->interceptor) {
+                       rx->interceptor(sk, call->user_call_ID, skb);
+                       spin_unlock_bh(&sk->sk_receive_queue.lock);
+               } else {
+
+                       /* Cache the SKB length before we tack it onto the
+                        * receive queue.  Once it is added it no longer
+                        * belongs to us and may be freed by other threads of
+                        * control pulling packets from the queue */
+                       skb_len = skb->len;
+
+                       _net("post skb %p", skb);
+                       __skb_queue_tail(&sk->sk_receive_queue, skb);
+                       spin_unlock_bh(&sk->sk_receive_queue.lock);
+
+                       if (!sock_flag(sk, SOCK_DEAD))
+                               sk->sk_data_ready(sk, skb_len);
+               }
+               skb = NULL;
+       } else {
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
+       }
+       ret = 0;
+
+out:
+       /* release the socket buffer */
+       if (skb) {
+               skb->destructor = NULL;
+               sp->call = NULL;
+               rxrpc_put_call(call);
+               rxrpc_free_skb(skb);
+       }
+
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * process a DATA packet, posting the packet to the appropriate queue
+ * - eats the packet if successful
+ */
+static int rxrpc_fast_process_data(struct rxrpc_call *call,
+                                  struct sk_buff *skb, u32 seq)
+{
+       struct rxrpc_skb_priv *sp;
+       bool terminal;
+       int ret, ackbit, ack;
+
+       _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
+
+       sp = rxrpc_skb(skb);
+       ASSERTCMP(sp->call, ==, NULL);
+
+       spin_lock(&call->lock);
+
+       if (call->state > RXRPC_CALL_COMPLETE)
+               goto discard;
+
+       ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
+       ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
+       ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);
+
+       if (seq < call->rx_data_post) {
+               _debug("dup #%u [-%u]", seq, call->rx_data_post);
+               ack = RXRPC_ACK_DUPLICATE;
+               ret = -ENOBUFS;
+               goto discard_and_ack;
+       }
+
+       /* we may already have the packet in the out of sequence queue */
+       ackbit = seq - (call->rx_data_eaten + 1);
+       ASSERTCMP(ackbit, >=, 0);
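+       /* ackr_window holds one bit per packet beyond rx_data_eaten; finding
+        * the bit already set means this is a duplicate */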
+       if (__test_and_set_bit(ackbit, call->ackr_window)) {
+               _debug("dup oos #%u [%u,%u]",
+                      seq, call->rx_data_eaten, call->rx_data_post);
+               ack = RXRPC_ACK_DUPLICATE;
+               goto discard_and_ack;
+       }
+
+       if (seq >= call->ackr_win_top) {
+               _debug("exceed #%u [%u]", seq, call->ackr_win_top);
+               __clear_bit(ackbit, call->ackr_window);
+               ack = RXRPC_ACK_EXCEEDS_WINDOW;
+               goto discard_and_ack;
+       }
+
+       if (seq == call->rx_data_expect) {
+               clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
+               call->rx_data_expect++;
+       } else if (seq > call->rx_data_expect) {
+               _debug("oos #%u [%u]", seq, call->rx_data_expect);
+               call->rx_data_expect = seq + 1;
+               if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
+                       ack = RXRPC_ACK_OUT_OF_SEQUENCE;
+                       goto enqueue_and_ack;
+               }
+               goto enqueue_packet;
+       }
+
+       if (seq != call->rx_data_post) {
+               _debug("ahead #%u [%u]", seq, call->rx_data_post);
+               goto enqueue_packet;
+       }
+
+       if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
+               goto protocol_error;
+
+       /* if the packet needs security processing then it goes down the slow
+        * path */
+       if (call->conn->security)
+               goto enqueue_packet;
+
+       sp->call = call;
+       rxrpc_get_call(call);
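+       /* a last-fragment packet flowing in the reply direction is the
+        * terminal message as far as the socket is concerned */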
+       terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
+                   !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
+       ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
+       if (ret < 0) {
+               if (ret == -ENOMEM || ret == -ENOBUFS) {
+                       __clear_bit(ackbit, call->ackr_window);
+                       ack = RXRPC_ACK_NOSPACE;
+                       goto discard_and_ack;
+               }
+               goto out;
+       }
+
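+       /* rxrpc_queue_rcv_skb() now owns the skb, so don't free it here */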
+       skb = NULL;
+
+       _debug("post #%u", seq);
+       ASSERTCMP(call->rx_data_post, ==, seq);
+       call->rx_data_post++;
+
+       if (sp->hdr.flags & RXRPC_LAST_PACKET)
+               set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
+
+       /* if we've reached an out of sequence packet then we need to drain
+        * that queue into the socket Rx queue now */
+       if (call->rx_data_post == call->rx_first_oos) {
+               _debug("drain rx oos now");
+               read_lock(&call->state_lock);
+               if (call->state < RXRPC_CALL_COMPLETE &&
+                   !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
+                       rxrpc_queue_call(call);
+               read_unlock(&call->state_lock);
+       }
+
+       spin_unlock(&call->lock);
+       atomic_inc(&call->ackr_not_idle);
+       rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
+       _leave(" = 0 [posted]");
+       return 0;
+
+protocol_error:
+       ret = -EBADMSG;
+out:
+       spin_unlock(&call->lock);
+       _leave(" = %d", ret);
+       return ret;
+
+discard_and_ack:
+       _debug("discard and ACK packet %p", skb);
+       __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
+discard:
+       spin_unlock(&call->lock);
+       rxrpc_free_skb(skb);
+       _leave(" = 0 [discarded]");
+       return 0;
+
+enqueue_and_ack:
+       __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
+enqueue_packet:
+       _net("defer skb %p", skb);
+       spin_unlock(&call->lock);
+       skb_queue_tail(&call->rx_queue, skb);
+       atomic_inc(&call->ackr_not_idle);
+       read_lock(&call->state_lock);
+       if (call->state < RXRPC_CALL_DEAD)
+               rxrpc_queue_call(call);
+       read_unlock(&call->state_lock);
+       _leave(" = 0 [queued]");
+       return 0;
+}
+
+/*
+ * assume an implicit ACKALL of the transmission phase of a client socket upon
+ * reception of the first reply packet
+ */
+static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
+{
+       write_lock_bh(&call->state_lock);
+
+       switch (call->state) {
+       case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+               call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
+               call->acks_latest = serial;
+
+               _debug("implicit ACKALL %%%u", call->acks_latest);
+               set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events);
+               write_unlock_bh(&call->state_lock);
+
+               if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
+                       clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+                       clear_bit(RXRPC_CALL_RESEND, &call->events);
+                       clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+               }
+               break;
+
+       default:
+               write_unlock_bh(&call->state_lock);
+               break;
+       }
+}
+
+/*
+ * post an incoming packet to the nominated call to deal with
+ * - must get rid of the sk_buff, either by freeing it or by queuing it
+ */
+void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       __be32 _abort_code;
+       u32 serial, hi_serial, seq, abort_code;
+
+       _enter("%p,%p", call, skb);
+
+       ASSERT(!irqs_disabled());
+
+#if 0 // INJECT RX ERROR
+       if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
+               static int skip = 0;
+               if (++skip == 3) {
+                       printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
+                       skip = 0;
+                       goto free_packet;
+               }
+       }
+#endif
+
+       /* track the latest serial number on this connection for ACK packet
+        * information */
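+       /* (the cmpxchg loop below performs a lock-free "max" update of
+        *  hi_serial) */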
+       serial = ntohl(sp->hdr.serial);
+       hi_serial = atomic_read(&call->conn->hi_serial);
+       while (serial > hi_serial)
+               hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
+                                          serial);
+
+       /* request ACK generation for any ACK or DATA packet that requests
+        * it */
+       if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
+               _proto("ACK Requested on %%%u", serial);
+               rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial,
+                                 !(sp->hdr.flags & RXRPC_MORE_PACKETS));
+       }
+
+       switch (sp->hdr.type) {
+       case RXRPC_PACKET_TYPE_ABORT:
+               _debug("abort");
+
+               if (skb_copy_bits(skb, 0, &_abort_code,
+                                 sizeof(_abort_code)) < 0)
+                       goto protocol_error;
+
+               abort_code = ntohl(_abort_code);
+               _proto("Rx ABORT %%%u { %x }", serial, abort_code);
+
+               write_lock_bh(&call->state_lock);
+               if (call->state < RXRPC_CALL_COMPLETE) {
+                       call->state = RXRPC_CALL_REMOTELY_ABORTED;
+                       call->abort_code = abort_code;
+                       set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+                       rxrpc_queue_call(call);
+               }
+               goto free_packet_unlock;
+
+       case RXRPC_PACKET_TYPE_BUSY:
+               _proto("Rx BUSY %%%u", serial);
+
+               if (call->conn->out_clientflag)
+                       goto protocol_error;
+
+               write_lock_bh(&call->state_lock);
+               switch (call->state) {
+               case RXRPC_CALL_CLIENT_SEND_REQUEST:
+                       call->state = RXRPC_CALL_SERVER_BUSY;
+                       set_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
+                       rxrpc_queue_call(call);
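+                       /* fall through */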
+               case RXRPC_CALL_SERVER_BUSY:
+                       goto free_packet_unlock;
+               default:
+                       goto protocol_error_locked;
+               }
+
+       default:
+               _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], serial);
+               goto protocol_error;
+
+       case RXRPC_PACKET_TYPE_DATA:
+               seq = ntohl(sp->hdr.seq);
+
+               _proto("Rx DATA %%%u { #%u }", serial, seq);
+
+               if (seq == 0)
+                       goto protocol_error;
+
+               call->ackr_prev_seq = sp->hdr.seq;
+
+               /* received data implicitly ACKs all of the request packets we
+                * sent when we're acting as a client */
+               if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
+                       rxrpc_assume_implicit_ackall(call, serial);
+
+               switch (rxrpc_fast_process_data(call, skb, seq)) {
+               case 0:
+                       skb = NULL;
+                       goto done;
+
+               default:
+                       BUG();
+
+                       /* data packet received beyond the last packet */
+               case -EBADMSG:
+                       goto protocol_error;
+               }
+
+       case RXRPC_PACKET_TYPE_ACK:
+               /* ACK processing is done in process context */
+               read_lock_bh(&call->state_lock);
+               if (call->state < RXRPC_CALL_DEAD) {
+                       skb_queue_tail(&call->rx_queue, skb);
+                       rxrpc_queue_call(call);
+                       skb = NULL;
+               }
+               read_unlock_bh(&call->state_lock);
+               goto free_packet;
+       }
+
+protocol_error:
+       _debug("protocol error");
+       write_lock_bh(&call->state_lock);
+protocol_error_locked:
+       if (call->state <= RXRPC_CALL_COMPLETE) {
+               call->state = RXRPC_CALL_LOCALLY_ABORTED;
+               call->abort_code = RX_PROTOCOL_ERROR;
+               set_bit(RXRPC_CALL_ABORT, &call->events);
+               rxrpc_queue_call(call);
+       }
+free_packet_unlock:
+       write_unlock_bh(&call->state_lock);
+free_packet:
+       rxrpc_free_skb(skb);
+done:
+       _leave("");
+}
+
+/*
+ * split up a jumbo data packet
+ */
+static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
+                                      struct sk_buff *jumbo)
+{
+       struct rxrpc_jumbo_header jhdr;
+       struct rxrpc_skb_priv *sp;
+       struct sk_buff *part;
+
+       _enter(",{%u,%u}", jumbo->data_len, jumbo->len);
+
+       sp = rxrpc_skb(jumbo);
+
+       do {
+               sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;
+
+               /* make a clone to represent the first subpacket in what's left
+                * of the jumbo packet */
+               part = skb_clone(jumbo, GFP_ATOMIC);
+               if (!part) {
+                       /* simply ditch the tail in the event of ENOMEM */
+                       pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
+                       break;
+               }
+               rxrpc_new_skb(part);
+
+               pskb_trim(part, RXRPC_JUMBO_DATALEN);
+
+               if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
+                       goto protocol_error;
+
+               if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
+                       goto protocol_error;
+               if (!pskb_pull(jumbo, sizeof(jhdr)))
+                       BUG();
+
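+               /* what remains of the jumbo skb now represents the next
+                * subpacket: it implicitly takes the following seq and serial
+                * numbers and its flags from the jumbo header just read */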
+               sp->hdr.seq     = htonl(ntohl(sp->hdr.seq) + 1);
+               sp->hdr.serial  = htonl(ntohl(sp->hdr.serial) + 1);
+               sp->hdr.flags   = jhdr.flags;
+               sp->hdr._rsvd   = jhdr._rsvd;
+
+               _proto("Rx DATA Jumbo %%%u", ntohl(sp->hdr.serial) - 1);
+
+               rxrpc_fast_process_packet(call, part);
+               part = NULL;
+
+       } while (sp->hdr.flags & RXRPC_JUMBO_PACKET);
+
+       rxrpc_fast_process_packet(call, jumbo);
+       _leave("");
+       return;
+
+protocol_error:
+       _debug("protocol error");
+       rxrpc_free_skb(part);
+       rxrpc_free_skb(jumbo);
+       write_lock_bh(&call->state_lock);
+       if (call->state <= RXRPC_CALL_COMPLETE) {
+               call->state = RXRPC_CALL_LOCALLY_ABORTED;
+               call->abort_code = RX_PROTOCOL_ERROR;
+               set_bit(RXRPC_CALL_ABORT, &call->events);
+               rxrpc_queue_call(call);
+       }
+       write_unlock_bh(&call->state_lock);
+       _leave("");
+}
+
+/*
+ * post an incoming packet to the appropriate call/socket to deal with
+ * - must get rid of the sk_buff, either by freeing it or by queuing it
+ */
+static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn,
+                                     struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp;
+       struct rxrpc_call *call;
+       struct rb_node *p;
+       __be32 call_id;
+
+       _enter("%p,%p", conn, skb);
+
+       read_lock_bh(&conn->lock);
+
+       sp = rxrpc_skb(skb);
+
+       /* look at extant calls by channel number first */
+       call = conn->channels[ntohl(sp->hdr.cid) & RXRPC_CHANNELMASK];
+       if (!call || call->call_id != sp->hdr.callNumber)
+               goto call_not_extant;
+
+       _debug("extant call [%d]", call->state);
+       ASSERTCMP(call->conn, ==, conn);
+
+       read_lock(&call->state_lock);
+       switch (call->state) {
+       case RXRPC_CALL_LOCALLY_ABORTED:
+               if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
+                       rxrpc_queue_call(call);
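+               /* fall through */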
+       case RXRPC_CALL_REMOTELY_ABORTED:
+       case RXRPC_CALL_NETWORK_ERROR:
+       case RXRPC_CALL_DEAD:
+               goto free_unlock;
+       default:
+               break;
+       }
+
+       read_unlock(&call->state_lock);
+       rxrpc_get_call(call);
+       read_unlock_bh(&conn->lock);
+
+       if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+           sp->hdr.flags & RXRPC_JUMBO_PACKET)
+               rxrpc_process_jumbo_packet(call, skb);
+       else
+               rxrpc_fast_process_packet(call, skb);
+
+       rxrpc_put_call(call);
+       goto done;
+
+call_not_extant:
+       /* search the completed calls in case the packet belongs to one of
+        * them */
+       _debug("call not extant");
+
+       call_id = sp->hdr.callNumber;
+       p = conn->calls.rb_node;
+       while (p) {
+               call = rb_entry(p, struct rxrpc_call, conn_node);
+
+               if (call_id < call->call_id)
+                       p = p->rb_left;
+               else if (call_id > call->call_id)
+                       p = p->rb_right;
+               else
+                       goto found_completed_call;
+       }
+
+dead_call:
+       /* it's either a really old call that we no longer remember or it's a
+        * new incoming call */
+       read_unlock_bh(&conn->lock);
+
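+       /* a client-initiated packet with sequence number 1 is the start of a
+        * new incoming call */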
+       if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
+           sp->hdr.seq == __constant_cpu_to_be32(1)) {
+               _debug("incoming call");
+               skb_queue_tail(&conn->trans->local->accept_queue, skb);
+               rxrpc_queue_work(&conn->trans->local->acceptor);
+               goto done;
+       }
+
+       _debug("dead call");
+       skb->priority = RX_CALL_DEAD;
+       rxrpc_reject_packet(conn->trans->local, skb);
+       goto done;
+
+       /* resend last packet of a completed call
+        * - client calls may have been aborted or ACK'd
+        * - server calls may have been aborted
+        */
+found_completed_call:
+       _debug("completed call");
+
+       if (atomic_read(&call->usage) == 0)
+               goto dead_call;
+
+       /* synchronise any state changes */
+       read_lock(&call->state_lock);
+       ASSERTIFCMP(call->state != RXRPC_CALL_CLIENT_FINAL_ACK,
+                   call->state, >=, RXRPC_CALL_COMPLETE);
+
+       if (call->state == RXRPC_CALL_LOCALLY_ABORTED ||
+           call->state == RXRPC_CALL_REMOTELY_ABORTED ||
+           call->state == RXRPC_CALL_DEAD) {
+               read_unlock(&call->state_lock);
+               goto dead_call;
+       }
+
+       if (call->conn->in_clientflag) {
+               read_unlock(&call->state_lock);
+               goto dead_call; /* complete server call */
+       }
+
+       _debug("final ack again");
+       rxrpc_get_call(call);
+       set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
+       rxrpc_queue_call(call);
+
+free_unlock:
+       read_unlock(&call->state_lock);
+       read_unlock_bh(&conn->lock);
+       rxrpc_free_skb(skb);
+done:
+       _leave("");
+}
+
+/*
+ * post connection-level events to the connection
+ * - this includes challenges, responses and some aborts
+ */
+static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
+                                     struct sk_buff *skb)
+{
+       _enter("%p,%p", conn, skb);
+
+       atomic_inc(&conn->usage);
+       skb_queue_tail(&conn->rx_queue, skb);
+       rxrpc_queue_conn(conn);
+}
+
+/*
+ * handle data received on the local endpoint
+ * - may be called in interrupt context
+ */
+void rxrpc_data_ready(struct sock *sk, int count)
+{
+       struct rxrpc_connection *conn;
+       struct rxrpc_transport *trans;
+       struct rxrpc_skb_priv *sp;
+       struct rxrpc_local *local;
+       struct rxrpc_peer *peer;
+       struct sk_buff *skb;
+       int ret;
+
+       _enter("%p, %d", sk, count);
+
+       ASSERT(!irqs_disabled());
+
+       read_lock_bh(&rxrpc_local_lock);
+       local = sk->sk_user_data;
+       if (local && atomic_read(&local->usage) > 0)
+               rxrpc_get_local(local);
+       else
+               local = NULL;
+       read_unlock_bh(&rxrpc_local_lock);
+       if (!local) {
+               _leave(" [local dead]");
+               return;
+       }
+
+       skb = skb_recv_datagram(sk, 0, 1, &ret);
+       if (!skb) {
+               rxrpc_put_local(local);
+               if (ret == -EAGAIN)
+                       return;
+               _debug("UDP socket error %d", ret);
+               return;
+       }
+
+       rxrpc_new_skb(skb);
+
+       _net("recv skb %p", skb);
+
+       /* we'll probably need to checksum it (didn't call sock_recvmsg) */
+       if (skb_checksum_complete(skb)) {
+               rxrpc_free_skb(skb);
+               rxrpc_put_local(local);
+               _leave(" [CSUM failed]");
+               return;
+       }
+
+       /* the socket buffer we have is owned by UDP, with UDP's data all over
+        * it, but we really want our own */
+       skb_orphan(skb);
+       sp = rxrpc_skb(skb);
+       memset(sp, 0, sizeof(*sp));
+
+       _net("Rx UDP packet from %08x:%04hu",
+            ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));
+
+       /* dig out the RxRPC connection details */
+       if (skb_copy_bits(skb, sizeof(struct udphdr), &sp->hdr,
+                         sizeof(sp->hdr)) < 0)
+               goto bad_message;
+       if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(sp->hdr)))
+               BUG();
+
+       _net("Rx RxRPC %s ep=%x call=%x:%x",
+            sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
+            ntohl(sp->hdr.epoch),
+            ntohl(sp->hdr.cid),
+            ntohl(sp->hdr.callNumber));
+
+       if (sp->hdr.type == 0 || sp->hdr.type >= RXRPC_N_PACKET_TYPES) {
+               _proto("Rx Bad Packet Type %u", sp->hdr.type);
+               goto bad_message;
+       }
+
+       if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+           (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
+               goto bad_message;
+
+       peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr, udp_hdr(skb)->source);
+       if (IS_ERR(peer))
+               goto cant_route_call;
+
+       trans = rxrpc_find_transport(local, peer);
+       rxrpc_put_peer(peer);
+       if (!trans)
+               goto cant_route_call;
+
+       conn = rxrpc_find_connection(trans, &sp->hdr);
+       rxrpc_put_transport(trans);
+       if (!conn)
+               goto cant_route_call;
+
+       _debug("CONN %p {%d}", conn, conn->debug_id);
+
+       if (sp->hdr.callNumber == 0)
+               rxrpc_post_packet_to_conn(conn, skb);
+       else
+               rxrpc_post_packet_to_call(conn, skb);
+       rxrpc_put_connection(conn);
+       rxrpc_put_local(local);
+       return;
+
+cant_route_call:
+       _debug("can't route call");
+       if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
+           sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
+               if (sp->hdr.seq == __constant_cpu_to_be32(1)) {
+                       _debug("first packet");
+                       skb_queue_tail(&local->accept_queue, skb);
+                       rxrpc_queue_work(&local->acceptor);
+                       rxrpc_put_local(local);
+                       _leave(" [incoming]");
+                       return;
+               }
+               skb->priority = RX_INVALID_OPERATION;
+       } else {
+               skb->priority = RX_CALL_DEAD;
+       }
+
+       _debug("reject");
+       rxrpc_reject_packet(local, skb);
+       rxrpc_put_local(local);
+       _leave(" [no call]");
+       return;
+
+bad_message:
+       skb->priority = RX_PROTOCOL_ERROR;
+       rxrpc_reject_packet(local, skb);
+       rxrpc_put_local(local);
+       _leave(" [badmsg]");
+}
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
new file mode 100644 (file)
index 0000000..58aaf89
--- /dev/null
@@ -0,0 +1,808 @@
+/* AF_RXRPC internal definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <rxrpc/packet.h>
+
+#if 0
+#define CHECK_SLAB_OKAY(X)                                  \
+       BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
+              (POISON_FREE << 8 | POISON_FREE))
+#else
+#define CHECK_SLAB_OKAY(X) do {} while(0)
+#endif
+
+#define FCRYPT_BSIZE 8
+struct rxrpc_crypt {
+       union {
+               u8      x[FCRYPT_BSIZE];
+               u32     n[2];
+       };
+} __attribute__((aligned(8)));
+
+#define rxrpc_queue_work(WS)   queue_work(rxrpc_workqueue, (WS))
+#define rxrpc_queue_delayed_work(WS,D) \
+       queue_delayed_work(rxrpc_workqueue, (WS), (D))
+
+#define rxrpc_queue_call(CALL) rxrpc_queue_work(&(CALL)->processor)
+#define rxrpc_queue_conn(CONN) rxrpc_queue_work(&(CONN)->processor)
+
+/*
+ * sk_state for RxRPC sockets
+ */
+enum {
+       RXRPC_UNCONNECTED = 0,
+       RXRPC_CLIENT_BOUND,             /* client local address bound */
+       RXRPC_CLIENT_CONNECTED,         /* client is connected */
+       RXRPC_SERVER_BOUND,             /* server local address bound */
+       RXRPC_SERVER_LISTENING,         /* server listening for connections */
+       RXRPC_CLOSE,                    /* socket is being closed */
+};
+
+/*
+ * RxRPC socket definition
+ */
+struct rxrpc_sock {
+       /* WARNING: sk has to be the first member */
+       struct sock             sk;
+       rxrpc_interceptor_t     interceptor;    /* kernel service Rx interceptor function */
+       struct rxrpc_local      *local;         /* local endpoint */
+       struct rxrpc_transport  *trans;         /* transport handler */
+       struct rxrpc_conn_bundle *bundle;       /* virtual connection bundle */
+       struct rxrpc_connection *conn;          /* exclusive virtual connection */
+       struct list_head        listen_link;    /* link in the local endpoint's listen list */
+       struct list_head        secureq;        /* calls awaiting connection security clearance */
+       struct list_head        acceptq;        /* calls awaiting acceptance */
+       struct key              *key;           /* security for this socket */
+       struct key              *securities;    /* list of server security descriptors */
+       struct rb_root          calls;          /* outstanding calls on this socket */
+       unsigned long           flags;
+#define RXRPC_SOCK_EXCLUSIVE_CONN      1       /* exclusive connection for a client socket */
+       rwlock_t                call_lock;      /* lock for calls */
+       u32                     min_sec_level;  /* minimum security level */
+#define RXRPC_SECURITY_MAX     RXRPC_SECURITY_ENCRYPT
+       struct sockaddr_rxrpc   srx;            /* local address */
+       sa_family_t             proto;          /* protocol created with */
+       __be16                  service_id;     /* service ID of local/remote service */
+};
+
+#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
+
+/*
+ * RxRPC socket buffer private variables
+ * - max 48 bytes (struct sk_buff::cb)
+ */
+struct rxrpc_skb_priv {
+       struct rxrpc_call       *call;          /* call with which associated */
+       unsigned long           resend_at;      /* time in jiffies at which to resend */
+       union {
+               unsigned        offset;         /* offset into buffer of next read */
+               int             remain;         /* amount of space remaining for next write */
+               u32             error;          /* network error code */
+               bool            need_resend;    /* T if needs resending */
+       };
+
+       struct rxrpc_header     hdr;            /* RxRPC packet header from this packet */
+};
+
+#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
+
+enum rxrpc_command {
+       RXRPC_CMD_SEND_DATA,            /* send data message */
+       RXRPC_CMD_SEND_ABORT,           /* request abort generation */
+       RXRPC_CMD_ACCEPT,               /* [server] accept incoming call */
+       RXRPC_CMD_REJECT_BUSY,          /* [server] reject a call as busy */
+};
+
+/*
+ * RxRPC security module interface
+ */
+struct rxrpc_security {
+       struct module           *owner;         /* providing module */
+       struct list_head        link;           /* link in master list */
+       const char              *name;          /* name of this service */
+       u8                      security_index; /* security type provided */
+
+       /* initialise a connection's security */
+       int (*init_connection_security)(struct rxrpc_connection *);
+
+       /* prime a connection's packet security */
+       void (*prime_packet_security)(struct rxrpc_connection *);
+
+       /* impose security on a packet */
+       int (*secure_packet)(const struct rxrpc_call *,
+                            struct sk_buff *,
+                            size_t,
+                            void *);
+
+       /* verify the security on a received packet */
+       int (*verify_packet)(const struct rxrpc_call *, struct sk_buff *,
+                            u32 *);
+
+       /* issue a challenge */
+       int (*issue_challenge)(struct rxrpc_connection *);
+
+       /* respond to a challenge */
+       int (*respond_to_challenge)(struct rxrpc_connection *,
+                                   struct sk_buff *,
+                                   u32 *);
+
+       /* verify a response */
+       int (*verify_response)(struct rxrpc_connection *,
+                              struct sk_buff *,
+                              u32 *);
+
+       /* clear connection security */
+       void (*clear)(struct rxrpc_connection *);
+};
+
+/*
+ * RxRPC local transport endpoint definition
+ * - matched by local port, address and protocol type
+ */
+struct rxrpc_local {
+       struct socket           *socket;        /* my UDP socket */
+       struct work_struct      destroyer;      /* endpoint destroyer */
+       struct work_struct      acceptor;       /* incoming call processor */
+       struct work_struct      rejecter;       /* packet reject writer */
+       struct list_head        services;       /* services listening on this endpoint */
+       struct list_head        link;           /* link in endpoint list */
+       struct rw_semaphore     defrag_sem;     /* control re-enablement of IP DF bit */
+       struct sk_buff_head     accept_queue;   /* incoming calls awaiting acceptance */
+       struct sk_buff_head     reject_queue;   /* packets awaiting rejection */
+       spinlock_t              lock;           /* access lock */
+       rwlock_t                services_lock;  /* lock for services list */
+       atomic_t                usage;
+       int                     debug_id;       /* debug ID for printks */
+       volatile char           error_rcvd;     /* T if received ICMP error outstanding */
+       struct sockaddr_rxrpc   srx;            /* local address */
+};
+
+/*
+ * RxRPC remote transport endpoint definition
+ * - matched by remote port, address and protocol type
+ * - holds the connection ID counter for connections between the two endpoints
+ */
+struct rxrpc_peer {
+       struct work_struct      destroyer;      /* peer destroyer */
+       struct list_head        link;           /* link in master peer list */
+       struct list_head        error_targets;  /* targets for net error distribution */
+       spinlock_t              lock;           /* access lock */
+       atomic_t                usage;
+       unsigned                if_mtu;         /* interface MTU for this peer */
+       unsigned                mtu;            /* network MTU for this peer */
+       unsigned                maxdata;        /* data size (MTU - hdrsize) */
+       unsigned short          hdrsize;        /* header size (IP + UDP + RxRPC) */
+       int                     debug_id;       /* debug ID for printks */
+       int                     net_error;      /* network error distributed */
+       struct sockaddr_rxrpc   srx;            /* remote address */
+
+       /* calculated RTT cache */
+#define RXRPC_RTT_CACHE_SIZE 32
+       suseconds_t             rtt;            /* current RTT estimate (in uS) */
+       unsigned                rtt_point;      /* next entry at which to insert */
+       unsigned                rtt_usage;      /* amount of cache actually used */
+       suseconds_t             rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
+};
+
+/*
+ * RxRPC point-to-point transport / connection manager definition
+ * - handles a bundle of connections between two endpoints
+ * - matched by { local, peer }
+ */
+struct rxrpc_transport {
+       struct rxrpc_local      *local;         /* local transport endpoint */
+       struct rxrpc_peer       *peer;          /* remote transport endpoint */
+       struct work_struct      error_handler;  /* network error distributor */
+       struct rb_root          bundles;        /* client connection bundles on this transport */
+       struct rb_root          client_conns;   /* client connections on this transport */
+       struct rb_root          server_conns;   /* server connections on this transport */
+       struct list_head        link;           /* link in master session list */
+       struct sk_buff_head     error_queue;    /* error packets awaiting processing */
+       time_t                  put_time;       /* time at which to reap */
+       spinlock_t              client_lock;    /* client connection allocation lock */
+       rwlock_t                conn_lock;      /* lock for active/dead connections */
+       atomic_t                usage;
+       int                     debug_id;       /* debug ID for printks */
+       unsigned int            conn_idcounter; /* connection ID counter (client) */
+};
+
+/*
+ * RxRPC client connection bundle
+ * - matched by { transport, service_id, key }
+ */
+struct rxrpc_conn_bundle {
+       struct rb_node          node;           /* node in transport's lookup tree */
+       struct list_head        unused_conns;   /* unused connections in this bundle */
+       struct list_head        avail_conns;    /* available connections in this bundle */
+       struct list_head        busy_conns;     /* busy connections in this bundle */
+       struct key              *key;           /* security for this bundle */
+       wait_queue_head_t       chanwait;       /* wait for channel to become available */
+       atomic_t                usage;
+       int                     debug_id;       /* debug ID for printks */
+       unsigned short          num_conns;      /* number of connections in this bundle */
+       __be16                  service_id;     /* service ID */
+       uint8_t                 security_ix;    /* security type */
+};
+
+/*
+ * RxRPC connection definition
+ * - matched by { transport, service_id, conn_id, direction, key }
+ * - each connection can only handle four simultaneous calls
+ */
+struct rxrpc_connection {
+       struct rxrpc_transport  *trans;         /* transport session */
+       struct rxrpc_conn_bundle *bundle;       /* connection bundle (client) */
+       struct work_struct      processor;      /* connection event processor */
+       struct rb_node          node;           /* node in transport's lookup tree */
+       struct list_head        link;           /* link in master connection list */
+       struct list_head        bundle_link;    /* link in bundle */
+       struct rb_root          calls;          /* calls on this connection */
+       struct sk_buff_head     rx_queue;       /* received conn-level packets */
+       struct rxrpc_call       *channels[RXRPC_MAXCALLS]; /* channels (active calls) */
+       struct rxrpc_security   *security;      /* applied security module */
+       struct key              *key;           /* security for this connection (client) */
+       struct key              *server_key;    /* security for this service */
+       struct crypto_blkcipher *cipher;        /* encryption handle */
+       struct rxrpc_crypt      csum_iv;        /* packet checksum base */
+       unsigned long           events;
+#define RXRPC_CONN_CHALLENGE   0               /* send challenge packet */
+       time_t                  put_time;       /* time at which to reap */
+       rwlock_t                lock;           /* access lock */
+       spinlock_t              state_lock;     /* state-change lock */
+       atomic_t                usage;
+       u32                     real_conn_id;   /* connection ID (host-endian) */
+       enum {                                  /* current state of connection */
+               RXRPC_CONN_UNUSED,              /* - connection not yet attempted */
+               RXRPC_CONN_CLIENT,              /* - client connection */
+               RXRPC_CONN_SERVER_UNSECURED,    /* - server unsecured connection */
+               RXRPC_CONN_SERVER_CHALLENGING,  /* - server challenging for security */
+               RXRPC_CONN_SERVER,              /* - server secured connection */
+               RXRPC_CONN_REMOTELY_ABORTED,    /* - conn aborted by peer */
+               RXRPC_CONN_LOCALLY_ABORTED,     /* - conn aborted locally */
+               RXRPC_CONN_NETWORK_ERROR,       /* - conn terminated by network error */
+       } state;
+       int                     error;          /* error code for local abort */
+       int                     debug_id;       /* debug ID for printks */
+       unsigned                call_counter;   /* call ID counter */
+       atomic_t                serial;         /* packet serial number counter */
+       atomic_t                hi_serial;      /* highest serial number received */
+       u8                      avail_calls;    /* number of calls available */
+       u8                      size_align;     /* data size alignment (for security) */
+       u8                      header_size;    /* rxrpc + security header size */
+       u8                      security_size;  /* security header size */
+       u32                     security_level; /* security level negotiated */
+       u32                     security_nonce; /* response re-use preventer */
+
+       /* the following are all in net order */
+       __be32                  epoch;          /* epoch of this connection */
+       __be32                  cid;            /* connection ID */
+       __be16                  service_id;     /* service ID */
+       u8                      security_ix;    /* security type */
+       u8                      in_clientflag;  /* RXRPC_CLIENT_INITIATED if we are server */
+       u8                      out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
+};
+
+/*
+ * RxRPC call definition
+ * - matched by { connection, call_id }
+ */
+struct rxrpc_call {
+       struct rxrpc_connection *conn;          /* connection carrying call */
+       struct rxrpc_sock       *socket;        /* socket responsible */
+       struct timer_list       lifetimer;      /* lifetime remaining on call */
+       struct timer_list       deadspan;       /* reap timer for re-ACK'ing, etc  */
+       struct timer_list       ack_timer;      /* ACK generation timer */
+       struct timer_list       resend_timer;   /* Tx resend timer */
+       struct work_struct      destroyer;      /* call destroyer */
+       struct work_struct      processor;      /* packet processor and ACK generator */
+       struct list_head        link;           /* link in master call list */
+       struct list_head        error_link;     /* link in error distribution list */
+       struct list_head        accept_link;    /* calls awaiting acceptance */
+       struct rb_node          sock_node;      /* node in socket call tree */
+       struct rb_node          conn_node;      /* node in connection call tree */
+       struct sk_buff_head     rx_queue;       /* received packets */
+       struct sk_buff_head     rx_oos_queue;   /* packets received out of sequence */
+       struct sk_buff          *tx_pending;    /* Tx socket buffer being filled */
+       wait_queue_head_t       tx_waitq;       /* wait for Tx window space to become available */
+       unsigned long           user_call_ID;   /* user-defined call ID */
+       unsigned long           creation_jif;   /* time of call creation */
+       unsigned long           flags;
+#define RXRPC_CALL_RELEASED    0       /* call has been released - no more messages to userspace */
+#define RXRPC_CALL_TERMINAL_MSG        1       /* call has given the socket its final message */
+#define RXRPC_CALL_RCVD_LAST   2       /* all packets received */
+#define RXRPC_CALL_RUN_RTIMER  3       /* Tx resend timer started */
+#define RXRPC_CALL_TX_SOFT_ACK 4       /* sent some soft ACKs */
+#define RXRPC_CALL_PROC_BUSY   5       /* the processor is busy */
+#define RXRPC_CALL_INIT_ACCEPT 6       /* acceptance was initiated */
+#define RXRPC_CALL_HAS_USERID  7       /* has a user ID attached */
+#define RXRPC_CALL_EXPECT_OOS  8       /* expect out of sequence packets */
+       unsigned long           events;
+#define RXRPC_CALL_RCVD_ACKALL 0       /* ACKALL or reply received */
+#define RXRPC_CALL_RCVD_BUSY   1       /* busy packet received */
+#define RXRPC_CALL_RCVD_ABORT  2       /* abort packet received */
+#define RXRPC_CALL_RCVD_ERROR  3       /* network error received */
+#define RXRPC_CALL_ACK_FINAL   4       /* need to generate final ACK (and release call) */
+#define RXRPC_CALL_ACK         5       /* need to generate ACK */
+#define RXRPC_CALL_REJECT_BUSY 6       /* need to generate busy message */
+#define RXRPC_CALL_ABORT       7       /* need to generate abort */
+#define RXRPC_CALL_CONN_ABORT  8       /* local connection abort generated */
+#define RXRPC_CALL_RESEND_TIMER        9       /* Tx resend timer expired */
+#define RXRPC_CALL_RESEND      10      /* Tx resend required */
+#define RXRPC_CALL_DRAIN_RX_OOS        11      /* drain the Rx out of sequence queue */
+#define RXRPC_CALL_LIFE_TIMER  12      /* call's lifetimer ran out */
+#define RXRPC_CALL_ACCEPTED    13      /* incoming call accepted by userspace app */
+#define RXRPC_CALL_SECURED     14      /* incoming call's connection is now secure */
+#define RXRPC_CALL_POST_ACCEPT 15      /* need to post an "accept?" message to the app */
+#define RXRPC_CALL_RELEASE     16      /* need to release the call's resources */
+
+       spinlock_t              lock;
+       rwlock_t                state_lock;     /* lock for state transition */
+       atomic_t                usage;
+       atomic_t                sequence;       /* Tx data packet sequence counter */
+       u32                     abort_code;     /* local/remote abort code */
+       enum {                                  /* current state of call */
+               RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
+               RXRPC_CALL_CLIENT_AWAIT_REPLY,  /* - client awaiting reply */
+               RXRPC_CALL_CLIENT_RECV_REPLY,   /* - client receiving reply phase */
+               RXRPC_CALL_CLIENT_FINAL_ACK,    /* - client sending final ACK phase */
+               RXRPC_CALL_SERVER_SECURING,     /* - server securing request connection */
+               RXRPC_CALL_SERVER_ACCEPTING,    /* - server accepting request */
+               RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
+               RXRPC_CALL_SERVER_ACK_REQUEST,  /* - server pending ACK of request */
+               RXRPC_CALL_SERVER_SEND_REPLY,   /* - server sending reply */
+               RXRPC_CALL_SERVER_AWAIT_ACK,    /* - server awaiting final ACK */
+               RXRPC_CALL_COMPLETE,            /* - call completed */
+               RXRPC_CALL_SERVER_BUSY,         /* - call rejected by busy server */
+               RXRPC_CALL_REMOTELY_ABORTED,    /* - call aborted by peer */
+               RXRPC_CALL_LOCALLY_ABORTED,     /* - call aborted locally on error or close */
+               RXRPC_CALL_NETWORK_ERROR,       /* - call terminated by network error */
+               RXRPC_CALL_DEAD,                /* - call is dead */
+       } state;
+       int                     debug_id;       /* debug ID for printks */
+       u8                      channel;        /* connection channel occupied by this call */
+
+       /* transmission-phase ACK management */
+       uint8_t                 acks_head;      /* offset into window of first entry */
+       uint8_t                 acks_tail;      /* offset into window of last entry */
+       uint8_t                 acks_winsz;     /* size of un-ACK'd window */
+       uint8_t                 acks_unacked;   /* lowest unacked packet in last ACK received */
+       int                     acks_latest;    /* serial number of latest ACK received */
+       rxrpc_seq_t             acks_hard;      /* highest definitively ACK'd msg seq */
+       unsigned long           *acks_window;   /* sent packet window
+                                                * - elements are pointers with LSB set if ACK'd
+                                                */
+
+       /* receive-phase ACK management */
+       rxrpc_seq_t             rx_data_expect; /* next data seq ID expected to be received */
+       rxrpc_seq_t             rx_data_post;   /* next data seq ID expected to be posted */
+       rxrpc_seq_t             rx_data_recv;   /* last data seq ID encountered by recvmsg */
+       rxrpc_seq_t             rx_data_eaten;  /* last data seq ID consumed by recvmsg */
+       rxrpc_seq_t             rx_first_oos;   /* first packet in rx_oos_queue (or 0) */
+       rxrpc_seq_t             ackr_win_top;   /* top of ACK window (rx_data_eaten is bottom) */
+       rxrpc_seq_net_t         ackr_prev_seq;  /* previous sequence number received */
+       uint8_t                 ackr_reason;    /* reason to ACK */
+       __be32                  ackr_serial;    /* serial of packet being ACK'd */
+       atomic_t                ackr_not_idle;  /* number of packets in Rx queue */
+
+       /* received packet records, 1 bit per record */
+#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
+       unsigned long           ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];
+
+       /* the following should all be in net order */
+       __be32                  cid;            /* connection ID + channel index  */
+       __be32                  call_id;        /* call ID on connection  */
+};
+
+/*
+ * RxRPC key for Kerberos (type-2 security)
+ */
+struct rxkad_key {
+       u16     security_index;         /* RxRPC header security index */
+       u16     ticket_len;             /* length of ticket[] */
+       u32     expiry;                 /* time at which expires */
+       u32     kvno;                   /* key version number */
+       u8      session_key[8];         /* DES session key */
+       u8      ticket[0];              /* the encrypted ticket */
+};
+
+struct rxrpc_key_payload {
+       struct rxkad_key k;
+};
+
+/*
+ * locally abort an RxRPC call
+ */
+static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
+{
+       write_lock_bh(&call->state_lock);
+       if (call->state < RXRPC_CALL_COMPLETE) {
+               call->abort_code = abort_code;
+               call->state = RXRPC_CALL_LOCALLY_ABORTED;
+               set_bit(RXRPC_CALL_ABORT, &call->events);
+       }
+       write_unlock_bh(&call->state_lock);
+}
+
+/*
+ * af_rxrpc.c
+ */
+extern atomic_t rxrpc_n_skbs;
+extern __be32 rxrpc_epoch;
+extern atomic_t rxrpc_debug_id;
+extern struct workqueue_struct *rxrpc_workqueue;
+
+/*
+ * ar-accept.c
+ */
+extern void rxrpc_accept_incoming_calls(struct work_struct *);
+extern struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *,
+                                           unsigned long);
+extern int rxrpc_reject_call(struct rxrpc_sock *);
+
+/*
+ * ar-ack.c
+ */
+extern void __rxrpc_propose_ACK(struct rxrpc_call *, uint8_t, __be32, bool);
+extern void rxrpc_propose_ACK(struct rxrpc_call *, uint8_t, __be32, bool);
+extern void rxrpc_process_call(struct work_struct *);
+
+/*
+ * ar-call.c
+ */
+extern struct kmem_cache *rxrpc_call_jar;
+extern struct list_head rxrpc_calls;
+extern rwlock_t rxrpc_call_lock;
+
+extern struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
+                                               struct rxrpc_transport *,
+                                               struct rxrpc_conn_bundle *,
+                                               unsigned long, int, gfp_t);
+extern struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
+                                             struct rxrpc_connection *,
+                                             struct rxrpc_header *, gfp_t);
+extern struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *,
+                                                unsigned long);
+extern void rxrpc_release_call(struct rxrpc_call *);
+extern void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
+extern void __rxrpc_put_call(struct rxrpc_call *);
+extern void __exit rxrpc_destroy_all_calls(void);
+
+/*
+ * ar-connection.c
+ */
+extern struct list_head rxrpc_connections;
+extern rwlock_t rxrpc_connection_lock;
+
+extern struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
+                                                 struct rxrpc_transport *,
+                                                 struct key *,
+                                                 __be16, gfp_t);
+extern void rxrpc_put_bundle(struct rxrpc_transport *,
+                            struct rxrpc_conn_bundle *);
+extern int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
+                             struct rxrpc_conn_bundle *, struct rxrpc_call *,
+                             gfp_t);
+extern void rxrpc_put_connection(struct rxrpc_connection *);
+extern void __exit rxrpc_destroy_all_connections(void);
+extern struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
+                                                     struct rxrpc_header *);
+extern struct rxrpc_connection *
+rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
+                         gfp_t);
+
+/*
+ * ar-connevent.c
+ */
+extern void rxrpc_process_connection(struct work_struct *);
+extern void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
+extern void rxrpc_reject_packets(struct work_struct *);
+
+/*
+ * ar-error.c
+ */
+extern void rxrpc_UDP_error_report(struct sock *);
+extern void rxrpc_UDP_error_handler(struct work_struct *);
+
+/*
+ * ar-input.c
+ */
+extern unsigned long rxrpc_ack_timeout;
+extern const char *rxrpc_pkts[];
+
+extern void rxrpc_data_ready(struct sock *, int);
+extern int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool,
+                              bool);
+extern void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
+
+/*
+ * ar-local.c
+ */
+extern rwlock_t rxrpc_local_lock;
+extern struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
+extern void rxrpc_put_local(struct rxrpc_local *);
+extern void __exit rxrpc_destroy_all_locals(void);
+
+/*
+ * ar-key.c
+ */
+extern struct key_type key_type_rxrpc;
+extern struct key_type key_type_rxrpc_s;
+
+extern int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
+extern int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
+extern int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *,
+                                    time_t, u32);
+
+/*
+ * ar-output.c
+ */
+extern int rxrpc_resend_timeout;
+
+extern int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
+extern int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
+                               struct rxrpc_transport *, struct msghdr *,
+                               size_t);
+extern int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *,
+                               struct msghdr *, size_t);
+
+/*
+ * ar-peer.c
+ */
+extern struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
+extern void rxrpc_put_peer(struct rxrpc_peer *);
+extern struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *,
+                                         __be32, __be16);
+extern void __exit rxrpc_destroy_all_peers(void);
+
+/*
+ * ar-proc.c
+ */
+extern const char *rxrpc_call_states[];
+extern struct file_operations rxrpc_call_seq_fops;
+extern struct file_operations rxrpc_connection_seq_fops;
+
+/*
+ * ar-recvmsg.c
+ */
+extern void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
+extern int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *,
+                        size_t, int);
+
+/*
+ * ar-security.c
+ */
+extern int rxrpc_register_security(struct rxrpc_security *);
+extern void rxrpc_unregister_security(struct rxrpc_security *);
+extern int rxrpc_init_client_conn_security(struct rxrpc_connection *);
+extern int rxrpc_init_server_conn_security(struct rxrpc_connection *);
+extern int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *,
+                              size_t, void *);
+extern int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *,
+                              u32 *);
+extern void rxrpc_clear_conn_security(struct rxrpc_connection *);
+
+/*
+ * ar-skbuff.c
+ */
+extern void rxrpc_packet_destructor(struct sk_buff *);
+
+/*
+ * ar-transport.c
+ */
+extern struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
+                                                  struct rxrpc_peer *,
+                                                  gfp_t);
+extern void rxrpc_put_transport(struct rxrpc_transport *);
+extern void __exit rxrpc_destroy_all_transports(void);
+extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
+                                                   struct rxrpc_peer *);
+
+/*
+ * debug tracing
+ */
+extern unsigned rxrpc_debug;
+
+#define dbgprintk(FMT,...) \
+       printk("[%x%-6.6s] "FMT"\n", smp_processor_id(), current->comm ,##__VA_ARGS__)
+
+/* make sure we maintain the format strings, even when debugging is disabled */
+static inline __attribute__((format(printf,1,2)))
+void _dbprintk(const char *fmt, ...)
+{
+}
+
+#define kenter(FMT,...)        dbgprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__)
+#define kleave(FMT,...)        dbgprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__)
+#define kdebug(FMT,...)        dbgprintk("    "FMT ,##__VA_ARGS__)
+#define kproto(FMT,...)        dbgprintk("### "FMT ,##__VA_ARGS__)
+#define knet(FMT,...)  dbgprintk("@@@ "FMT ,##__VA_ARGS__)
+
+
+#if defined(__KDEBUG)
+#define _enter(FMT,...)        kenter(FMT,##__VA_ARGS__)
+#define _leave(FMT,...)        kleave(FMT,##__VA_ARGS__)
+#define _debug(FMT,...)        kdebug(FMT,##__VA_ARGS__)
+#define _proto(FMT,...)        kproto(FMT,##__VA_ARGS__)
+#define _net(FMT,...)  knet(FMT,##__VA_ARGS__)
+
+#elif defined(CONFIG_AF_RXRPC_DEBUG)
+#define RXRPC_DEBUG_KENTER     0x01
+#define RXRPC_DEBUG_KLEAVE     0x02
+#define RXRPC_DEBUG_KDEBUG     0x04
+#define RXRPC_DEBUG_KPROTO     0x08
+#define RXRPC_DEBUG_KNET       0x10
+
+#define _enter(FMT,...)                                        \
+do {                                                   \
+       if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER)) \
+               kenter(FMT,##__VA_ARGS__);              \
+} while (0)
+
+#define _leave(FMT,...)                                        \
+do {                                                   \
+       if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE)) \
+               kleave(FMT,##__VA_ARGS__);              \
+} while (0)
+
+#define _debug(FMT,...)                                        \
+do {                                                   \
+       if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG)) \
+               kdebug(FMT,##__VA_ARGS__);              \
+} while (0)
+
+#define _proto(FMT,...)                                        \
+do {                                                   \
+       if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO)) \
+               kproto(FMT,##__VA_ARGS__);              \
+} while (0)
+
+#define _net(FMT,...)                                  \
+do {                                                   \
+       if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))   \
+               knet(FMT,##__VA_ARGS__);                \
+} while (0)
+
+#else
+#define _enter(FMT,...)        _dbprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__)
+#define _leave(FMT,...)        _dbprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__)
+#define _debug(FMT,...)        _dbprintk("    "FMT ,##__VA_ARGS__)
+#define _proto(FMT,...)        _dbprintk("### "FMT ,##__VA_ARGS__)
+#define _net(FMT,...)  _dbprintk("@@@ "FMT ,##__VA_ARGS__)
+#endif
+
+/*
+ * debug assertion checking
+ */
+#if 1 // defined(__KDEBUGALL)
+
+#define ASSERT(X)                                              \
+do {                                                           \
+       if (unlikely(!(X))) {                                   \
+               printk(KERN_ERR "\n");                          \
+               printk(KERN_ERR "RxRPC: Assertion failed\n");   \
+               BUG();                                          \
+       }                                                       \
+} while(0)
+
+#define ASSERTCMP(X, OP, Y)                                            \
+do {                                                                   \
+       if (unlikely(!((X) OP (Y)))) {                                  \
+               printk(KERN_ERR "\n");                                  \
+               printk(KERN_ERR "RxRPC: Assertion failed\n");           \
+               printk(KERN_ERR "%lu " #OP " %lu is false\n",           \
+                      (unsigned long)(X), (unsigned long)(Y));         \
+               printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n",       \
+                      (unsigned long)(X), (unsigned long)(Y));         \
+               BUG();                                                  \
+       }                                                               \
+} while(0)
+
+#define ASSERTIF(C, X)                                         \
+do {                                                           \
+       if (unlikely((C) && !(X))) {                            \
+               printk(KERN_ERR "\n");                          \
+               printk(KERN_ERR "RxRPC: Assertion failed\n");   \
+               BUG();                                          \
+       }                                                       \
+} while(0)
+
+#define ASSERTIFCMP(C, X, OP, Y)                                       \
+do {                                                                   \
+       if (unlikely((C) && !((X) OP (Y)))) {                           \
+               printk(KERN_ERR "\n");                                  \
+               printk(KERN_ERR "RxRPC: Assertion failed\n");           \
+               printk(KERN_ERR "%lu " #OP " %lu is false\n",           \
+                      (unsigned long)(X), (unsigned long)(Y));         \
+               printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n",       \
+                      (unsigned long)(X), (unsigned long)(Y));         \
+               BUG();                                                  \
+       }                                                               \
+} while(0)
+
+#else
+
+#define ASSERT(X)                              \
+do {                                           \
+} while(0)
+
+#define ASSERTCMP(X, OP, Y)                    \
+do {                                           \
+} while(0)
+
+#define ASSERTIF(C, X)                         \
+do {                                           \
+} while(0)
+
+#define ASSERTIFCMP(C, X, OP, Y)               \
+do {                                           \
+} while(0)
+
+#endif /* __KDEBUGALL */
+
+/*
+ * socket buffer accounting / leak finding
+ */
+static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn)
+{
+       //_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
+       //atomic_inc(&rxrpc_n_skbs);
+}
+
+#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__)
+
+static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn)
+{
+       //_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
+       //atomic_dec(&rxrpc_n_skbs);
+}
+
+#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__)
+
+static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn)
+{
+       if (skb) {
+               CHECK_SLAB_OKAY(&skb->users);
+               //_net("free skb %p %s [%d]",
+               //     skb, fn, atomic_read(&rxrpc_n_skbs));
+               //atomic_dec(&rxrpc_n_skbs);
+               kfree_skb(skb);
+       }
+}
+
+#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__)
+
+static inline void rxrpc_purge_queue(struct sk_buff_head *list)
+{
+       struct sk_buff *skb;
+       while ((skb = skb_dequeue((list))) != NULL)
+               rxrpc_free_skb(skb);
+}
+
+static inline void __rxrpc_get_local(struct rxrpc_local *local, const char *f)
+{
+       CHECK_SLAB_OKAY(&local->usage);
+       if (atomic_inc_return(&local->usage) == 1)
+               printk(KERN_ERR "resurrected (%s)\n", f);
+}
+
+#define rxrpc_get_local(LOCAL) __rxrpc_get_local((LOCAL), __func__)
+
+#define rxrpc_get_call(CALL)                           \
+do {                                                   \
+       CHECK_SLAB_OKAY(&(CALL)->usage);                \
+       if (atomic_inc_return(&(CALL)->usage) == 1)     \
+               BUG();                                  \
+} while(0)
+
+#define rxrpc_put_call(CALL)                           \
+do {                                                   \
+       __rxrpc_put_call(CALL);                         \
+} while(0)
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
new file mode 100644
index 0000000..7e049ff
--- /dev/null
@@ -0,0 +1,334 @@
+/* RxRPC key management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * RxRPC keys should have a description describing their purpose:
+ *     "afs@CAMBRIDGE.REDHAT.COM"
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/key.h>
+#include <linux/crypto.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include <keys/rxrpc-type.h>
+#include <keys/user-type.h>
+#include "ar-internal.h"
+
+static int rxrpc_instantiate(struct key *, const void *, size_t);
+static int rxrpc_instantiate_s(struct key *, const void *, size_t);
+static void rxrpc_destroy(struct key *);
+static void rxrpc_destroy_s(struct key *);
+static void rxrpc_describe(const struct key *, struct seq_file *);
+
+/*
+ * rxrpc defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_rxrpc = {
+       .name           = "rxrpc",
+       .instantiate    = rxrpc_instantiate,
+       .match          = user_match,
+       .destroy        = rxrpc_destroy,
+       .describe       = rxrpc_describe,
+};
+
+EXPORT_SYMBOL(key_type_rxrpc);
+
+/*
+ * rxrpc server defined keys take "<serviceId>:<securityIndex>" as the
+ * description and an 8-byte decryption key as the payload
+ */
+struct key_type key_type_rxrpc_s = {
+       .name           = "rxrpc_s",
+       .instantiate    = rxrpc_instantiate_s,
+       .match          = user_match,
+       .destroy        = rxrpc_destroy_s,
+       .describe       = rxrpc_describe,
+};
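+
+/*
+ * For instance (hypothetical numbers), a server key described as "1:2"
+ * would cover service ID 1 protected by security index 2 (rxkad, the only
+ * security class that rxrpc_instantiate() below accepts for client keys).
+ */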
+
+/*
+ * instantiate an rxrpc defined key
+ * data should be of the form:
+ *     OFFSET  LEN     CONTENT
+ *     0       4       key interface version number
+ *     4       2       security index (type)
+ *     6       2       ticket length
+ *     8       4       key expiry time (time_t)
+ *     12      4       kvno
+ *     16      8       session key
+ *     24      [len]   ticket
+ *
+ * if no data is provided, then a no-security key is made
+ */
+static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
+{
+       const struct rxkad_key *tsec;
+       struct rxrpc_key_payload *upayload;
+       size_t plen;
+       u32 kver;
+       int ret;
+
+       _enter("{%x},,%zu", key_serial(key), datalen);
+
+       /* handle a no-security key */
+       if (!data && datalen == 0)
+               return 0;
+
+       /* get the key interface version number */
+       ret = -EINVAL;
+       if (datalen <= 4 || !data)
+               goto error;
+       memcpy(&kver, data, sizeof(kver));
+       data += sizeof(kver);
+       datalen -= sizeof(kver);
+
+       _debug("KEY I/F VERSION: %u", kver);
+
+       ret = -EKEYREJECTED;
+       if (kver != 1)
+               goto error;
+
+       /* deal with a version 1 key */
+       ret = -EINVAL;
+       if (datalen < sizeof(*tsec))
+               goto error;
+
+       tsec = data;
+       if (datalen != sizeof(*tsec) + tsec->ticket_len)
+               goto error;
+
+       _debug("SCIX: %u", tsec->security_index);
+       _debug("TLEN: %u", tsec->ticket_len);
+       _debug("EXPY: %x", tsec->expiry);
+       _debug("KVNO: %u", tsec->kvno);
+       _debug("SKEY: %02x%02x%02x%02x%02x%02x%02x%02x",
+              tsec->session_key[0], tsec->session_key[1],
+              tsec->session_key[2], tsec->session_key[3],
+              tsec->session_key[4], tsec->session_key[5],
+              tsec->session_key[6], tsec->session_key[7]);
+       if (tsec->ticket_len >= 8)
+               _debug("TCKT: %02x%02x%02x%02x%02x%02x%02x%02x",
+                      tsec->ticket[0], tsec->ticket[1],
+                      tsec->ticket[2], tsec->ticket[3],
+                      tsec->ticket[4], tsec->ticket[5],
+                      tsec->ticket[6], tsec->ticket[7]);
+
+       ret = -EPROTONOSUPPORT;
+       if (tsec->security_index != 2)
+               goto error;
+
+       key->type_data.x[0] = tsec->security_index;
+
+       plen = sizeof(*upayload) + tsec->ticket_len;
+       ret = key_payload_reserve(key, plen);
+       if (ret < 0)
+               goto error;
+
+       ret = -ENOMEM;
+       upayload = kmalloc(plen, GFP_KERNEL);
+       if (!upayload)
+               goto error;
+
+       /* attach the data */
+       memcpy(&upayload->k, tsec, sizeof(*tsec));
+       memcpy(&upayload->k.ticket, (void *)tsec + sizeof(*tsec),
+              tsec->ticket_len);
+       key->payload.data = upayload;
+       key->expiry = tsec->expiry;
+       ret = 0;
+
+error:
+       return ret;
+}
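+
+/*
+ * For an in-kernel example of constructing and instantiating such a
+ * version-1 blob (kver 1, security index 2, zero-length ticket), see
+ * rxrpc_get_server_data_key() at the bottom of this file.
+ */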
+
+/*
+ * instantiate a server secret key
+ * data should be a pointer to the 8-byte secret key
+ */
+static int rxrpc_instantiate_s(struct key *key, const void *data,
+                              size_t datalen)
+{
+       struct crypto_blkcipher *ci;
+
+       _enter("{%x},,%zu", key_serial(key), datalen);
+
+       if (datalen != 8)
+               return -EINVAL;
+
+       memcpy(&key->type_data, data, 8);
+
+       ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(ci)) {
+               _leave(" = %ld", PTR_ERR(ci));
+               return PTR_ERR(ci);
+       }
+
+       if (crypto_blkcipher_setkey(ci, data, 8) < 0)
+               BUG();
+
+       key->payload.data = ci;
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * dispose of the data dangling from the corpse of a rxrpc key
+ */
+static void rxrpc_destroy(struct key *key)
+{
+       kfree(key->payload.data);
+}
+
+/*
+ * dispose of the data dangling from the corpse of a rxrpc key
+ */
+static void rxrpc_destroy_s(struct key *key)
+{
+       if (key->payload.data) {
+               crypto_free_blkcipher(key->payload.data);
+               key->payload.data = NULL;
+       }
+}
+
+/*
+ * describe the rxrpc key
+ */
+static void rxrpc_describe(const struct key *key, struct seq_file *m)
+{
+       seq_puts(m, key->description);
+}
+
+/*
+ * grab the security key for a socket
+ */
+int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen)
+{
+       struct key *key;
+       char *description;
+
+       _enter("");
+
+       if (optlen <= 0 || optlen > PAGE_SIZE - 1)
+               return -EINVAL;
+
+       description = kmalloc(optlen + 1, GFP_KERNEL);
+       if (!description)
+               return -ENOMEM;
+
+       if (copy_from_user(description, optval, optlen)) {
+               kfree(description);
+               return -EFAULT;
+       }
+       description[optlen] = 0;
+
+       key = request_key(&key_type_rxrpc, description, NULL);
+       if (IS_ERR(key)) {
+               kfree(description);
+               _leave(" = %ld", PTR_ERR(key));
+               return PTR_ERR(key);
+       }
+
+       rx->key = key;
+       kfree(description);
+       _leave(" = 0 [key %x]", key->serial);
+       return 0;
+}
+
+/*
+ * grab the security keyring for a server socket
+ */
+int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval,
+                        int optlen)
+{
+       struct key *key;
+       char *description;
+
+       _enter("");
+
+       if (optlen <= 0 || optlen > PAGE_SIZE - 1)
+               return -EINVAL;
+
+       description = kmalloc(optlen + 1, GFP_KERNEL);
+       if (!description)
+               return -ENOMEM;
+
+       if (copy_from_user(description, optval, optlen)) {
+               kfree(description);
+               return -EFAULT;
+       }
+       description[optlen] = 0;
+
+       key = request_key(&key_type_keyring, description, NULL);
+       if (IS_ERR(key)) {
+               kfree(description);
+               _leave(" = %ld", PTR_ERR(key));
+               return PTR_ERR(key);
+       }
+
+       rx->securities = key;
+       kfree(description);
+       _leave(" = 0 [key %x]", key->serial);
+       return 0;
+}
+
+/*
+ * generate a server data key
+ */
+int rxrpc_get_server_data_key(struct rxrpc_connection *conn,
+                             const void *session_key,
+                             time_t expiry,
+                             u32 kvno)
+{
+       struct key *key;
+       int ret;
+
+       struct {
+               u32 kver;
+               struct rxkad_key tsec;
+       } data;
+
+       _enter("");
+
+       key = key_alloc(&key_type_rxrpc, "x", 0, 0, current, 0,
+                       KEY_ALLOC_NOT_IN_QUOTA);
+       if (IS_ERR(key)) {
+               _leave(" = -ENOMEM [alloc %ld]", PTR_ERR(key));
+               return -ENOMEM;
+       }
+
+       _debug("key %d", key_serial(key));
+
+       data.kver = 1;
+       data.tsec.security_index = 2;
+       data.tsec.ticket_len = 0;
+       data.tsec.expiry = expiry;
+       data.tsec.kvno = 0;
+
+       memcpy(&data.tsec.session_key, session_key,
+              sizeof(data.tsec.session_key));
+
+       ret = key_instantiate_and_link(key, &data, sizeof(data), NULL, NULL);
+       if (ret < 0)
+               goto error;
+
+       conn->key = key;
+       _leave(" = 0 [%d]", key_serial(key));
+       return 0;
+
+error:
+       key_revoke(key);
+       key_put(key);
+       _leave(" = -ENOMEM [ins %d]", ret);
+       return -ENOMEM;
+}
+
+EXPORT_SYMBOL(rxrpc_get_server_data_key);
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
new file mode 100644 (file)
index 0000000..fe03f71
--- /dev/null
@@ -0,0 +1,309 @@
+/* AF_RXRPC local endpoint management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static LIST_HEAD(rxrpc_locals);
+DEFINE_RWLOCK(rxrpc_local_lock);
+static DECLARE_RWSEM(rxrpc_local_sem);
+static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq);
+
+static void rxrpc_destroy_local(struct work_struct *work);
+
+/*
+ * allocate a new local
+ */
+static struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
+{
+       struct rxrpc_local *local;
+
+       local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
+       if (local) {
+               INIT_WORK(&local->destroyer, &rxrpc_destroy_local);
+               INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls);
+               INIT_WORK(&local->rejecter, &rxrpc_reject_packets);
+               INIT_LIST_HEAD(&local->services);
+               INIT_LIST_HEAD(&local->link);
+               init_rwsem(&local->defrag_sem);
+               skb_queue_head_init(&local->accept_queue);
+               skb_queue_head_init(&local->reject_queue);
+               spin_lock_init(&local->lock);
+               rwlock_init(&local->services_lock);
+               atomic_set(&local->usage, 1);
+               local->debug_id = atomic_inc_return(&rxrpc_debug_id);
+               memcpy(&local->srx, srx, sizeof(*srx));
+       }
+
+       _leave(" = %p", local);
+       return local;
+}
+
+/*
+ * create the local socket
+ * - must be called with rxrpc_local_sem writelocked
+ */
+static int rxrpc_create_local(struct rxrpc_local *local)
+{
+       struct sock *sock;
+       int ret, opt;
+
+       _enter("%p{%d}", local, local->srx.transport_type);
+
+       /* create a socket to represent the local endpoint */
+       ret = sock_create_kern(PF_INET, local->srx.transport_type, IPPROTO_UDP,
+                              &local->socket);
+       if (ret < 0) {
+               _leave(" = %d [socket]", ret);
+               return ret;
+       }
+
+       /* if a local address was supplied then bind it */
+       if (local->srx.transport_len > sizeof(sa_family_t)) {
+               _debug("bind");
+               ret = kernel_bind(local->socket,
+                                 (struct sockaddr *) &local->srx.transport,
+                                 local->srx.transport_len);
+               if (ret < 0) {
+                       _debug("bind failed");
+                       goto error;
+               }
+       }
+
+       /* we want to receive ICMP errors */
+       opt = 1;
+       ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+                               (char *) &opt, sizeof(opt));
+       if (ret < 0) {
+               _debug("setsockopt failed");
+               goto error;
+       }
+
+       /* we want to set the don't fragment bit */
+       opt = IP_PMTUDISC_DO;
+       ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+                               (char *) &opt, sizeof(opt));
+       if (ret < 0) {
+               _debug("setsockopt failed");
+               goto error;
+       }
+
+       write_lock_bh(&rxrpc_local_lock);
+       list_add(&local->link, &rxrpc_locals);
+       write_unlock_bh(&rxrpc_local_lock);
+
+       /* set the socket up */
+       sock = local->socket->sk;
+       sock->sk_user_data      = local;
+       sock->sk_data_ready     = rxrpc_data_ready;
+       sock->sk_error_report   = rxrpc_UDP_error_report;
+       _leave(" = 0");
+       return 0;
+
+error:
+       local->socket->ops->shutdown(local->socket, 2);
+       local->socket->sk->sk_user_data = NULL;
+       sock_release(local->socket);
+       local->socket = NULL;
+
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * create a new local endpoint using the specified UDP address
+ */
+struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *srx)
+{
+       struct rxrpc_local *local;
+       int ret;
+
+       _enter("{%d,%u,%u.%u.%u.%u+%hu}",
+              srx->transport_type,
+              srx->transport.family,
+              NIPQUAD(srx->transport.sin.sin_addr),
+              ntohs(srx->transport.sin.sin_port));
+
+       down_write(&rxrpc_local_sem);
+
+       /* see if we have a suitable local endpoint already */
+       read_lock_bh(&rxrpc_local_lock);
+
+       list_for_each_entry(local, &rxrpc_locals, link) {
+               _debug("CMP {%d,%u,%u.%u.%u.%u+%hu}",
+                      local->srx.transport_type,
+                      local->srx.transport.family,
+                      NIPQUAD(local->srx.transport.sin.sin_addr),
+                      ntohs(local->srx.transport.sin.sin_port));
+
+               if (local->srx.transport_type != srx->transport_type ||
+                   local->srx.transport.family != srx->transport.family)
+                       continue;
+
+               switch (srx->transport.family) {
+               case AF_INET:
+                       if (local->srx.transport.sin.sin_port !=
+                           srx->transport.sin.sin_port)
+                               continue;
+                       if (memcmp(&local->srx.transport.sin.sin_addr,
+                                  &srx->transport.sin.sin_addr,
+                                  sizeof(struct in_addr)) != 0)
+                               continue;
+                       goto found_local;
+
+               default:
+                       BUG();
+               }
+       }
+
+       read_unlock_bh(&rxrpc_local_lock);
+
+       /* we didn't find one, so we need to create one */
+       local = rxrpc_alloc_local(srx);
+       if (!local) {
+               up_write(&rxrpc_local_sem);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       ret = rxrpc_create_local(local);
+       if (ret < 0) {
+               up_write(&rxrpc_local_sem);
+               kfree(local);
+               _leave(" = %d", ret);
+               return ERR_PTR(ret);
+       }
+
+       up_write(&rxrpc_local_sem);
+
+       _net("LOCAL new %d {%d,%u,%u.%u.%u.%u+%hu}",
+            local->debug_id,
+            local->srx.transport_type,
+            local->srx.transport.family,
+            NIPQUAD(local->srx.transport.sin.sin_addr),
+            ntohs(local->srx.transport.sin.sin_port));
+
+       _leave(" = %p [new]", local);
+       return local;
+
+found_local:
+       rxrpc_get_local(local);
+       read_unlock_bh(&rxrpc_local_lock);
+       up_write(&rxrpc_local_sem);
+
+       _net("LOCAL old %d {%d,%u,%u.%u.%u.%u+%hu}",
+            local->debug_id,
+            local->srx.transport_type,
+            local->srx.transport.family,
+            NIPQUAD(local->srx.transport.sin.sin_addr),
+            ntohs(local->srx.transport.sin.sin_port));
+
+       _leave(" = %p [reuse]", local);
+       return local;
+}
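+
+/*
+ * Note the two levels of locking used above: rxrpc_local_sem serialises
+ * endpoint creation against destruction (both of which may sleep), whilst
+ * rxrpc_local_lock guards the endpoint list itself and is always taken with
+ * BHs disabled, presumably because the list is also consulted from
+ * packet-receive context.
+ */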
+
+/*
+ * release a local endpoint
+ */
+void rxrpc_put_local(struct rxrpc_local *local)
+{
+       _enter("%p{u=%d}", local, atomic_read(&local->usage));
+
+       ASSERTCMP(atomic_read(&local->usage), >, 0);
+
+       /* to prevent a race, the decrement and the dequeue must be effectively
+        * atomic */
+       write_lock_bh(&rxrpc_local_lock);
+       if (unlikely(atomic_dec_and_test(&local->usage))) {
+               _debug("destroy local");
+               rxrpc_queue_work(&local->destroyer);
+       }
+       write_unlock_bh(&rxrpc_local_lock);
+       _leave("");
+}
+
+/*
+ * destroy a local endpoint
+ */
+static void rxrpc_destroy_local(struct work_struct *work)
+{
+       struct rxrpc_local *local =
+               container_of(work, struct rxrpc_local, destroyer);
+
+       _enter("%p{%d}", local, atomic_read(&local->usage));
+
+       down_write(&rxrpc_local_sem);
+
+       write_lock_bh(&rxrpc_local_lock);
+       if (atomic_read(&local->usage) > 0) {
+               write_unlock_bh(&rxrpc_local_lock);
+               up_write(&rxrpc_local_sem);
+               _leave(" [resurrected]");
+               return;
+       }
+
+       list_del(&local->link);
+       local->socket->sk->sk_user_data = NULL;
+       write_unlock_bh(&rxrpc_local_lock);
+
+       downgrade_write(&rxrpc_local_sem);
+
+       ASSERT(list_empty(&local->services));
+       ASSERT(!work_pending(&local->acceptor));
+       ASSERT(!work_pending(&local->rejecter));
+
+       /* finish cleaning up the local descriptor */
+       rxrpc_purge_queue(&local->accept_queue);
+       rxrpc_purge_queue(&local->reject_queue);
+       local->socket->ops->shutdown(local->socket, 2);
+       sock_release(local->socket);
+
+       up_read(&rxrpc_local_sem);
+
+       _net("DESTROY LOCAL %d", local->debug_id);
+       kfree(local);
+
+       if (list_empty(&rxrpc_locals))
+               wake_up_all(&rxrpc_local_wq);
+
+       _leave("");
+}
+
+/*
+ * preemptively destroy all local endpoints rather than waiting for them to
+ * be destroyed
+ */
+void __exit rxrpc_destroy_all_locals(void)
+{
+       DECLARE_WAITQUEUE(myself, current);
+
+       _enter("");
+
+       /* we simply have to wait for them to go away */
+       if (!list_empty(&rxrpc_locals)) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&rxrpc_local_wq, &myself);
+
+               while (!list_empty(&rxrpc_locals)) {
+                       schedule();
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+               }
+
+               remove_wait_queue(&rxrpc_local_wq, &myself);
+               set_current_state(TASK_RUNNING);
+       }
+
+       _leave("");
+}
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
new file mode 100644 (file)
index 0000000..5cdde4a
--- /dev/null
@@ -0,0 +1,734 @@
+/* RxRPC packet transmission
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/circ_buf.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+int rxrpc_resend_timeout = 4; /* seconds before resending an unacked packet */
+
+static int rxrpc_send_data(struct kiocb *iocb,
+                          struct rxrpc_sock *rx,
+                          struct rxrpc_call *call,
+                          struct msghdr *msg, size_t len);
+
+/*
+ * extract control messages from the sendmsg() control buffer
+ */
+static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
+                             unsigned long *user_call_ID,
+                             enum rxrpc_command *command,
+                             u32 *abort_code,
+                             bool server)
+{
+       struct cmsghdr *cmsg;
+       int len;
+
+       *command = RXRPC_CMD_SEND_DATA;
+
+       if (msg->msg_controllen == 0)
+               return -EINVAL;
+
+       for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+               if (!CMSG_OK(msg, cmsg))
+                       return -EINVAL;
+
+               len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
+               _debug("CMSG %d, %d, %d",
+                      cmsg->cmsg_level, cmsg->cmsg_type, len);
+
+               if (cmsg->cmsg_level != SOL_RXRPC)
+                       continue;
+
+               switch (cmsg->cmsg_type) {
+               case RXRPC_USER_CALL_ID:
+                       if (msg->msg_flags & MSG_CMSG_COMPAT) {
+                               if (len != sizeof(u32))
+                                       return -EINVAL;
+                               *user_call_ID = *(u32 *) CMSG_DATA(cmsg);
+                       } else {
+                               if (len != sizeof(unsigned long))
+                                       return -EINVAL;
+                               *user_call_ID = *(unsigned long *)
+                                       CMSG_DATA(cmsg);
+                       }
+                       _debug("User Call ID %lx", *user_call_ID);
+                       break;
+
+               case RXRPC_ABORT:
+                       if (*command != RXRPC_CMD_SEND_DATA)
+                               return -EINVAL;
+                       *command = RXRPC_CMD_SEND_ABORT;
+                       if (len != sizeof(*abort_code))
+                               return -EINVAL;
+                       *abort_code = *(unsigned int *) CMSG_DATA(cmsg);
+                       _debug("Abort %x", *abort_code);
+                       if (*abort_code == 0)
+                               return -EINVAL;
+                       break;
+
+               case RXRPC_ACCEPT:
+                       if (*command != RXRPC_CMD_SEND_DATA)
+                               return -EINVAL;
+                       *command = RXRPC_CMD_ACCEPT;
+                       if (len != 0)
+                               return -EINVAL;
+                       if (!server)
+                               return -EISCONN;
+                       break;
+
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       _leave(" = 0");
+       return 0;
+}
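+
+/*
+ * Illustrative summary of the control messages parsed above (all at level
+ * SOL_RXRPC): RXRPC_USER_CALL_ID carries the caller's tag for the call (a
+ * u32 for compat callers, otherwise an unsigned long); RXRPC_ABORT carries
+ * a non-zero u32 abort code; RXRPC_ACCEPT carries no data and is only
+ * permitted on server sockets.
+ */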
+
+/*
+ * abort a call, sending an ABORT packet to the peer
+ */
+static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
+{
+       write_lock_bh(&call->state_lock);
+
+       if (call->state <= RXRPC_CALL_COMPLETE) {
+               call->state = RXRPC_CALL_LOCALLY_ABORTED;
+               call->abort_code = abort_code;
+               set_bit(RXRPC_CALL_ABORT, &call->events);
+               del_timer_sync(&call->resend_timer);
+               del_timer_sync(&call->ack_timer);
+               clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+               clear_bit(RXRPC_CALL_ACK, &call->events);
+               clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+               rxrpc_queue_call(call);
+       }
+
+       write_unlock_bh(&call->state_lock);
+}
+
+/*
+ * send a message forming part of a client call through an RxRPC socket
+ * - caller holds the socket locked
+ * - the socket may be either a client socket or a server socket
+ */
+int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
+                        struct rxrpc_transport *trans, struct msghdr *msg,
+                        size_t len)
+{
+       struct rxrpc_conn_bundle *bundle;
+       enum rxrpc_command cmd;
+       struct rxrpc_call *call;
+       unsigned long user_call_ID = 0;
+       struct key *key;
+       __be16 service_id;
+       u32 abort_code = 0;
+       int ret;
+
+       _enter("");
+
+       ASSERT(trans != NULL);
+
+       ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
+                                false);
+       if (ret < 0)
+               return ret;
+
+       bundle = NULL;
+       if (trans) {
+               service_id = rx->service_id;
+               if (msg->msg_name) {
+                       struct sockaddr_rxrpc *srx =
+                               (struct sockaddr_rxrpc *) msg->msg_name;
+                       service_id = htons(srx->srx_service);
+               }
+               key = rx->key;
+               if (key && !rx->key->payload.data)
+                       key = NULL;
+               bundle = rxrpc_get_bundle(rx, trans, key, service_id,
+                                         GFP_KERNEL);
+               if (IS_ERR(bundle))
+                       return PTR_ERR(bundle);
+       }
+
+       call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
+                                    abort_code == 0, GFP_KERNEL);
+       if (trans)
+               rxrpc_put_bundle(trans, bundle);
+       if (IS_ERR(call)) {
+               _leave(" = %ld", PTR_ERR(call));
+               return PTR_ERR(call);
+       }
+
+       _debug("CALL %d USR %lx ST %d on CONN %p",
+              call->debug_id, call->user_call_ID, call->state, call->conn);
+
+       if (call->state >= RXRPC_CALL_COMPLETE) {
+               /* it's too late for this call */
+               ret = -ESHUTDOWN;
+       } else if (cmd == RXRPC_CMD_SEND_ABORT) {
+               rxrpc_send_abort(call, abort_code);
+       } else if (cmd != RXRPC_CMD_SEND_DATA) {
+               ret = -EINVAL;
+       } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
+               /* request phase complete for this client call */
+               ret = -EPROTO;
+       } else {
+               ret = rxrpc_send_data(iocb, rx, call, msg, len);
+       }
+
+       rxrpc_put_call(call);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/**
+ * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
+ * @call: The call to send data through
+ * @msg: The data to send
+ * @len: The amount of data to send
+ *
+ * Allow a kernel service to send data on a call.  The call must be in a state
+ * appropriate to sending data.  No control data should be supplied in @msg,
+ * nor should an address be supplied.  MSG_MORE should be flagged if there's
+ * more data to come, otherwise this data will end the transmission phase.
+ */
+int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
+                          size_t len)
+{
+       int ret;
+
+       _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
+
+       ASSERTCMP(msg->msg_name, ==, NULL);
+       ASSERTCMP(msg->msg_control, ==, NULL);
+
+       lock_sock(&call->socket->sk);
+
+       _debug("CALL %d USR %lx ST %d on CONN %p",
+              call->debug_id, call->user_call_ID, call->state, call->conn);
+
+       if (call->state >= RXRPC_CALL_COMPLETE) {
+               ret = -ESHUTDOWN; /* it's too late for this call */
+       } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
+                  call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+                  call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
+               ret = -EPROTO; /* request phase complete for this client call */
+       } else {
+               mm_segment_t oldfs = get_fs();
+               set_fs(KERNEL_DS);
+               ret = rxrpc_send_data(NULL, call->socket, call, msg, len);
+               set_fs(oldfs);
+       }
+
+       release_sock(&call->socket->sk);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_send_data);
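+
+/*
+ * A minimal usage sketch (hypothetical caller, not part of this file): a
+ * kernel service would wrap its buffer in an iovec-backed msghdr and push
+ * the request in one or more chunks, e.g.:
+ *
+ *      struct iovec iov = { .iov_base = buf, .iov_len = len };
+ *      struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
+ *                            .msg_flags = more ? MSG_MORE : 0 };
+ *      ret = rxrpc_kernel_send_data(call, &msg, len);
+ *
+ * msg_name and msg_control must be left NULL, as asserted above.
+ */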
+
+/**
+ * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
+ * @call: The call to be aborted
+ * @abort_code: The abort code to stick into the ABORT packet
+ *
+ * Allow a kernel service to abort a call, if it's still in an abortable state.
+ */
+void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code)
+{
+       _enter("{%d},%d", call->debug_id, abort_code);
+
+       lock_sock(&call->socket->sk);
+
+       _debug("CALL %d USR %lx ST %d on CONN %p",
+              call->debug_id, call->user_call_ID, call->state, call->conn);
+
+       if (call->state < RXRPC_CALL_COMPLETE)
+               rxrpc_send_abort(call, abort_code);
+
+       release_sock(&call->socket->sk);
+       _leave("");
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_abort_call);
+
+/*
+ * send a message through a server socket
+ * - caller holds the socket locked
+ */
+int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
+                        struct msghdr *msg, size_t len)
+{
+       enum rxrpc_command cmd;
+       struct rxrpc_call *call;
+       unsigned long user_call_ID = 0;
+       u32 abort_code = 0;
+       int ret;
+
+       _enter("");
+
+       ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
+                                true);
+       if (ret < 0)
+               return ret;
+
+       if (cmd == RXRPC_CMD_ACCEPT) {
+               call = rxrpc_accept_call(rx, user_call_ID);
+               if (IS_ERR(call))
+                       return PTR_ERR(call);
+               rxrpc_put_call(call);
+               return 0;
+       }
+
+       call = rxrpc_find_server_call(rx, user_call_ID);
+       if (!call)
+               return -EBADSLT;
+       if (call->state >= RXRPC_CALL_COMPLETE) {
+               ret = -ESHUTDOWN;
+               goto out;
+       }
+
+       switch (cmd) {
+       case RXRPC_CMD_SEND_DATA:
+               if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
+                   call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+                   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
+                       /* Tx phase not yet begun for this call */
+                       ret = -EPROTO;
+                       break;
+               }
+
+               ret = rxrpc_send_data(iocb, rx, call, msg, len);
+               break;
+
+       case RXRPC_CMD_SEND_ABORT:
+               rxrpc_send_abort(call, abort_code);
+               break;
+       default:
+               BUG();
+       }
+
+out:
+       rxrpc_put_call(call);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * send a packet through the transport endpoint
+ */
+int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
+{
+       struct kvec iov[1];
+       struct msghdr msg;
+       int ret, opt;
+
+       _enter(",{%d}", skb->len);
+
+       iov[0].iov_base = skb->head;
+       iov[0].iov_len = skb->len;
+
+       msg.msg_name = &trans->peer->srx.transport.sin;
+       msg.msg_namelen = sizeof(trans->peer->srx.transport.sin);
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_flags = 0;
+
+       /* send the packet with the don't fragment bit set if we currently
+        * think it's small enough */
+       if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) {
+               down_read(&trans->local->defrag_sem);
+               /* send the packet by UDP
+                * - returns -EMSGSIZE if UDP would have to fragment the packet
+                *   to go out of the interface
+                *   - in which case, we'll have processed the ICMP error
+                *     message and update the peer record
+                */
+               ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
+                                    iov[0].iov_len);
+
+               up_read(&trans->local->defrag_sem);
+               if (ret == -EMSGSIZE)
+                       goto send_fragmentable;
+
+               _leave(" = %d [%u]", ret, trans->peer->maxdata);
+               return ret;
+       }
+
+send_fragmentable:
+       /* attempt to send this message with fragmentation enabled */
+       _debug("send fragment");
+
+       down_write(&trans->local->defrag_sem);
+       opt = IP_PMTUDISC_DONT;
+       ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER,
+                               (char *) &opt, sizeof(opt));
+       if (ret == 0) {
+               ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
+                                    iov[0].iov_len);
+
+               opt = IP_PMTUDISC_DO;
+               kernel_setsockopt(trans->local->socket, SOL_IP,
+                                 IP_MTU_DISCOVER, (char *) &opt, sizeof(opt));
+       }
+
+       up_write(&trans->local->defrag_sem);
+       _leave(" = %d [frag %u]", ret, trans->peer->maxdata);
+       return ret;
+}
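+
+/*
+ * Note on defrag_sem above: ordinary DF-flagged sends take it for reading,
+ * whilst the fragmentable path takes it for writing around the
+ * IP_MTU_DISCOVER toggle, so no DF-flagged packet can slip out whilst
+ * path-MTU discovery is temporarily switched off on the shared socket.
+ */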
+
+/*
+ * wait for space to appear in the transmit/ACK window
+ * - caller holds the socket locked
+ */
+static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
+                                   struct rxrpc_call *call,
+                                   long *timeo)
+{
+       DECLARE_WAITQUEUE(myself, current);
+       int ret;
+
+       _enter(",{%d},%ld",
+              CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz),
+              *timeo);
+
+       add_wait_queue(&call->tx_waitq, &myself);
+
+       for (;;) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               ret = 0;
+               if (CIRC_SPACE(call->acks_head, call->acks_tail,
+                              call->acks_winsz) > 0)
+                       break;
+               if (signal_pending(current)) {
+                       ret = sock_intr_errno(*timeo);
+                       break;
+               }
+
+               release_sock(&rx->sk);
+               *timeo = schedule_timeout(*timeo);
+               lock_sock(&rx->sk);
+       }
+
+       remove_wait_queue(&call->tx_waitq, &myself);
+       set_current_state(TASK_RUNNING);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * attempt to schedule an instant Tx resend
+ */
+static inline void rxrpc_instant_resend(struct rxrpc_call *call)
+{
+       read_lock_bh(&call->state_lock);
+       if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
+               clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
+               if (call->state < RXRPC_CALL_COMPLETE &&
+                   !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+                       rxrpc_queue_call(call);
+       }
+       read_unlock_bh(&call->state_lock);
+}
+
+/*
+ * queue a packet for transmission, set the resend timer and attempt
+ * to send the packet immediately
+ */
+static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
+                              bool last)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       int ret;
+
+       _net("queue skb %p [%d]", skb, call->acks_head);
+
+       ASSERT(call->acks_window != NULL);
+       call->acks_window[call->acks_head] = (unsigned long) skb;
+       smp_wmb();
+       call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1);
+
+       if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
+               _debug("________awaiting reply/ACK__________");
+               write_lock_bh(&call->state_lock);
+               switch (call->state) {
+               case RXRPC_CALL_CLIENT_SEND_REQUEST:
+                       call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+                       break;
+               case RXRPC_CALL_SERVER_ACK_REQUEST:
+                       call->state = RXRPC_CALL_SERVER_SEND_REPLY;
+                       if (!last)
+                               break;
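+                       /* deliberately fall through when this was the last
+                        * packet: the final reply packet moves the call
+                        * straight on to awaiting the final ACK */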
+               case RXRPC_CALL_SERVER_SEND_REPLY:
+                       call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
+                       break;
+               default:
+                       break;
+               }
+               write_unlock_bh(&call->state_lock);
+       }
+
+       _proto("Tx DATA %%%u { #%u }",
+              ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
+
+       sp->need_resend = 0;
+       sp->resend_at = jiffies + rxrpc_resend_timeout * HZ;
+       if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
+               _debug("run timer");
+               call->resend_timer.expires = sp->resend_at;
+               add_timer(&call->resend_timer);
+       }
+
+       /* attempt to cancel the rx-ACK timer, deferring reply transmission if
+        * we're ACK'ing the request phase of an incoming call */
+       ret = -EAGAIN;
+       if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
+               /* the packet may be freed by rxrpc_process_call() before this
+                * returns */
+               ret = rxrpc_send_packet(call->conn->trans, skb);
+               _net("sent skb %p", skb);
+       } else {
+               _debug("failed to delete ACK timer");
+       }
+
+       if (ret < 0) {
+               _debug("need instant resend %d", ret);
+               sp->need_resend = 1;
+               rxrpc_instant_resend(call);
+       }
+
+       _leave("");
+}
+
+/*
+ * send data through a socket
+ * - must be called in process context
+ * - caller holds the socket locked
+ */
+static int rxrpc_send_data(struct kiocb *iocb,
+                          struct rxrpc_sock *rx,
+                          struct rxrpc_call *call,
+                          struct msghdr *msg, size_t len)
+{
+       struct rxrpc_skb_priv *sp;
+       unsigned char __user *from;
+       struct sk_buff *skb;
+       struct iovec *iov;
+       struct sock *sk = &rx->sk;
+       long timeo;
+       bool more;
+       int ret, ioc, segment, copied;
+
+       _enter(",,,{%zu},%zu", msg->msg_iovlen, len);
+
+       timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+
+       /* this should be in poll */
+       clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+       if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+               return -EPIPE;
+
+       iov = msg->msg_iov;
+       ioc = msg->msg_iovlen - 1;
+       from = iov->iov_base;
+       segment = iov->iov_len;
+       iov++;
+       more = msg->msg_flags & MSG_MORE;
+
+       skb = call->tx_pending;
+       call->tx_pending = NULL;
+
+       copied = 0;
+       do {
+               int copy;
+
+               if (segment > len)
+                       segment = len;
+
+               _debug("SEGMENT %d @%p", segment, from);
+
+               if (!skb) {
+                       size_t size, chunk, max, space;
+
+                       _debug("alloc");
+
+                       if (CIRC_SPACE(call->acks_head, call->acks_tail,
+                                      call->acks_winsz) <= 0) {
+                               ret = -EAGAIN;
+                               if (msg->msg_flags & MSG_DONTWAIT)
+                                       goto maybe_error;
+                               ret = rxrpc_wait_for_tx_window(rx, call,
+                                                              &timeo);
+                               if (ret < 0)
+                                       goto maybe_error;
+                       }
+
+                       max = call->conn->trans->peer->maxdata;
+                       max -= call->conn->security_size;
+                       max &= ~(call->conn->size_align - 1UL);
+
+                       chunk = max;
+                       if (chunk > len)
+                               chunk = len;
+
+                       space = chunk + call->conn->size_align;
+                       space &= ~(call->conn->size_align - 1UL);
+
+                       size = space + call->conn->header_size;
+
+                       _debug("SIZE: %zu/%zu/%zu", chunk, space, size);
+
+                       /* create a buffer that we can retain until it's ACK'd */
+                       skb = sock_alloc_send_skb(
+                               sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
+                       if (!skb)
+                               goto maybe_error;
+
+                       rxrpc_new_skb(skb);
+
+                       _debug("ALLOC SEND %p", skb);
+
+                       ASSERTCMP(skb->mark, ==, 0);
+
+                       _debug("HS: %u", call->conn->header_size);
+                       skb_reserve(skb, call->conn->header_size);
+                       skb->len += call->conn->header_size;
+
+                       sp = rxrpc_skb(skb);
+                       sp->remain = chunk;
+                       if (sp->remain > skb_tailroom(skb))
+                               sp->remain = skb_tailroom(skb);
+
+                       _net("skb: hr %d, tr %d, hl %d, rm %d",
+                              skb_headroom(skb),
+                              skb_tailroom(skb),
+                              skb_headlen(skb),
+                              sp->remain);
+
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               }
+
+               _debug("append");
+               sp = rxrpc_skb(skb);
+
+               /* append next segment of data to the current buffer */
+               copy = skb_tailroom(skb);
+               ASSERTCMP(copy, >, 0);
+               if (copy > segment)
+                       copy = segment;
+               if (copy > sp->remain)
+                       copy = sp->remain;
+
+               _debug("add");
+               ret = skb_add_data(skb, from, copy);
+               _debug("added");
+               if (ret < 0)
+                       goto efault;
+               sp->remain -= copy;
+               skb->mark += copy;
+
+               len -= copy;
+               segment -= copy;
+               from += copy;
+               while (segment == 0 && ioc > 0) {
+                       from = iov->iov_base;
+                       segment = iov->iov_len;
+                       iov++;
+                       ioc--;
+               }
+               if (len == 0) {
+                       segment = 0;
+                       ioc = 0;
+               }
+
+               /* check for the far side aborting the call or a network error
+                * occurring */
+               if (call->state > RXRPC_CALL_COMPLETE)
+                       goto call_aborted;
+
+               /* add the packet to the send queue if it's now full */
+               if (sp->remain <= 0 || (segment == 0 && !more)) {
+                       struct rxrpc_connection *conn = call->conn;
+                       size_t pad;
+
+                       /* pad out if we're using security */
+                       if (conn->security) {
+                               pad = conn->security_size + skb->mark;
+                               pad = conn->size_align - pad;
+                               pad &= conn->size_align - 1;
+                               _debug("pad %zu", pad);
+                               if (pad)
+                                       memset(skb_put(skb, pad), 0, pad);
+                       }
+
+                       sp->hdr.epoch = conn->epoch;
+                       sp->hdr.cid = call->cid;
+                       sp->hdr.callNumber = call->call_id;
+                       sp->hdr.seq =
+                               htonl(atomic_inc_return(&call->sequence));
+                       sp->hdr.serial =
+                               htonl(atomic_inc_return(&conn->serial));
+                       sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
+                       sp->hdr.userStatus = 0;
+                       sp->hdr.securityIndex = conn->security_ix;
+                       sp->hdr._rsvd = 0;
+                       sp->hdr.serviceId = conn->service_id;
+
+                       sp->hdr.flags = conn->out_clientflag;
+                       if (len == 0 && !more)
+                               sp->hdr.flags |= RXRPC_LAST_PACKET;
+                       else if (CIRC_SPACE(call->acks_head, call->acks_tail,
+                                           call->acks_winsz) > 1)
+                               sp->hdr.flags |= RXRPC_MORE_PACKETS;
+
+                       ret = rxrpc_secure_packet(
+                               call, skb, skb->mark,
+                               skb->head + sizeof(struct rxrpc_header));
+                       if (ret < 0)
+                               goto out;
+
+                       memcpy(skb->head, &sp->hdr,
+                              sizeof(struct rxrpc_header));
+                       rxrpc_queue_packet(call, skb, segment == 0 && !more);
+                       skb = NULL;
+               }
+
+       } while (segment > 0);
+
+out:
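+       /* a partially filled packet is retained on the call so that a further
+        * sendmsg() with MSG_MORE set can continue appending data to it */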
+       call->tx_pending = skb;
+       _leave(" = %d", ret);
+       return ret;
+
+call_aborted:
+       rxrpc_free_skb(skb);
+       if (call->state == RXRPC_CALL_NETWORK_ERROR)
+               ret = call->conn->trans->peer->net_error;
+       else
+               ret = -ECONNABORTED;
+       _leave(" = %d", ret);
+       return ret;
+
+maybe_error:
+       if (copied)
+               ret = copied;
+       goto out;
+
+efault:
+       ret = -EFAULT;
+       goto out;
+}
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
new file mode 100644 (file)
index 0000000..d399de4
--- /dev/null
@@ -0,0 +1,273 @@
+/* RxRPC remote transport endpoint management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/icmp.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include <net/ip.h>
+#include "ar-internal.h"
+
+static LIST_HEAD(rxrpc_peers);
+static DEFINE_RWLOCK(rxrpc_peer_lock);
+static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);
+
+static void rxrpc_destroy_peer(struct work_struct *work);
+
+/*
+ * allocate a new peer
+ */
+static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
+                                          gfp_t gfp)
+{
+       struct rxrpc_peer *peer;
+
+       _enter("");
+
+       peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
+       if (peer) {
+               INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
+               INIT_LIST_HEAD(&peer->link);
+               INIT_LIST_HEAD(&peer->error_targets);
+               spin_lock_init(&peer->lock);
+               atomic_set(&peer->usage, 1);
+               peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
+               memcpy(&peer->srx, srx, sizeof(*srx));
+
+               peer->mtu = peer->if_mtu = 65535;
+
+               if (srx->transport.family == AF_INET) {
+                       peer->hdrsize = sizeof(struct iphdr);
+                       switch (srx->transport_type) {
+                       case SOCK_DGRAM:
+                               peer->hdrsize += sizeof(struct udphdr);
+                               break;
+                       default:
+                               BUG();
+                               break;
+                       }
+               } else {
+                       BUG();
+               }
+
+               peer->hdrsize += sizeof(struct rxrpc_header);
+               peer->maxdata = peer->mtu - peer->hdrsize;
+       }
+
+       _leave(" = %p", peer);
+       return peer;
+}
+
+/*
+ * obtain a remote transport endpoint for the specified address
+ */
+struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
+{
+       struct rxrpc_peer *peer, *candidate;
+       const char *new = "old";
+       int usage;
+
+       _enter("{%d,%d,%u.%u.%u.%u+%hu}",
+              srx->transport_type,
+              srx->transport_len,
+              NIPQUAD(srx->transport.sin.sin_addr),
+              ntohs(srx->transport.sin.sin_port));
+
+       /* search the peer list first */
+       read_lock_bh(&rxrpc_peer_lock);
+       list_for_each_entry(peer, &rxrpc_peers, link) {
+               _debug("check PEER %d { u=%d t=%d l=%d }",
+                      peer->debug_id,
+                      atomic_read(&peer->usage),
+                      peer->srx.transport_type,
+                      peer->srx.transport_len);
+
+               if (atomic_read(&peer->usage) > 0 &&
+                   peer->srx.transport_type == srx->transport_type &&
+                   peer->srx.transport_len == srx->transport_len &&
+                   memcmp(&peer->srx.transport,
+                          &srx->transport,
+                          srx->transport_len) == 0)
+                       goto found_extant_peer;
+       }
+       read_unlock_bh(&rxrpc_peer_lock);
+
+       /* not yet present - create a candidate for a new record and then
+        * redo the search */
+       candidate = rxrpc_alloc_peer(srx, gfp);
+       if (!candidate) {
+               _leave(" = -ENOMEM");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       write_lock_bh(&rxrpc_peer_lock);
+
+       list_for_each_entry(peer, &rxrpc_peers, link) {
+               if (atomic_read(&peer->usage) > 0 &&
+                   peer->srx.transport_type == srx->transport_type &&
+                   peer->srx.transport_len == srx->transport_len &&
+                   memcmp(&peer->srx.transport,
+                          &srx->transport,
+                          srx->transport_len) == 0)
+                       goto found_extant_second;
+       }
+
+       /* we can now add the new candidate to the list */
+       peer = candidate;
+       candidate = NULL;
+
+       list_add_tail(&peer->link, &rxrpc_peers);
+       write_unlock_bh(&rxrpc_peer_lock);
+       new = "new";
+
+success:
+       _net("PEER %s %d {%d,%u,%u.%u.%u.%u+%hu}",
+            new,
+            peer->debug_id,
+            peer->srx.transport_type,
+            peer->srx.transport.family,
+            NIPQUAD(peer->srx.transport.sin.sin_addr),
+            ntohs(peer->srx.transport.sin.sin_port));
+
+       _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
+       return peer;
+
+       /* we found the peer in the list immediately */
+found_extant_peer:
+       usage = atomic_inc_return(&peer->usage);
+       read_unlock_bh(&rxrpc_peer_lock);
+       goto success;
+
+       /* we found the peer on the second time through the list */
+found_extant_second:
+       usage = atomic_inc_return(&peer->usage);
+       write_unlock_bh(&rxrpc_peer_lock);
+       kfree(candidate);
+       goto success;
+}
+
+/*
+ * find the peer associated with a packet
+ */
+struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
+                                  __be32 addr, __be16 port)
+{
+       struct rxrpc_peer *peer;
+
+       _enter("");
+
+       /* search the peer list */
+       read_lock_bh(&rxrpc_peer_lock);
+
+       if (local->srx.transport.family == AF_INET &&
+           local->srx.transport_type == SOCK_DGRAM) {
+               list_for_each_entry(peer, &rxrpc_peers, link) {
+                       if (atomic_read(&peer->usage) > 0 &&
+                           peer->srx.transport_type == SOCK_DGRAM &&
+                           peer->srx.transport.family == AF_INET &&
+                           peer->srx.transport.sin.sin_port == port &&
+                           peer->srx.transport.sin.sin_addr.s_addr == addr)
+                               goto found_UDP_peer;
+               }
+
+               goto new_UDP_peer;
+       }
+
+       read_unlock_bh(&rxrpc_peer_lock);
+       _leave(" = -EAFNOSUPPORT");
+       return ERR_PTR(-EAFNOSUPPORT);
+
+found_UDP_peer:
+       _net("Rx UDP DGRAM from peer %d", peer->debug_id);
+       atomic_inc(&peer->usage);
+       read_unlock_bh(&rxrpc_peer_lock);
+       _leave(" = %p", peer);
+       return peer;
+
+new_UDP_peer:
+       _net("Rx UDP DGRAM from NEW peer");
+       read_unlock_bh(&rxrpc_peer_lock);
+       _leave(" = -EBUSY [new]");
+       return ERR_PTR(-EBUSY);
+}
+
+/*
+ * release a remote transport endpoint
+ */
+void rxrpc_put_peer(struct rxrpc_peer *peer)
+{
+       _enter("%p{u=%d}", peer, atomic_read(&peer->usage));
+
+       ASSERTCMP(atomic_read(&peer->usage), >, 0);
+
+       if (likely(!atomic_dec_and_test(&peer->usage))) {
+               _leave(" [in use]");
+               return;
+       }
+
+       rxrpc_queue_work(&peer->destroyer);
+       _leave("");
+}
+
+/*
+ * destroy a remote transport endpoint
+ */
+static void rxrpc_destroy_peer(struct work_struct *work)
+{
+       struct rxrpc_peer *peer =
+               container_of(work, struct rxrpc_peer, destroyer);
+
+       _enter("%p{%d}", peer, atomic_read(&peer->usage));
+
+       write_lock_bh(&rxrpc_peer_lock);
+       list_del(&peer->link);
+       write_unlock_bh(&rxrpc_peer_lock);
+
+       _net("DESTROY PEER %d", peer->debug_id);
+       kfree(peer);
+
+       if (list_empty(&rxrpc_peers))
+               wake_up_all(&rxrpc_peer_wq);
+       _leave("");
+}
+
+/*
+ * preemptively destroy all the peer records from a transport endpoint rather
+ * than waiting for them to time out
+ */
+void __exit rxrpc_destroy_all_peers(void)
+{
+       DECLARE_WAITQUEUE(myself, current);
+
+       _enter("");
+
+       /* we simply have to wait for them to go away */
+       if (!list_empty(&rxrpc_peers)) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&rxrpc_peer_wq, &myself);
+
+               while (!list_empty(&rxrpc_peers)) {
+                       schedule();
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+               }
+
+               remove_wait_queue(&rxrpc_peer_wq, &myself);
+               set_current_state(TASK_RUNNING);
+       }
+
+       _leave("");
+}
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
new file mode 100644 (file)
index 0000000..58f4b4e
--- /dev/null
@@ -0,0 +1,247 @@
+/* /proc/net/ support for AF_RXRPC
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static const char *rxrpc_conn_states[] = {
+       [RXRPC_CONN_UNUSED]             = "Unused  ",
+       [RXRPC_CONN_CLIENT]             = "Client  ",
+       [RXRPC_CONN_SERVER_UNSECURED]   = "SvUnsec ",
+       [RXRPC_CONN_SERVER_CHALLENGING] = "SvChall ",
+       [RXRPC_CONN_SERVER]             = "SvSecure",
+       [RXRPC_CONN_REMOTELY_ABORTED]   = "RmtAbort",
+       [RXRPC_CONN_LOCALLY_ABORTED]    = "LocAbort",
+       [RXRPC_CONN_NETWORK_ERROR]      = "NetError",
+};
+
+const char *rxrpc_call_states[] = {
+       [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
+       [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
+       [RXRPC_CALL_CLIENT_RECV_REPLY]          = "ClRcvRpl",
+       [RXRPC_CALL_CLIENT_FINAL_ACK]           = "ClFnlACK",
+       [RXRPC_CALL_SERVER_SECURING]            = "SvSecure",
+       [RXRPC_CALL_SERVER_ACCEPTING]           = "SvAccept",
+       [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
+       [RXRPC_CALL_SERVER_ACK_REQUEST]         = "SvAckReq",
+       [RXRPC_CALL_SERVER_SEND_REPLY]          = "SvSndRpl",
+       [RXRPC_CALL_SERVER_AWAIT_ACK]           = "SvAwtACK",
+       [RXRPC_CALL_COMPLETE]                   = "Complete",
+       [RXRPC_CALL_SERVER_BUSY]                = "SvBusy  ",
+       [RXRPC_CALL_REMOTELY_ABORTED]           = "RmtAbort",
+       [RXRPC_CALL_LOCALLY_ABORTED]            = "LocAbort",
+       [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
+       [RXRPC_CALL_DEAD]                       = "Dead    ",
+};
+
+/*
+ * generate a list of extant and dead calls in /proc/net/rxrpc_calls
+ */
+static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
+{
+       struct list_head *_p;
+       loff_t pos = *_pos;
+
+       read_lock(&rxrpc_call_lock);
+       if (!pos)
+               return SEQ_START_TOKEN;
+       pos--;
+
+       list_for_each(_p, &rxrpc_calls)
+               if (!pos--)
+                       break;
+
+       return _p != &rxrpc_calls ? _p : NULL;
+}
+
+static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct list_head *_p;
+
+       (*pos)++;
+
+       _p = v;
+       _p = (v == SEQ_START_TOKEN) ? rxrpc_calls.next : _p->next;
+
+       return _p != &rxrpc_calls ? _p : NULL;
+}
+
+static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
+{
+       read_unlock(&rxrpc_call_lock);
+}
+
+static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
+{
+       struct rxrpc_transport *trans;
+       struct rxrpc_call *call;
+       char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
+
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq,
+                        "Proto Local                  Remote                "
+                        " SvID ConnID   CallID   End Use State    Abort   "
+                        " UserID\n");
+               return 0;
+       }
+
+       call = list_entry(v, struct rxrpc_call, link);
+       trans = call->conn->trans;
+
+       sprintf(lbuff, NIPQUAD_FMT":%u",
+               NIPQUAD(trans->local->srx.transport.sin.sin_addr),
+               ntohs(trans->local->srx.transport.sin.sin_port));
+
+       sprintf(rbuff, NIPQUAD_FMT":%u",
+               NIPQUAD(trans->peer->srx.transport.sin.sin_addr),
+               ntohs(trans->peer->srx.transport.sin.sin_port));
+
+       seq_printf(seq,
+                  "UDP   %-22.22s %-22.22s %4x %08x %08x %s %3u"
+                  " %-8.8s %08x %lx\n",
+                  lbuff,
+                  rbuff,
+                  ntohs(call->conn->service_id),
+                  ntohl(call->conn->cid),
+                  ntohl(call->call_id),
+                  call->conn->in_clientflag ? "Svc" : "Clt",
+                  atomic_read(&call->usage),
+                  rxrpc_call_states[call->state],
+                  call->abort_code,
+                  call->user_call_ID);
+
+       return 0;
+}
+
+static struct seq_operations rxrpc_call_seq_ops = {
+       .start  = rxrpc_call_seq_start,
+       .next   = rxrpc_call_seq_next,
+       .stop   = rxrpc_call_seq_stop,
+       .show   = rxrpc_call_seq_show,
+};
+
+static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &rxrpc_call_seq_ops);
+}
+
+struct file_operations rxrpc_call_seq_fops = {
+       .owner          = THIS_MODULE,
+       .open           = rxrpc_call_seq_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release_private,
+};
+
+/*
+ * generate a list of extant virtual connections in /proc/net/rxrpc_conns
+ */
+static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
+{
+       struct list_head *_p;
+       loff_t pos = *_pos;
+
+       read_lock(&rxrpc_connection_lock);
+       if (!pos)
+               return SEQ_START_TOKEN;
+       pos--;
+
+       list_for_each(_p, &rxrpc_connections)
+               if (!pos--)
+                       break;
+
+       return _p != &rxrpc_connections ? _p : NULL;
+}
+
+static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
+                                      loff_t *pos)
+{
+       struct list_head *_p;
+
+       (*pos)++;
+
+       _p = v;
+       _p = (v == SEQ_START_TOKEN) ? rxrpc_connections.next : _p->next;
+
+       return _p != &rxrpc_connections ? _p : NULL;
+}
+
+static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
+{
+       read_unlock(&rxrpc_connection_lock);
+}
+
+static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
+{
+       struct rxrpc_connection *conn;
+       struct rxrpc_transport *trans;
+       char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
+
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq,
+                        "Proto Local                  Remote                "
+                        " SvID ConnID   Calls    End Use State    Key     "
+                        " Serial   ISerial\n"
+                        );
+               return 0;
+       }
+
+       conn = list_entry(v, struct rxrpc_connection, link);
+       trans = conn->trans;
+
+       sprintf(lbuff, NIPQUAD_FMT":%u",
+               NIPQUAD(trans->local->srx.transport.sin.sin_addr),
+               ntohs(trans->local->srx.transport.sin.sin_port));
+
+       sprintf(rbuff, NIPQUAD_FMT":%u",
+               NIPQUAD(trans->peer->srx.transport.sin.sin_addr),
+               ntohs(trans->peer->srx.transport.sin.sin_port));
+
+       seq_printf(seq,
+                  "UDP   %-22.22s %-22.22s %4x %08x %08x %s %3u"
+                  " %s %08x %08x %08x\n",
+                  lbuff,
+                  rbuff,
+                  ntohs(conn->service_id),
+                  ntohl(conn->cid),
+                  conn->call_counter,
+                  conn->in_clientflag ? "Svc" : "Clt",
+                  atomic_read(&conn->usage),
+                  rxrpc_conn_states[conn->state],
+                  key_serial(conn->key),
+                  atomic_read(&conn->serial),
+                  atomic_read(&conn->hi_serial));
+
+       return 0;
+}
+
+static struct seq_operations rxrpc_connection_seq_ops = {
+       .start  = rxrpc_connection_seq_start,
+       .next   = rxrpc_connection_seq_next,
+       .stop   = rxrpc_connection_seq_stop,
+       .show   = rxrpc_connection_seq_show,
+};
+
+
+static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &rxrpc_connection_seq_ops);
+}
+
+struct file_operations rxrpc_connection_seq_fops = {
+       .owner          = THIS_MODULE,
+       .open           = rxrpc_connection_seq_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release_private,
+};
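
The two seq_file interfaces above are only defined here; the hookup into /proc/net happens in af_rxrpc.c elsewhere in this patch. For reference, wiring a pair of file_operations like these into /proc/net on a kernel of this era would look roughly like the sketch below, assuming the pre-namespace proc_net_fops_create()/proc_net_remove() helpers:

#include <linux/init.h>
#include <linux/proc_fs.h>

extern struct file_operations rxrpc_call_seq_fops;
extern struct file_operations rxrpc_connection_seq_fops;

static int __init rxrpc_proc_example_init(void)
{
	/* create /proc/net/rxrpc_calls and /proc/net/rxrpc_conns */
	if (!proc_net_fops_create("rxrpc_calls", 0, &rxrpc_call_seq_fops))
		return -ENOMEM;
	if (!proc_net_fops_create("rxrpc_conns", 0,
				  &rxrpc_connection_seq_fops)) {
		proc_net_remove("rxrpc_calls");
		return -ENOMEM;
	}
	return 0;
}

static void __exit rxrpc_proc_example_exit(void)
{
	proc_net_remove("rxrpc_conns");
	proc_net_remove("rxrpc_calls");
}
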
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
new file mode 100644 (file)
index 0000000..f19121d
--- /dev/null
@@ -0,0 +1,437 @@
+/* RxRPC recvmsg() implementation
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+/*
+ * remove a call's user ID from the socket tree to make the user ID available
+ * again and so that it won't be seen again in association with that call
+ */
+void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call)
+{
+       _debug("RELEASE CALL %d", call->debug_id);
+
+       if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
+               write_lock_bh(&rx->call_lock);
+               rb_erase(&call->sock_node, &call->socket->calls);
+               clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+               write_unlock_bh(&rx->call_lock);
+       }
+
+       read_lock_bh(&call->state_lock);
+       if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
+           !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+               rxrpc_queue_call(call);
+       read_unlock_bh(&call->state_lock);
+}
+
+/*
+ * receive a message from an RxRPC socket
+ * - we need to be careful about two or more threads calling recvmsg
+ *   simultaneously
+ */
+int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
+                 struct msghdr *msg, size_t len, int flags)
+{
+       struct rxrpc_skb_priv *sp;
+       struct rxrpc_call *call = NULL, *continue_call = NULL;
+       struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+       struct sk_buff *skb;
+       long timeo;
+       int copy, ret, ullen, offset, copied = 0;
+       u32 abort_code;
+
+       DEFINE_WAIT(wait);
+
+       _enter(",,,%zu,%d", len, flags);
+
+       if (flags & (MSG_OOB | MSG_TRUNC))
+               return -EOPNOTSUPP;
+
+       ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
+
+       timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
+       msg->msg_flags |= MSG_MORE;
+
+       lock_sock(&rx->sk);
+
+       for (;;) {
+               /* return immediately if a client socket has no outstanding
+                * calls */
+               if (RB_EMPTY_ROOT(&rx->calls)) {
+                       if (copied)
+                               goto out;
+                       if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
+                               release_sock(&rx->sk);
+                               if (continue_call)
+                                       rxrpc_put_call(continue_call);
+                               return -ENODATA;
+                       }
+               }
+
+               /* get the next message on the Rx queue */
+               skb = skb_peek(&rx->sk.sk_receive_queue);
+               if (!skb) {
+                       /* nothing remains on the queue */
+                       if (copied &&
+                           (msg->msg_flags & MSG_PEEK || timeo == 0))
+                               goto out;
+
+                       /* wait for a message to turn up */
+                       release_sock(&rx->sk);
+                       prepare_to_wait_exclusive(rx->sk.sk_sleep, &wait,
+                                                 TASK_INTERRUPTIBLE);
+                       ret = sock_error(&rx->sk);
+                       if (ret)
+                               goto wait_error;
+
+                       if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
+                               if (signal_pending(current))
+                                       goto wait_interrupted;
+                               timeo = schedule_timeout(timeo);
+                       }
+                       finish_wait(rx->sk.sk_sleep, &wait);
+                       lock_sock(&rx->sk);
+                       continue;
+               }
+
+       peek_next_packet:
+               sp = rxrpc_skb(skb);
+               call = sp->call;
+               ASSERT(call != NULL);
+
+               _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
+
+               /* make sure we wait for the state to be updated in this call */
+               spin_lock_bh(&call->lock);
+               spin_unlock_bh(&call->lock);
+
+               if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
+                       _debug("packet from released call");
+                       if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
+                               BUG();
+                       rxrpc_free_skb(skb);
+                       continue;
+               }
+
+               /* determine whether to continue last data receive */
+               if (continue_call) {
+                       _debug("maybe cont");
+                       if (call != continue_call ||
+                           skb->mark != RXRPC_SKB_MARK_DATA) {
+                               release_sock(&rx->sk);
+                               rxrpc_put_call(continue_call);
+                               _leave(" = %d [noncont]", copied);
+                               return copied;
+                       }
+               }
+
+               rxrpc_get_call(call);
+
+               /* copy the peer address and timestamp */
+               if (!continue_call) {
+                       if (msg->msg_name && msg->msg_namelen > 0)
+                               memcpy(msg->msg_name, &call->conn->trans->peer->srx,
+                                      sizeof(call->conn->trans->peer->srx));
+                       sock_recv_timestamp(msg, &rx->sk, skb);
+               }
+
+               /* receive the message */
+               if (skb->mark != RXRPC_SKB_MARK_DATA)
+                       goto receive_non_data_message;
+
+               _debug("recvmsg DATA #%u { %d, %d }",
+                      ntohl(sp->hdr.seq), skb->len, sp->offset);
+
+               if (!continue_call) {
+                       /* only set the control data once per recvmsg() */
+                       ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
+                                      ullen, &call->user_call_ID);
+                       if (ret < 0)
+                               goto copy_error;
+                       ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
+               }
+
+               ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
+               ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
+               call->rx_data_recv = ntohl(sp->hdr.seq);
+
+               ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
+
+               offset = sp->offset;
+               copy = skb->len - offset;
+               if (copy > len - copied)
+                       copy = len - copied;
+
+               if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+                       ret = skb_copy_datagram_iovec(skb, offset,
+                                                     msg->msg_iov, copy);
+               } else {
+                       ret = skb_copy_and_csum_datagram_iovec(skb, offset,
+                                                              msg->msg_iov);
+                       if (ret == -EINVAL)
+                               goto csum_copy_error;
+               }
+
+               if (ret < 0)
+                       goto copy_error;
+
+               /* handle piecemeal consumption of data packets */
+               _debug("copied %d+%d", copy, copied);
+
+               offset += copy;
+               copied += copy;
+
+               if (!(flags & MSG_PEEK))
+                       sp->offset = offset;
+
+               if (sp->offset < skb->len) {
+                       _debug("buffer full");
+                       ASSERTCMP(copied, ==, len);
+                       break;
+               }
+
+               /* we transferred the whole data packet */
+               if (sp->hdr.flags & RXRPC_LAST_PACKET) {
+                       _debug("last");
+                       if (call->conn->out_clientflag) {
+                                /* last byte of reply received */
+                               ret = copied;
+                               goto terminal_message;
+                       }
+
+                       /* last bit of request received */
+                       if (!(flags & MSG_PEEK)) {
+                               _debug("eat packet");
+                               if (skb_dequeue(&rx->sk.sk_receive_queue) !=
+                                   skb)
+                                       BUG();
+                               rxrpc_free_skb(skb);
+                       }
+                       msg->msg_flags &= ~MSG_MORE;
+                       break;
+               }
+
+               /* move on to the next data message */
+               _debug("next");
+               if (!continue_call)
+                       continue_call = sp->call;
+               else
+                       rxrpc_put_call(call);
+               call = NULL;
+
+               if (flags & MSG_PEEK) {
+                       _debug("peek next");
+                       skb = skb->next;
+                       if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
+                               break;
+                       goto peek_next_packet;
+               }
+
+               _debug("eat packet");
+               if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
+                       BUG();
+               rxrpc_free_skb(skb);
+       }
+
+       /* end of non-terminal data packet reception for the moment */
+       _debug("end rcv data");
+out:
+       release_sock(&rx->sk);
+       if (call)
+               rxrpc_put_call(call);
+       if (continue_call)
+               rxrpc_put_call(continue_call);
+       _leave(" = %d [data]", copied);
+       return copied;
+
+       /* handle non-DATA messages such as aborts, incoming connections and
+        * final ACKs */
+receive_non_data_message:
+       _debug("non-data");
+
+       if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) {
+               _debug("RECV NEW CALL");
+               ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code);
+               if (ret < 0)
+                       goto copy_error;
+               if (!(flags & MSG_PEEK)) {
+                       if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
+                               BUG();
+                       rxrpc_free_skb(skb);
+               }
+               goto out;
+       }
+
+       ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
+                      ullen, &call->user_call_ID);
+       if (ret < 0)
+               goto copy_error;
+       ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
+
+       switch (skb->mark) {
+       case RXRPC_SKB_MARK_DATA:
+               BUG();
+       case RXRPC_SKB_MARK_FINAL_ACK:
+               ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code);
+               break;
+       case RXRPC_SKB_MARK_BUSY:
+               ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
+               break;
+       case RXRPC_SKB_MARK_REMOTE_ABORT:
+               abort_code = call->abort_code;
+               ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
+               break;
+       case RXRPC_SKB_MARK_NET_ERROR:
+               _debug("RECV NET ERROR %d", sp->error);
+               abort_code = sp->error;
+               ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code);
+               break;
+       case RXRPC_SKB_MARK_LOCAL_ERROR:
+               _debug("RECV LOCAL ERROR %d", sp->error);
+               abort_code = sp->error;
+               ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4,
+                              &abort_code);
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       if (ret < 0)
+               goto copy_error;
+
+terminal_message:
+       _debug("terminal");
+       msg->msg_flags &= ~MSG_MORE;
+       msg->msg_flags |= MSG_EOR;
+
+       if (!(flags & MSG_PEEK)) {
+               _net("free terminal skb %p", skb);
+               if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
+                       BUG();
+               rxrpc_free_skb(skb);
+               rxrpc_remove_user_ID(rx, call);
+       }
+
+       release_sock(&rx->sk);
+       rxrpc_put_call(call);
+       if (continue_call)
+               rxrpc_put_call(continue_call);
+       _leave(" = %d", ret);
+       return ret;
+
+copy_error:
+       _debug("copy error");
+       release_sock(&rx->sk);
+       rxrpc_put_call(call);
+       if (continue_call)
+               rxrpc_put_call(continue_call);
+       _leave(" = %d", ret);
+       return ret;
+
+csum_copy_error:
+       _debug("csum error");
+       release_sock(&rx->sk);
+       if (continue_call)
+               rxrpc_put_call(continue_call);
+       rxrpc_kill_skb(skb);
+       skb_kill_datagram(&rx->sk, skb, flags);
+       rxrpc_put_call(call);
+       return -EAGAIN;
+
+wait_interrupted:
+       ret = sock_intr_errno(timeo);
+wait_error:
+       finish_wait(rx->sk.sk_sleep, &wait);
+       if (continue_call)
+               rxrpc_put_call(continue_call);
+       if (!copied)
+               copied = ret;
+       _leave(" = %d [waitfail %d]", copied, ret);
+       return copied;
+}
+
+/**
+ * rxrpc_kernel_data_delivered - Record delivery of data message
+ * @skb: Message holding data
+ *
+ * Record the delivery of a data message.  This permits RxRPC to keep its
+ * tracking correct.  The socket buffer will be deleted.
+ */
+void rxrpc_kernel_data_delivered(struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       struct rxrpc_call *call = sp->call;
+
+       ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
+       ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
+       call->rx_data_recv = ntohl(sp->hdr.seq);
+
+       ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
+       rxrpc_free_skb(skb);
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_data_delivered);
+
+/**
+ * rxrpc_kernel_is_data_last - Determine if data message is last one
+ * @skb: Message holding data
+ *
+ * Determine if data message is last one for the parent call.
+ */
+bool rxrpc_kernel_is_data_last(struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_DATA);
+
+       return sp->hdr.flags & RXRPC_LAST_PACKET;
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_is_data_last);
+
+/**
+ * rxrpc_kernel_get_abort_code - Get the abort code from an RxRPC abort message
+ * @skb: Message indicating an abort
+ *
+ * Get the abort code from an RxRPC abort message.
+ */
+u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_REMOTE_ABORT);
+
+       return sp->call->abort_code;
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_get_abort_code);
+
+/**
+ * rxrpc_kernel_get_error - Get the error number from an RxRPC error message
+ * @skb: Message indicating an error
+ *
+ * Get the error number from an RxRPC error message.
+ */
+int rxrpc_kernel_get_error_number(struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       return sp->error;
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_get_error_number);
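
The exports above make up the receive side of the in-kernel API. Below is a rough sketch of how a kernel-space caller might classify a socket buffer handed over by the core; the delivery path that produces the skb is assumed rather than shown, rxrpc_kernel_free_skb() is taken from ar-skbuff.c later in this patch, and the mark constants are assumed to be visible through net/af_rxrpc.h as they are to the kernel users of this API:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/af_rxrpc.h>

static void example_process_rx_skb(struct sk_buff *skb)
{
	switch (skb->mark) {
	case RXRPC_SKB_MARK_DATA:
		/* ... copy the payload out of the skb here ... */
		if (rxrpc_kernel_is_data_last(skb))
			pr_debug("last data packet of this call\n");
		/* tell the core the data was consumed; this also frees the
		 * skb and keeps the hard-ACK bookkeeping correct */
		rxrpc_kernel_data_delivered(skb);
		break;

	case RXRPC_SKB_MARK_REMOTE_ABORT:
		pr_debug("call aborted, code %u\n",
			 rxrpc_kernel_get_abort_code(skb));
		rxrpc_kernel_free_skb(skb);
		break;

	case RXRPC_SKB_MARK_NET_ERROR:
	case RXRPC_SKB_MARK_LOCAL_ERROR:
		pr_debug("call failed, error %d\n",
			 rxrpc_kernel_get_error_number(skb));
		rxrpc_kernel_free_skb(skb);
		break;

	default:
		/* final ACK, busy indication, etc. carry no payload */
		rxrpc_kernel_free_skb(skb);
		break;
	}
}
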
diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c
new file mode 100644 (file)
index 0000000..60d1d36
--- /dev/null
@@ -0,0 +1,258 @@
+/* RxRPC security handling
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <linux/crypto.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static LIST_HEAD(rxrpc_security_methods);
+static DECLARE_RWSEM(rxrpc_security_sem);
+
+/*
+ * get an RxRPC security module
+ */
+static struct rxrpc_security *rxrpc_security_get(struct rxrpc_security *sec)
+{
+       return try_module_get(sec->owner) ? sec : NULL;
+}
+
+/*
+ * release an RxRPC security module
+ */
+static void rxrpc_security_put(struct rxrpc_security *sec)
+{
+       module_put(sec->owner);
+}
+
+/*
+ * look up an rxrpc security module
+ */
+struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
+{
+       struct rxrpc_security *sec = NULL;
+
+       _enter("");
+
+       down_read(&rxrpc_security_sem);
+
+       list_for_each_entry(sec, &rxrpc_security_methods, link) {
+               if (sec->security_index == security_index) {
+                       if (unlikely(!rxrpc_security_get(sec)))
+                               break;
+                       goto out;
+               }
+       }
+
+       sec = NULL;
+out:
+       up_read(&rxrpc_security_sem);
+       _leave(" = %p [%s]", sec, sec ? sec->name : "");
+       return sec;
+}
+
+/**
+ * rxrpc_register_security - register an RxRPC security handler
+ * @sec: security module
+ *
+ * register an RxRPC security handler for use by RxRPC
+ */
+int rxrpc_register_security(struct rxrpc_security *sec)
+{
+       struct rxrpc_security *psec;
+       int ret;
+
+       _enter("");
+       down_write(&rxrpc_security_sem);
+
+       ret = -EEXIST;
+       list_for_each_entry(psec, &rxrpc_security_methods, link) {
+               if (psec->security_index == sec->security_index)
+                       goto out;
+       }
+
+       list_add(&sec->link, &rxrpc_security_methods);
+
+       printk(KERN_NOTICE "RxRPC: Registered security type %d '%s'\n",
+              sec->security_index, sec->name);
+       ret = 0;
+
+out:
+       up_write(&rxrpc_security_sem);
+       _leave(" = %d", ret);
+       return ret;
+}
+
+EXPORT_SYMBOL_GPL(rxrpc_register_security);
+
+/**
+ * rxrpc_unregister_security - unregister an RxRPC security handler
+ * @sec: security module
+ *
+ * unregister an RxRPC security handler
+ */
+void rxrpc_unregister_security(struct rxrpc_security *sec)
+{
+       _enter("");
+       down_write(&rxrpc_security_sem);
+       list_del_init(&sec->link);
+       up_write(&rxrpc_security_sem);
+
+       printk(KERN_NOTICE "RxRPC: Unregistered security type %d '%s'\n",
+              sec->security_index, sec->name);
+}
+
+EXPORT_SYMBOL_GPL(rxrpc_unregister_security);
+
+/*
+ * initialise the security on a client connection
+ */
+int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
+{
+       struct rxrpc_security *sec;
+       struct key *key = conn->key;
+       int ret;
+
+       _enter("{%d},{%x}", conn->debug_id, key_serial(key));
+
+       if (!key)
+               return 0;
+
+       ret = key_validate(key);
+       if (ret < 0)
+               return ret;
+
+       sec = rxrpc_security_lookup(key->type_data.x[0]);
+       if (!sec)
+               return -EKEYREJECTED;
+       conn->security = sec;
+
+       ret = conn->security->init_connection_security(conn);
+       if (ret < 0) {
+               rxrpc_security_put(conn->security);
+               conn->security = NULL;
+               return ret;
+       }
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * initialise the security on a server connection
+ */
+int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
+{
+       struct rxrpc_security *sec;
+       struct rxrpc_local *local = conn->trans->local;
+       struct rxrpc_sock *rx;
+       struct key *key;
+       key_ref_t kref;
+       char kdesc[5+1+3+1];
+
+       _enter("");
+
+       sprintf(kdesc, "%u:%u", ntohs(conn->service_id), conn->security_ix);
+
+       sec = rxrpc_security_lookup(conn->security_ix);
+       if (!sec) {
+               _leave(" = -ENOKEY [lookup]");
+               return -ENOKEY;
+       }
+
+       /* find the service */
+       read_lock_bh(&local->services_lock);
+       list_for_each_entry(rx, &local->services, listen_link) {
+               if (rx->service_id == conn->service_id)
+                       goto found_service;
+       }
+
+       /* the service appears to have died */
+       read_unlock_bh(&local->services_lock);
+       rxrpc_security_put(sec);
+       _leave(" = -ENOENT");
+       return -ENOENT;
+
+found_service:
+       if (!rx->securities) {
+               read_unlock_bh(&local->services_lock);
+               rxrpc_security_put(sec);
+               _leave(" = -ENOKEY");
+               return -ENOKEY;
+       }
+
+       /* look through the service's keyring */
+       kref = keyring_search(make_key_ref(rx->securities, 1UL),
+                             &key_type_rxrpc_s, kdesc);
+       if (IS_ERR(kref)) {
+               read_unlock_bh(&local->services_lock);
+               rxrpc_security_put(sec);
+               _leave(" = %ld [search]", PTR_ERR(kref));
+               return PTR_ERR(kref);
+       }
+
+       key = key_ref_to_ptr(kref);
+       read_unlock_bh(&local->services_lock);
+
+       conn->server_key = key;
+       conn->security = sec;
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * secure a packet prior to transmission
+ */
+int rxrpc_secure_packet(const struct rxrpc_call *call,
+                       struct sk_buff *skb,
+                       size_t data_size,
+                       void *sechdr)
+{
+       if (call->conn->security)
+               return call->conn->security->secure_packet(
+                       call, skb, data_size, sechdr);
+       return 0;
+}
+
+/*
+ * verify the security on a received packet
+ */
+int rxrpc_verify_packet(const struct rxrpc_call *call, struct sk_buff *skb,
+                       u32 *_abort_code)
+{
+       if (call->conn->security)
+               return call->conn->security->verify_packet(
+                       call, skb, _abort_code);
+       return 0;
+}
+
+/*
+ * clear connection security
+ */
+void rxrpc_clear_conn_security(struct rxrpc_connection *conn)
+{
+       _enter("{%d}", conn->debug_id);
+
+       if (conn->security) {
+               conn->security->clear(conn);
+               rxrpc_security_put(conn->security);
+               conn->security = NULL;
+       }
+
+       key_put(conn->key);
+       key_put(conn->server_key);
+}
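
rxrpc_register_security() above expects a filled-in struct rxrpc_security. The sketch below shows the shape of a minimal security-class module, using only the fields and operations that ar-security.c itself exercises; the full structure in ar-internal.h (not shown here) likely defines further operations, e.g. for the challenge/response exchange hinted at by the connection states, which a real class such as rxkad would also have to supply. The security index chosen here is made up:

#include <linux/module.h>
#include "ar-internal.h"

static int example_init_connection_security(struct rxrpc_connection *conn)
{
	return 0;	/* derive any per-connection crypto state here */
}

static int example_secure_packet(const struct rxrpc_call *call,
				 struct sk_buff *skb, size_t data_size,
				 void *sechdr)
{
	return 0;	/* protect an outgoing packet */
}

static int example_verify_packet(const struct rxrpc_call *call,
				 struct sk_buff *skb, u32 *_abort_code)
{
	return 0;	/* check an incoming packet, set *_abort_code on failure */
}

static void example_clear(struct rxrpc_connection *conn)
{
	/* discard per-connection security state */
}

static struct rxrpc_security example_security = {
	.owner				= THIS_MODULE,
	.name				= "example",
	.security_index			= 42,	/* made-up index */
	.init_connection_security	= example_init_connection_security,
	.secure_packet			= example_secure_packet,
	.verify_packet			= example_verify_packet,
	.clear				= example_clear,
};

static int __init example_security_init(void)
{
	return rxrpc_register_security(&example_security);
}
module_init(example_security_init);

static void __exit example_security_exit(void)
{
	rxrpc_unregister_security(&example_security);
}
module_exit(example_security_exit);
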
diff --git a/net/rxrpc/ar-skbuff.c b/net/rxrpc/ar-skbuff.c
new file mode 100644 (file)
index 0000000..de755e0
--- /dev/null
@@ -0,0 +1,132 @@
+/* ar-skbuff.c: socket buffer destruction handling
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+/*
+ * set up for the ACK at the end of the receive phase when we discard the final
+ * receive phase data packet
+ * - called with softirqs disabled
+ */
+static void rxrpc_request_final_ACK(struct rxrpc_call *call)
+{
+       /* the call may be aborted before we have a chance to ACK it */
+       write_lock(&call->state_lock);
+
+       switch (call->state) {
+       case RXRPC_CALL_CLIENT_RECV_REPLY:
+               call->state = RXRPC_CALL_CLIENT_FINAL_ACK;
+               _debug("request final ACK");
+
+               /* get an extra ref on the call for the final-ACK generator to
+                * release */
+               rxrpc_get_call(call);
+               set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
+               if (try_to_del_timer_sync(&call->ack_timer) >= 0)
+                       rxrpc_queue_call(call);
+               break;
+
+       case RXRPC_CALL_SERVER_RECV_REQUEST:
+               call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
+       default:
+               break;
+       }
+
+       write_unlock(&call->state_lock);
+}
+
+/*
+ * drop the bottom ACK off of the call ACK window and advance the window
+ */
+static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
+                               struct rxrpc_skb_priv *sp)
+{
+       int loop;
+       u32 seq;
+
+       spin_lock_bh(&call->lock);
+
+       _debug("hard ACK #%u", ntohl(sp->hdr.seq));
+
+       for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
+               call->ackr_window[loop] >>= 1;
+               call->ackr_window[loop] |=
+                       call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
+       }
+
+       seq = ntohl(sp->hdr.seq);
+       ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
+       call->rx_data_eaten = seq;
+
+       if (call->ackr_win_top < UINT_MAX)
+               call->ackr_win_top++;
+
+       ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
+                   call->rx_data_post, >=, call->rx_data_recv);
+       ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
+                   call->rx_data_recv, >=, call->rx_data_eaten);
+
+       if (sp->hdr.flags & RXRPC_LAST_PACKET) {
+               rxrpc_request_final_ACK(call);
+       } else if (atomic_dec_and_test(&call->ackr_not_idle) &&
+                  test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
+               _debug("send Rx idle ACK");
+               __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial,
+                                   true);
+       }
+
+       spin_unlock_bh(&call->lock);
+}
+
+/*
+ * destroy a packet that has an RxRPC control buffer
+ * - advance the hard-ACK state of the parent call (done here in case something
+ *   in the kernel bypasses recvmsg() and steals the packet directly off of the
+ *   socket receive queue)
+ */
+void rxrpc_packet_destructor(struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       struct rxrpc_call *call = sp->call;
+
+       _enter("%p{%p}", skb, call);
+
+       if (call) {
+               /* send the final ACK on a client call */
+               if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
+                       rxrpc_hard_ACK_data(call, sp);
+               rxrpc_put_call(call);
+               sp->call = NULL;
+       }
+
+       if (skb->sk)
+               sock_rfree(skb);
+       _leave("");
+}
+
+/**
+ * rxrpc_kernel_free_skb - Free an RxRPC socket buffer
+ * @skb: The socket buffer to be freed
+ *
+ * Let RxRPC free its own socket buffer, permitting it to maintain debug
+ * accounting.
+ */
+void rxrpc_kernel_free_skb(struct sk_buff *skb)
+{
+       rxrpc_free_skb(skb);
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_free_skb);
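
The loop at the top of rxrpc_hard_ACK_data() above slides the whole soft-ACK bitmap down by one packet: each word is shifted right and the lowest bit of the following word is pulled in at the top, which implies ackr_window[] carries one spare word beyond RXRPC_ACKR_WINDOW_ASZ (its definition lives in ar-internal.h, not shown here). The same shift in a standalone, userspace-compilable form:

#include <limits.h>
#include <stdio.h>

#define EXAMPLE_BITS_PER_LONG	(CHAR_BIT * (int) sizeof(unsigned long))
#define WINDOW_WORDS		2	/* stands in for RXRPC_ACKR_WINDOW_ASZ */

/* shift a multi-word bitmap right by one bit; window[WINDOW_WORDS] is the
 * spare word that only ever feeds zero bits in from the top */
static void window_shift(unsigned long window[WINDOW_WORDS + 1])
{
	int i;

	for (i = 0; i < WINDOW_WORDS; i++) {
		window[i] >>= 1;
		window[i] |= window[i + 1] << (EXAMPLE_BITS_PER_LONG - 1);
	}
}

int main(void)
{
	unsigned long window[WINDOW_WORDS + 1] = { 0x5UL, 0x1UL, 0UL };

	/* bit 0 of word 1 moves into the top bit of word 0 */
	window_shift(window);
	printf("%#lx %#lx\n", window[0], window[1]);
	return 0;
}
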
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
new file mode 100644 (file)
index 0000000..d43d78f
--- /dev/null
@@ -0,0 +1,276 @@
+/* RxRPC point-to-point transport session management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static void rxrpc_transport_reaper(struct work_struct *work);
+
+static LIST_HEAD(rxrpc_transports);
+static DEFINE_RWLOCK(rxrpc_transport_lock);
+static unsigned long rxrpc_transport_timeout = 3600 * 24;
+static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);
+
+/*
+ * allocate a new transport session manager
+ */
+static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
+                                                    struct rxrpc_peer *peer,
+                                                    gfp_t gfp)
+{
+       struct rxrpc_transport *trans;
+
+       _enter("");
+
+       trans = kzalloc(sizeof(struct rxrpc_transport), gfp);
+       if (trans) {
+               trans->local = local;
+               trans->peer = peer;
+               INIT_LIST_HEAD(&trans->link);
+               trans->bundles = RB_ROOT;
+               trans->client_conns = RB_ROOT;
+               trans->server_conns = RB_ROOT;
+               skb_queue_head_init(&trans->error_queue);
+               spin_lock_init(&trans->client_lock);
+               rwlock_init(&trans->conn_lock);
+               atomic_set(&trans->usage, 1);
+               trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
+
+               if (peer->srx.transport.family == AF_INET) {
+                       switch (peer->srx.transport_type) {
+                       case SOCK_DGRAM:
+                               INIT_WORK(&trans->error_handler,
+                                         rxrpc_UDP_error_handler);
+                               break;
+                       default:
+                               BUG();
+                               break;
+                       }
+               } else {
+                       BUG();
+               }
+       }
+
+       _leave(" = %p", trans);
+       return trans;
+}
+
+/*
+ * obtain a transport session for the nominated endpoints
+ */
+struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
+                                           struct rxrpc_peer *peer,
+                                           gfp_t gfp)
+{
+       struct rxrpc_transport *trans, *candidate;
+       const char *new = "old";
+       int usage;
+
+       _enter("{%u.%u.%u.%u+%hu},{%u.%u.%u.%u+%hu},",
+              NIPQUAD(local->srx.transport.sin.sin_addr),
+              ntohs(local->srx.transport.sin.sin_port),
+              NIPQUAD(peer->srx.transport.sin.sin_addr),
+              ntohs(peer->srx.transport.sin.sin_port));
+
+       /* search the transport list first */
+       read_lock_bh(&rxrpc_transport_lock);
+       list_for_each_entry(trans, &rxrpc_transports, link) {
+               if (trans->local == local && trans->peer == peer)
+                       goto found_extant_transport;
+       }
+       read_unlock_bh(&rxrpc_transport_lock);
+
+       /* not yet present - create a candidate for a new record and then
+        * redo the search */
+       candidate = rxrpc_alloc_transport(local, peer, gfp);
+       if (!candidate) {
+               _leave(" = -ENOMEM");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       write_lock_bh(&rxrpc_transport_lock);
+
+       list_for_each_entry(trans, &rxrpc_transports, link) {
+               if (trans->local == local && trans->peer == peer)
+                       goto found_extant_second;
+       }
+
+       /* we can now add the new candidate to the list */
+       trans = candidate;
+       candidate = NULL;
+
+       rxrpc_get_local(trans->local);
+       atomic_inc(&trans->peer->usage);
+       list_add_tail(&trans->link, &rxrpc_transports);
+       write_unlock_bh(&rxrpc_transport_lock);
+       new = "new";
+
+success:
+       _net("TRANSPORT %s %d local %d -> peer %d",
+            new,
+            trans->debug_id,
+            trans->local->debug_id,
+            trans->peer->debug_id);
+
+       _leave(" = %p {u=%d}", trans, atomic_read(&trans->usage));
+       return trans;
+
+       /* we found the transport in the list immediately */
+found_extant_transport:
+       usage = atomic_inc_return(&trans->usage);
+       read_unlock_bh(&rxrpc_transport_lock);
+       goto success;
+
+       /* we found the transport on the second time through the list */
+found_extant_second:
+       usage = atomic_inc_return(&trans->usage);
+       write_unlock_bh(&rxrpc_transport_lock);
+       kfree(candidate);
+       goto success;
+}
+
+/*
+ * find the transport connecting two endpoints
+ */
+struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local,
+                                            struct rxrpc_peer *peer)
+{
+       struct rxrpc_transport *trans;
+
+       _enter("{%u.%u.%u.%u+%hu},{%u.%u.%u.%u+%hu},",
+              NIPQUAD(local->srx.transport.sin.sin_addr),
+              ntohs(local->srx.transport.sin.sin_port),
+              NIPQUAD(peer->srx.transport.sin.sin_addr),
+              ntohs(peer->srx.transport.sin.sin_port));
+
+       /* search the transport list */
+       read_lock_bh(&rxrpc_transport_lock);
+
+       list_for_each_entry(trans, &rxrpc_transports, link) {
+               if (trans->local == local && trans->peer == peer)
+                       goto found_extant_transport;
+       }
+
+       read_unlock_bh(&rxrpc_transport_lock);
+       _leave(" = NULL");
+       return NULL;
+
+found_extant_transport:
+       atomic_inc(&trans->usage);
+       read_unlock_bh(&rxrpc_transport_lock);
+       _leave(" = %p", trans);
+       return trans;
+}
+
+/*
+ * release a transport session
+ */
+void rxrpc_put_transport(struct rxrpc_transport *trans)
+{
+       _enter("%p{u=%d}", trans, atomic_read(&trans->usage));
+
+       ASSERTCMP(atomic_read(&trans->usage), >, 0);
+
+       trans->put_time = xtime.tv_sec;
+       if (unlikely(atomic_dec_and_test(&trans->usage))) {
+               _debug("zombie");
+               /* let the reaper determine the timeout to avoid a race with
+                * overextending the timeout if the reaper is running at the
+                * same time */
+               rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
+       }
+       _leave("");
+}
+
+/*
+ * clean up a transport session
+ */
+static void rxrpc_cleanup_transport(struct rxrpc_transport *trans)
+{
+       _net("DESTROY TRANS %d", trans->debug_id);
+
+       rxrpc_purge_queue(&trans->error_queue);
+
+       rxrpc_put_local(trans->local);
+       rxrpc_put_peer(trans->peer);
+       kfree(trans);
+}
+
+/*
+ * reap dead transports that have passed their expiry date
+ */
+static void rxrpc_transport_reaper(struct work_struct *work)
+{
+       struct rxrpc_transport *trans, *_p;
+       unsigned long now, earliest, reap_time;
+
+       LIST_HEAD(graveyard);
+
+       _enter("");
+
+       now = xtime.tv_sec;
+       earliest = ULONG_MAX;
+
+       /* extract all the transports that have been dead too long */
+       write_lock_bh(&rxrpc_transport_lock);
+       list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) {
+               _debug("reap TRANS %d { u=%d t=%ld }",
+                      trans->debug_id, atomic_read(&trans->usage),
+                      (long) now - (long) trans->put_time);
+
+               if (likely(atomic_read(&trans->usage) > 0))
+                       continue;
+
+               reap_time = trans->put_time + rxrpc_transport_timeout;
+               if (reap_time <= now)
+                       list_move_tail(&trans->link, &graveyard);
+               else if (reap_time < earliest)
+                       earliest = reap_time;
+       }
+       write_unlock_bh(&rxrpc_transport_lock);
+
+       if (earliest != ULONG_MAX) {
+               _debug("reschedule reaper %ld", (long) earliest - now);
+               ASSERTCMP(earliest, >, now);
+               rxrpc_queue_delayed_work(&rxrpc_transport_reap,
+                                        (earliest - now) * HZ);
+       }
+
+       /* then destroy all those pulled out */
+       while (!list_empty(&graveyard)) {
+               trans = list_entry(graveyard.next, struct rxrpc_transport,
+                                  link);
+               list_del_init(&trans->link);
+
+               ASSERTCMP(atomic_read(&trans->usage), ==, 0);
+               rxrpc_cleanup_transport(trans);
+       }
+
+       _leave("");
+}
+
+/*
+ * preemptively destroy all the transport session records rather than waiting
+ * for them to time out
+ */
+void __exit rxrpc_destroy_all_transports(void)
+{
+       _enter("");
+
+       rxrpc_transport_timeout = 0;
+       cancel_delayed_work(&rxrpc_transport_reap);
+       rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
+
+       _leave("");
+}
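
rxrpc_get_transport() above uses the classic optimistic get-or-create shape: look up under the read lock, drop the lock to allocate a candidate with the caller's gfp mask, then look up again under the write lock and discard the candidate if another thread inserted the record first. Reduced to its essentials with made-up types:

#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct obj {
	struct list_head	link;
	atomic_t		usage;
	unsigned long		key;
};

static LIST_HEAD(obj_list);
static DEFINE_RWLOCK(obj_lock);

static struct obj *obj_get(unsigned long key, gfp_t gfp)
{
	struct obj *o, *candidate;

	/* fast path: hope the record already exists */
	read_lock_bh(&obj_lock);
	list_for_each_entry(o, &obj_list, link) {
		if (o->key == key) {
			atomic_inc(&o->usage);
			read_unlock_bh(&obj_lock);
			return o;
		}
	}
	read_unlock_bh(&obj_lock);

	/* slow path: allocate a candidate outside of any lock, then redo the
	 * search under the write lock in case we raced with another creator */
	candidate = kzalloc(sizeof(*candidate), gfp);
	if (!candidate)
		return ERR_PTR(-ENOMEM);
	candidate->key = key;
	atomic_set(&candidate->usage, 1);

	write_lock_bh(&obj_lock);
	list_for_each_entry(o, &obj_list, link) {
		if (o->key == key) {
			atomic_inc(&o->usage);
			write_unlock_bh(&obj_lock);
			kfree(candidate);	/* we lost the race */
			return o;
		}
	}
	list_add_tail(&candidate->link, &obj_list);
	write_unlock_bh(&obj_lock);
	return candidate;
}
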
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c
deleted file mode 100644 (file)
index d07122b..0000000
+++ /dev/null
@@ -1,2277 +0,0 @@
-/* call.c: Rx call routines
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/call.h>
-#include <rxrpc/message.h>
-#include "internal.h"
-
-__RXACCT_DECL(atomic_t rxrpc_call_count);
-__RXACCT_DECL(atomic_t rxrpc_message_count);
-
-LIST_HEAD(rxrpc_calls);
-DECLARE_RWSEM(rxrpc_calls_sem);
-
-unsigned rxrpc_call_rcv_timeout                        = HZ/3;
-static unsigned rxrpc_call_acks_timeout                = HZ/3;
-static unsigned rxrpc_call_dfr_ack_timeout     = HZ/20;
-static unsigned short rxrpc_call_max_resend    = HZ/10;
-
-const char *rxrpc_call_states[] = {
-       "COMPLETE",
-       "ERROR",
-       "SRVR_RCV_OPID",
-       "SRVR_RCV_ARGS",
-       "SRVR_GOT_ARGS",
-       "SRVR_SND_REPLY",
-       "SRVR_RCV_FINAL_ACK",
-       "CLNT_SND_ARGS",
-       "CLNT_RCV_REPLY",
-       "CLNT_GOT_REPLY"
-};
-
-const char *rxrpc_call_error_states[] = {
-       "NO_ERROR",
-       "LOCAL_ABORT",
-       "PEER_ABORT",
-       "LOCAL_ERROR",
-       "REMOTE_ERROR"
-};
-
-const char *rxrpc_pkts[] = {
-       "?00",
-       "data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug",
-       "?09", "?10", "?11", "?12", "?13", "?14", "?15"
-};
-
-static const char *rxrpc_acks[] = {
-       "---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
-       "-?-"
-};
-
-static const char _acktype[] = "NA-";
-
-static void rxrpc_call_receive_packet(struct rxrpc_call *call);
-static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
-                                          struct rxrpc_message *msg);
-static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call,
-                                         struct rxrpc_message *msg);
-static void rxrpc_call_definitively_ACK(struct rxrpc_call *call,
-                                       rxrpc_seq_t higest);
-static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest);
-static int __rxrpc_call_read_data(struct rxrpc_call *call);
-
-static int rxrpc_call_record_ACK(struct rxrpc_call *call,
-                                struct rxrpc_message *msg,
-                                rxrpc_seq_t seq,
-                                size_t count);
-
-static int rxrpc_call_flush(struct rxrpc_call *call);
-
-#define _state(call) \
-       _debug("[[[ state %s ]]]", rxrpc_call_states[call->app_call_state]);
-
-static void rxrpc_call_default_attn_func(struct rxrpc_call *call)
-{
-       wake_up(&call->waitq);
-}
-
-static void rxrpc_call_default_error_func(struct rxrpc_call *call)
-{
-       wake_up(&call->waitq);
-}
-
-static void rxrpc_call_default_aemap_func(struct rxrpc_call *call)
-{
-       switch (call->app_err_state) {
-       case RXRPC_ESTATE_LOCAL_ABORT:
-               call->app_abort_code = -call->app_errno;
-       case RXRPC_ESTATE_PEER_ABORT:
-               call->app_errno = -ECONNABORTED;
-       default:
-               break;
-       }
-}
-
-static void __rxrpc_call_acks_timeout(unsigned long _call)
-{
-       struct rxrpc_call *call = (struct rxrpc_call *) _call;
-
-       _debug("ACKS TIMEOUT %05lu", jiffies - call->cjif);
-
-       call->flags |= RXRPC_CALL_ACKS_TIMO;
-       rxrpc_krxiod_queue_call(call);
-}
-
-static void __rxrpc_call_rcv_timeout(unsigned long _call)
-{
-       struct rxrpc_call *call = (struct rxrpc_call *) _call;
-
-       _debug("RCV TIMEOUT %05lu", jiffies - call->cjif);
-
-       call->flags |= RXRPC_CALL_RCV_TIMO;
-       rxrpc_krxiod_queue_call(call);
-}
-
-static void __rxrpc_call_ackr_timeout(unsigned long _call)
-{
-       struct rxrpc_call *call = (struct rxrpc_call *) _call;
-
-       _debug("ACKR TIMEOUT %05lu",jiffies - call->cjif);
-
-       call->flags |= RXRPC_CALL_ACKR_TIMO;
-       rxrpc_krxiod_queue_call(call);
-}
-
-/*****************************************************************************/
-/*
- * calculate a timeout based on an RTT value
- */
-static inline unsigned long __rxrpc_rtt_based_timeout(struct rxrpc_call *call,
-                                                     unsigned long val)
-{
-       unsigned long expiry = call->conn->peer->rtt / (1000000 / HZ);
-
-       expiry += 10;
-       if (expiry < HZ / 25)
-               expiry = HZ / 25;
-       if (expiry > HZ)
-               expiry = HZ;
-
-       _leave(" = %lu jiffies", expiry);
-       return jiffies + expiry;
-} /* end __rxrpc_rtt_based_timeout() */
-
-/*****************************************************************************/
-/*
- * create a new call record
- */
-static inline int __rxrpc_create_call(struct rxrpc_connection *conn,
-                                     struct rxrpc_call **_call)
-{
-       struct rxrpc_call *call;
-
-       _enter("%p", conn);
-
-       /* allocate and initialise a call record */
-       call = (struct rxrpc_call *) get_zeroed_page(GFP_KERNEL);
-       if (!call) {
-               _leave(" ENOMEM");
-               return -ENOMEM;
-       }
-
-       atomic_set(&call->usage, 1);
-
-       init_waitqueue_head(&call->waitq);
-       spin_lock_init(&call->lock);
-       INIT_LIST_HEAD(&call->link);
-       INIT_LIST_HEAD(&call->acks_pendq);
-       INIT_LIST_HEAD(&call->rcv_receiveq);
-       INIT_LIST_HEAD(&call->rcv_krxiodq_lk);
-       INIT_LIST_HEAD(&call->app_readyq);
-       INIT_LIST_HEAD(&call->app_unreadyq);
-       INIT_LIST_HEAD(&call->app_link);
-       INIT_LIST_HEAD(&call->app_attn_link);
-
-       init_timer(&call->acks_timeout);
-       call->acks_timeout.data = (unsigned long) call;
-       call->acks_timeout.function = __rxrpc_call_acks_timeout;
-
-       init_timer(&call->rcv_timeout);
-       call->rcv_timeout.data = (unsigned long) call;
-       call->rcv_timeout.function = __rxrpc_call_rcv_timeout;
-
-       init_timer(&call->ackr_dfr_timo);
-       call->ackr_dfr_timo.data = (unsigned long) call;
-       call->ackr_dfr_timo.function = __rxrpc_call_ackr_timeout;
-
-       call->conn = conn;
-       call->ackr_win_bot = 1;
-       call->ackr_win_top = call->ackr_win_bot + RXRPC_CALL_ACK_WINDOW_SIZE - 1;
-       call->ackr_prev_seq = 0;
-       call->app_mark = RXRPC_APP_MARK_EOF;
-       call->app_attn_func = rxrpc_call_default_attn_func;
-       call->app_error_func = rxrpc_call_default_error_func;
-       call->app_aemap_func = rxrpc_call_default_aemap_func;
-       call->app_scr_alloc = call->app_scratch;
-
-       call->cjif = jiffies;
-
-       _leave(" = 0 (%p)", call);
-
-       *_call = call;
-
-       return 0;
-} /* end __rxrpc_create_call() */
-
-/*****************************************************************************/
-/*
- * create a new call record for outgoing calls
- */
-int rxrpc_create_call(struct rxrpc_connection *conn,
-                     rxrpc_call_attn_func_t attn,
-                     rxrpc_call_error_func_t error,
-                     rxrpc_call_aemap_func_t aemap,
-                     struct rxrpc_call **_call)
-{
-       DECLARE_WAITQUEUE(myself, current);
-
-       struct rxrpc_call *call;
-       int ret, cix, loop;
-
-       _enter("%p", conn);
-
-       /* allocate and initialise a call record */
-       ret = __rxrpc_create_call(conn, &call);
-       if (ret < 0) {
-               _leave(" = %d", ret);
-               return ret;
-       }
-
-       call->app_call_state = RXRPC_CSTATE_CLNT_SND_ARGS;
-       if (attn)
-               call->app_attn_func = attn;
-       if (error)
-               call->app_error_func = error;
-       if (aemap)
-               call->app_aemap_func = aemap;
-
-       _state(call);
-
-       spin_lock(&conn->lock);
-       set_current_state(TASK_INTERRUPTIBLE);
-       add_wait_queue(&conn->chanwait, &myself);
-
- try_again:
-       /* try to find an unused channel */
-       for (cix = 0; cix < 4; cix++)
-               if (!conn->channels[cix])
-                       goto obtained_chan;
-
-       /* no free channels - wait for one to become available */
-       ret = -EINTR;
-       if (signal_pending(current))
-               goto error_unwait;
-
-       spin_unlock(&conn->lock);
-
-       schedule();
-       set_current_state(TASK_INTERRUPTIBLE);
-
-       spin_lock(&conn->lock);
-       goto try_again;
-
-       /* got a channel - now attach to the connection */
- obtained_chan:
-       remove_wait_queue(&conn->chanwait, &myself);
-       set_current_state(TASK_RUNNING);
-
-       /* concoct a unique call number */
- next_callid:
-       call->call_id = htonl(++conn->call_counter);
-       for (loop = 0; loop < 4; loop++)
-               if (conn->channels[loop] &&
-                   conn->channels[loop]->call_id == call->call_id)
-                       goto next_callid;
-
-       rxrpc_get_connection(conn);
-       conn->channels[cix] = call; /* assign _after_ done callid check loop */
-       do_gettimeofday(&conn->atime);
-       call->chan_ix = htonl(cix);
-
-       spin_unlock(&conn->lock);
-
-       down_write(&rxrpc_calls_sem);
-       list_add_tail(&call->call_link, &rxrpc_calls);
-       up_write(&rxrpc_calls_sem);
-
-       __RXACCT(atomic_inc(&rxrpc_call_count));
-       *_call = call;
-
-       _leave(" = 0 (call=%p cix=%u)", call, cix);
-       return 0;
-
- error_unwait:
-       remove_wait_queue(&conn->chanwait, &myself);
-       set_current_state(TASK_RUNNING);
-       spin_unlock(&conn->lock);
-
-       free_page((unsigned long) call);
-       _leave(" = %d", ret);
-       return ret;
-} /* end rxrpc_create_call() */
-
-/*****************************************************************************/
-/*
- * create a new call record for incoming calls
- */
-int rxrpc_incoming_call(struct rxrpc_connection *conn,
-                       struct rxrpc_message *msg,
-                       struct rxrpc_call **_call)
-{
-       struct rxrpc_call *call;
-       unsigned cix;
-       int ret;
-
-       cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
-
-       _enter("%p,%u,%u", conn, ntohl(msg->hdr.callNumber), cix);
-
-       /* allocate and initialise a call record */
-       ret = __rxrpc_create_call(conn, &call);
-       if (ret < 0) {
-               _leave(" = %d", ret);
-               return ret;
-       }
-
-       call->pkt_rcv_count = 1;
-       call->app_call_state = RXRPC_CSTATE_SRVR_RCV_OPID;
-       call->app_mark = sizeof(uint32_t);
-
-       _state(call);
-
-       /* attach to the connection */
-       ret = -EBUSY;
-       call->chan_ix = htonl(cix);
-       call->call_id = msg->hdr.callNumber;
-
-       spin_lock(&conn->lock);
-
-       if (!conn->channels[cix] ||
-           conn->channels[cix]->app_call_state == RXRPC_CSTATE_COMPLETE ||
-           conn->channels[cix]->app_call_state == RXRPC_CSTATE_ERROR
-           ) {
-               conn->channels[cix] = call;
-               rxrpc_get_connection(conn);
-               ret = 0;
-       }
-
-       spin_unlock(&conn->lock);
-
-       if (ret < 0) {
-               free_page((unsigned long) call);
-               call = NULL;
-       }
-
-       if (ret == 0) {
-               down_write(&rxrpc_calls_sem);
-               list_add_tail(&call->call_link, &rxrpc_calls);
-               up_write(&rxrpc_calls_sem);
-               __RXACCT(atomic_inc(&rxrpc_call_count));
-               *_call = call;
-       }
-
-       _leave(" = %d [%p]", ret, call);
-       return ret;
-} /* end rxrpc_incoming_call() */
-
-/*****************************************************************************/
-/*
- * free a call record
- */
-void rxrpc_put_call(struct rxrpc_call *call)
-{
-       struct rxrpc_connection *conn = call->conn;
-       struct rxrpc_message *msg;
-
-       _enter("%p{u=%d}",call,atomic_read(&call->usage));
-
-       /* sanity check */
-       if (atomic_read(&call->usage) <= 0)
-               BUG();
-
-       /* to prevent a race, the decrement and the de-list must be effectively
-        * atomic */
-       spin_lock(&conn->lock);
-       if (likely(!atomic_dec_and_test(&call->usage))) {
-               spin_unlock(&conn->lock);
-               _leave("");
-               return;
-       }
-
-       if (conn->channels[ntohl(call->chan_ix)] == call)
-               conn->channels[ntohl(call->chan_ix)] = NULL;
-
-       spin_unlock(&conn->lock);
-
-       wake_up(&conn->chanwait);
-
-       rxrpc_put_connection(conn);
-
-       /* clear the timers and dequeue from krxiod */
-       del_timer_sync(&call->acks_timeout);
-       del_timer_sync(&call->rcv_timeout);
-       del_timer_sync(&call->ackr_dfr_timo);
-
-       rxrpc_krxiod_dequeue_call(call);
-
-       /* clean up the contents of the struct */
-       if (call->snd_nextmsg)
-               rxrpc_put_message(call->snd_nextmsg);
-
-       if (call->snd_ping)
-               rxrpc_put_message(call->snd_ping);
-
-       while (!list_empty(&call->acks_pendq)) {
-               msg = list_entry(call->acks_pendq.next,
-                                struct rxrpc_message, link);
-               list_del(&msg->link);
-               rxrpc_put_message(msg);
-       }
-
-       while (!list_empty(&call->rcv_receiveq)) {
-               msg = list_entry(call->rcv_receiveq.next,
-                                struct rxrpc_message, link);
-               list_del(&msg->link);
-               rxrpc_put_message(msg);
-       }
-
-       while (!list_empty(&call->app_readyq)) {
-               msg = list_entry(call->app_readyq.next,
-                                struct rxrpc_message, link);
-               list_del(&msg->link);
-               rxrpc_put_message(msg);
-       }
-
-       while (!list_empty(&call->app_unreadyq)) {
-               msg = list_entry(call->app_unreadyq.next,
-                                struct rxrpc_message, link);
-               list_del(&msg->link);
-               rxrpc_put_message(msg);
-       }
-
-       module_put(call->owner);
-
-       down_write(&rxrpc_calls_sem);
-       list_del(&call->call_link);
-       up_write(&rxrpc_calls_sem);
-
-       __RXACCT(atomic_dec(&rxrpc_call_count));
-       free_page((unsigned long) call);
-
-       _leave(" [destroyed]");
-} /* end rxrpc_put_call() */
-
-/*****************************************************************************/
-/*
- * actually generate a normal ACK
- */
-static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call,
-                                             rxrpc_seq_t seq)
-{
-       struct rxrpc_message *msg;
-       struct kvec diov[3];
-       __be32 aux[4];
-       int delta, ret;
-
-       /* ACKs default to DELAY */
-       if (!call->ackr.reason)
-               call->ackr.reason = RXRPC_ACK_DELAY;
-
-       _proto("Rx %05lu Sending ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
-              jiffies - call->cjif,
-              ntohs(call->ackr.maxSkew),
-              ntohl(call->ackr.firstPacket),
-              ntohl(call->ackr.previousPacket),
-              ntohl(call->ackr.serial),
-              rxrpc_acks[call->ackr.reason],
-              call->ackr.nAcks);
-
-       aux[0] = htonl(call->conn->peer->if_mtu);       /* interface MTU */
-       aux[1] = htonl(1444);                           /* max MTU */
-       aux[2] = htonl(16);                             /* rwind */
-       aux[3] = htonl(4);                              /* max packets */
-
-       diov[0].iov_len  = sizeof(struct rxrpc_ackpacket);
-       diov[0].iov_base = &call->ackr;
-       diov[1].iov_len  = call->ackr_pend_cnt + 3;
-       diov[1].iov_base = call->ackr_array;
-       diov[2].iov_len  = sizeof(aux);
-       diov[2].iov_base = &aux;
-
-       /* build and send the message */
-       ret = rxrpc_conn_newmsg(call->conn, call, RXRPC_PACKET_TYPE_ACK,
-                               3, diov, GFP_KERNEL, &msg);
-       if (ret < 0)
-               goto out;
-
-       msg->seq = seq;
-       msg->hdr.seq = htonl(seq);
-       msg->hdr.flags |= RXRPC_SLOW_START_OK;
-
-       ret = rxrpc_conn_sendmsg(call->conn, msg);
-       rxrpc_put_message(msg);
-       if (ret < 0)
-               goto out;
-       call->pkt_snd_count++;
-
-       /* count how many actual ACKs there were at the front */
-       for (delta = 0; delta < call->ackr_pend_cnt; delta++)
-               if (call->ackr_array[delta] != RXRPC_ACK_TYPE_ACK)
-                       break;
-
-       call->ackr_pend_cnt -= delta; /* all ACK'd to this point */
-
-       /* crank the ACK window around */
-       if (delta == 0) {
-               /* un-ACK'd window */
-       }
-       else if (delta < RXRPC_CALL_ACK_WINDOW_SIZE) {
-               /* partially ACK'd window
-                * - shuffle down to avoid losing out-of-sequence packets
-                */
-               call->ackr_win_bot += delta;
-               call->ackr_win_top += delta;
-
-               memmove(&call->ackr_array[0],
-                       &call->ackr_array[delta],
-                       call->ackr_pend_cnt);
-
-               memset(&call->ackr_array[call->ackr_pend_cnt],
-                      RXRPC_ACK_TYPE_NACK,
-                      sizeof(call->ackr_array) - call->ackr_pend_cnt);
-       }
-       else {
-               /* fully ACK'd window
-                * - just clear the whole thing
-                */
-               memset(&call->ackr_array,
-                      RXRPC_ACK_TYPE_NACK,
-                      sizeof(call->ackr_array));
-       }
-
-       /* clear this ACK */
-       memset(&call->ackr, 0, sizeof(call->ackr));
-
- out:
-       if (!call->app_call_state)
-               printk("___ STATE 0 ___\n");
-       return ret;
-} /* end __rxrpc_call_gen_normal_ACK() */
-
-/*****************************************************************************/
-/*
- * note the reception of a packet in the call's ACK records and generate an
- * appropriate ACK packet if necessary
- * - returns 0 if packet should be processed, 1 if packet should be ignored
- *   and -ve on an error
- */
-static int rxrpc_call_generate_ACK(struct rxrpc_call *call,
-                                  struct rxrpc_header *hdr,
-                                  struct rxrpc_ackpacket *ack)
-{
-       struct rxrpc_message *msg;
-       rxrpc_seq_t seq;
-       unsigned offset;
-       int ret = 0, err;
-       u8 special_ACK, do_ACK, force;
-
-       _enter("%p,%p { seq=%d tp=%d fl=%02x }",
-              call, hdr, ntohl(hdr->seq), hdr->type, hdr->flags);
-
-       seq = ntohl(hdr->seq);
-       offset = seq - call->ackr_win_bot;
-       do_ACK = RXRPC_ACK_DELAY;
-       special_ACK = 0;
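-       /* the first data packet of a call is always ACK'd immediately */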
-       force = (seq == 1);
-
-       if (call->ackr_high_seq < seq)
-               call->ackr_high_seq = seq;
-
-       /* deal with generation of obvious special ACKs first */
-       if (ack && ack->reason == RXRPC_ACK_PING) {
-               special_ACK = RXRPC_ACK_PING_RESPONSE;
-               ret = 1;
-               goto gen_ACK;
-       }
-
-       if (seq < call->ackr_win_bot) {
-               special_ACK = RXRPC_ACK_DUPLICATE;
-               ret = 1;
-               goto gen_ACK;
-       }
-
-       if (seq >= call->ackr_win_top) {
-               special_ACK = RXRPC_ACK_EXCEEDS_WINDOW;
-               ret = 1;
-               goto gen_ACK;
-       }
-
-       if (call->ackr_array[offset] != RXRPC_ACK_TYPE_NACK) {
-               special_ACK = RXRPC_ACK_DUPLICATE;
-               ret = 1;
-               goto gen_ACK;
-       }
-
-       /* okay... it's a normal data packet inside the ACK window */
-       call->ackr_array[offset] = RXRPC_ACK_TYPE_ACK;
-
-       if (offset < call->ackr_pend_cnt) {
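-               /* already within the pending ACK run - nothing more to do */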
-       }
-       else if (offset > call->ackr_pend_cnt) {
-               do_ACK = RXRPC_ACK_OUT_OF_SEQUENCE;
-               call->ackr_pend_cnt = offset;
-               goto gen_ACK;
-       }
-
-       if (hdr->flags & RXRPC_REQUEST_ACK) {
-               do_ACK = RXRPC_ACK_REQUESTED;
-       }
-
-       /* generate an ACK on the final packet of a reply just received */
-       if (hdr->flags & RXRPC_LAST_PACKET) {
-               if (call->conn->out_clientflag)
-                       force = 1;
-       }
-       else if (!(hdr->flags & RXRPC_MORE_PACKETS)) {
-               do_ACK = RXRPC_ACK_REQUESTED;
-       }
-
-       /* re-ACK packets previously received out-of-order */
-       for (offset++; offset < RXRPC_CALL_ACK_WINDOW_SIZE; offset++)
-               if (call->ackr_array[offset] != RXRPC_ACK_TYPE_ACK)
-                       break;
-
-       call->ackr_pend_cnt = offset;
-
-       /* generate an ACK if we fill up the window */
-       if (call->ackr_pend_cnt >= RXRPC_CALL_ACK_WINDOW_SIZE)
-               force = 1;
-
- gen_ACK:
-       _debug("%05lu ACKs pend=%u norm=%s special=%s%s",
-              jiffies - call->cjif,
-              call->ackr_pend_cnt,
-              rxrpc_acks[do_ACK],
-              rxrpc_acks[special_ACK],
-              force ? " immediate" :
-              do_ACK == RXRPC_ACK_REQUESTED ? " merge-req" :
-              hdr->flags & RXRPC_LAST_PACKET ? " finalise" :
-              " defer"
-              );
-
-       /* send any pending normal ACKs if need be */
-       if (call->ackr_pend_cnt > 0) {
-               /* fill out the appropriate form */
-               call->ackr.bufferSpace  = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
-               call->ackr.maxSkew      = htons(min(call->ackr_high_seq - seq,
-                                                   65535U));
-               call->ackr.firstPacket  = htonl(call->ackr_win_bot);
-               call->ackr.previousPacket = call->ackr_prev_seq;
-               call->ackr.serial       = hdr->serial;
-               call->ackr.nAcks        = call->ackr_pend_cnt;
-
-               if (do_ACK == RXRPC_ACK_REQUESTED)
-                       call->ackr.reason = do_ACK;
-
-               /* generate the ACK immediately if necessary */
-               if (special_ACK || force) {
-                       err = __rxrpc_call_gen_normal_ACK(
-                               call, do_ACK == RXRPC_ACK_DELAY ? 0 : seq);
-                       if (err < 0) {
-                               ret = err;
-                               goto out;
-                       }
-               }
-       }
-
-       if (call->ackr.reason == RXRPC_ACK_REQUESTED)
-               call->ackr_dfr_seq = seq;
-
-       /* start the ACK timer if it isn't already running and there are any
-        * pending deferred ACKs */
-       if (call->ackr_pend_cnt > 0 &&
-           call->ackr.reason != RXRPC_ACK_REQUESTED &&
-           !timer_pending(&call->ackr_dfr_timo)
-           ) {
-               unsigned long timo;
-
-               timo = rxrpc_call_dfr_ack_timeout + jiffies;
-
-               _debug("START ACKR TIMER for cj=%lu", timo - call->cjif);
-
-               spin_lock(&call->lock);
-               mod_timer(&call->ackr_dfr_timo, timo);
-               spin_unlock(&call->lock);
-       }
-       else if ((call->ackr_pend_cnt == 0 ||
-                 call->ackr.reason == RXRPC_ACK_REQUESTED) &&
-                timer_pending(&call->ackr_dfr_timo)
-                ) {
-               /* stop timer if no pending ACKs */
-               _debug("CLEAR ACKR TIMER");
-               del_timer_sync(&call->ackr_dfr_timo);
-       }
-
-       /* send a special ACK if one is required */
-       if (special_ACK) {
-               struct rxrpc_ackpacket ack;
-               struct kvec diov[2];
-               uint8_t acks[1] = { RXRPC_ACK_TYPE_ACK };
-
-               /* fill out the appropriate form */
-               ack.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE);
-               ack.maxSkew     = htons(min(call->ackr_high_seq - seq,
-                                           65535U));
-               ack.firstPacket = htonl(call->ackr_win_bot);
-               ack.previousPacket = call->ackr_prev_seq;
-               ack.serial      = hdr->serial;
-               ack.reason      = special_ACK;
-               ack.nAcks       = 0;
-
-               _proto("Rx Sending s-ACK"
-                      " { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
-                      ntohs(ack.maxSkew),
-                      ntohl(ack.firstPacket),
-                      ntohl(ack.previousPacket),
-                      ntohl(ack.serial),
-                      rxrpc_acks[ack.reason],
-                      ack.nAcks);
-
-               diov[0].iov_len  = sizeof(struct rxrpc_ackpacket);
-               diov[0].iov_base = &ack;
-               diov[1].iov_len  = sizeof(acks);
-               diov[1].iov_base = acks;
-
-               /* build and send the message */
-               err = rxrpc_conn_newmsg(call->conn, call, RXRPC_PACKET_TYPE_ACK,
-                                       hdr->seq ? 2 : 1, diov,
-                                       GFP_KERNEL,
-                                       &msg);
-               if (err < 0) {
-                       ret = err;
-                       goto out;
-               }
-
-               msg->seq = seq;
-               msg->hdr.seq = htonl(seq);
-               msg->hdr.flags |= RXRPC_SLOW_START_OK;
-
-               err = rxrpc_conn_sendmsg(call->conn, msg);
-               rxrpc_put_message(msg);
-               if (err < 0) {
-                       ret = err;
-                       goto out;
-               }
-               call->pkt_snd_count++;
-       }
-
- out:
-       if (hdr->seq)
-               call->ackr_prev_seq = hdr->seq;
-
-       _leave(" = %d", ret);
-       return ret;
-} /* end rxrpc_call_generate_ACK() */
-
-/*****************************************************************************/
-/*
- * handle work to be done on a call
- * - includes packet reception and timeout processing
- */
-void rxrpc_call_do_stuff(struct rxrpc_call *call)
-{
-       _enter("%p{flags=%lx}", call, call->flags);
-
-       /* handle packet reception */
-       if (call->flags & RXRPC_CALL_RCV_PKT) {
-               _debug("- receive packet");
-               call->flags &= ~RXRPC_CALL_RCV_PKT;
-               rxrpc_call_receive_packet(call);
-       }
-
-       /* handle overdue ACKs */
-       if (call->flags & RXRPC_CALL_ACKS_TIMO) {
-               _debug("- overdue ACK timeout");
-               call->flags &= ~RXRPC_CALL_ACKS_TIMO;
-               rxrpc_call_resend(call, call->snd_seq_count);
-       }
-
-       /* handle lack of reception */
-       if (call->flags & RXRPC_CALL_RCV_TIMO) {
-               _debug("- reception timeout");
-               call->flags &= ~RXRPC_CALL_RCV_TIMO;
-               rxrpc_call_abort(call, -EIO);
-       }
-
-       /* handle deferred ACKs */
-       if (call->flags & RXRPC_CALL_ACKR_TIMO ||
-           (call->ackr.nAcks > 0 && call->ackr.reason == RXRPC_ACK_REQUESTED)
-           ) {
-               _debug("- deferred ACK timeout: cj=%05lu r=%s n=%u",
-                      jiffies - call->cjif,
-                      rxrpc_acks[call->ackr.reason],
-                      call->ackr.nAcks);
-
-               call->flags &= ~RXRPC_CALL_ACKR_TIMO;
-
-               if (call->ackr.nAcks > 0 &&
-                   call->app_call_state != RXRPC_CSTATE_ERROR) {
-                       /* generate ACK */
-                       __rxrpc_call_gen_normal_ACK(call, call->ackr_dfr_seq);
-                       call->ackr_dfr_seq = 0;
-               }
-       }
-
-       _leave("");
-
-} /* end rxrpc_call_do_stuff() */
-
-/*****************************************************************************/
-/*
- * send an abort message at call or connection level
- * - must be called with call->lock held
- * - the supplied error code is sent as the packet data
- */
-static int __rxrpc_call_abort(struct rxrpc_call *call, int errno)
-{
-       struct rxrpc_connection *conn = call->conn;
-       struct rxrpc_message *msg;
-       struct kvec diov[1];
-       int ret;
-       __be32 _error;
-
-       _enter("%p{%08x},%p{%d},%d",
-              conn, ntohl(conn->conn_id), call, ntohl(call->call_id), errno);
-
-       /* if this call is already aborted, then just wake up any waiters */
-       if (call->app_call_state == RXRPC_CSTATE_ERROR) {
-               spin_unlock(&call->lock);
-               call->app_error_func(call);
-               _leave(" = 0");
-               return 0;
-       }
-
-       rxrpc_get_call(call);
-
-       /* change the state _with_ the lock still held */
-       call->app_call_state    = RXRPC_CSTATE_ERROR;
-       call->app_err_state     = RXRPC_ESTATE_LOCAL_ABORT;
-       call->app_errno         = errno;
-       call->app_mark          = RXRPC_APP_MARK_EOF;
-       call->app_read_buf      = NULL;
-       call->app_async_read    = 0;
-
-       _state(call);
-
-       /* ask the app to translate the error code */
-       call->app_aemap_func(call);
-
-       spin_unlock(&call->lock);
-
-       /* flush any outstanding ACKs */
-       del_timer_sync(&call->acks_timeout);
-       del_timer_sync(&call->rcv_timeout);
-       del_timer_sync(&call->ackr_dfr_timo);
-
-       if (rxrpc_call_is_ack_pending(call))
-               __rxrpc_call_gen_normal_ACK(call, 0);
-
-       /* send the abort packet only if we actually traded some other
-        * packets */
-       ret = 0;
-       if (call->pkt_snd_count || call->pkt_rcv_count) {
-               /* actually send the abort */
-               _proto("Rx Sending Call ABORT { data=%d }",
-                      call->app_abort_code);
-
-               _error = htonl(call->app_abort_code);
-
-               diov[0].iov_len  = sizeof(_error);
-               diov[0].iov_base = &_error;
-
-               ret = rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_ABORT,
-                                       1, diov, GFP_KERNEL, &msg);
-               if (ret == 0) {
-                       ret = rxrpc_conn_sendmsg(conn, msg);
-                       rxrpc_put_message(msg);
-               }
-       }
-
-       /* tell the app layer to let go */
-       call->app_error_func(call);
-
-       rxrpc_put_call(call);
-
-       _leave(" = %d", ret);
-       return ret;
-} /* end __rxrpc_call_abort() */
-
-/*****************************************************************************/
-/*
- * send an abort message at call or connection level
- * - the supplied error code is sent as the packet data
- */
-int rxrpc_call_abort(struct rxrpc_call *call, int error)
-{
-       spin_lock(&call->lock);
-
-       return __rxrpc_call_abort(call, error);
-
-} /* end rxrpc_call_abort() */
-
-/*****************************************************************************/
-/*
- * process packets waiting for this call
- */
-static void rxrpc_call_receive_packet(struct rxrpc_call *call)
-{
-       struct rxrpc_message *msg;
-       struct list_head *_p;
-
-       _enter("%p", call);
-
-       rxrpc_get_call(call); /* must not go away too soon if aborted by
-                              * app-layer */
-
-       while (!list_empty(&call->rcv_receiveq)) {
-               /* try to get next packet */
-               _p = NULL;
-               spin_lock(&call->lock);
-               if (!list_empty(&call->rcv_receiveq)) {
-                       _p = call->rcv_receiveq.next;
-                       list_del_init(_p);
-               }
-               spin_unlock(&call->lock);
-
-               if (!_p)
-                       break;
-
-               msg = list_entry(_p, struct rxrpc_message, link);
-
-               _proto("Rx %05lu Received %s packet (%%%u,#%u,%c%c%c%c%c)",
-                      jiffies - call->cjif,
-                      rxrpc_pkts[msg->hdr.type],
-                      ntohl(msg->hdr.serial),
-                      msg->seq,
-                      msg->hdr.flags & RXRPC_JUMBO_PACKET      ? 'j' : '-',
-                      msg->hdr.flags & RXRPC_MORE_PACKETS      ? 'm' : '-',
-                      msg->hdr.flags & RXRPC_LAST_PACKET       ? 'l' : '-',
-                      msg->hdr.flags & RXRPC_REQUEST_ACK       ? 'r' : '-',
-                      msg->hdr.flags & RXRPC_CLIENT_INITIATED  ? 'C' : 'S'
-                      );
-
-               switch (msg->hdr.type) {
-                       /* deal with data packets */
-               case RXRPC_PACKET_TYPE_DATA:
-                       /* ACK the packet if necessary */
-                       switch (rxrpc_call_generate_ACK(call, &msg->hdr,
-                                                       NULL)) {
-                       case 0: /* useful packet */
-                               rxrpc_call_receive_data_packet(call, msg);
-                               break;
-                       case 1: /* duplicate or out-of-window packet */
-                               break;
-                       default:
-                               rxrpc_put_message(msg);
-                               goto out;
-                       }
-                       break;
-
-                       /* deal with ACK packets */
-               case RXRPC_PACKET_TYPE_ACK:
-                       rxrpc_call_receive_ack_packet(call, msg);
-                       break;
-
-                       /* deal with abort packets */
-               case RXRPC_PACKET_TYPE_ABORT: {
-                       __be32 _dbuf, *dp;
-
-                       dp = skb_header_pointer(msg->pkt, msg->offset,
-                                               sizeof(_dbuf), &_dbuf);
-                       if (dp == NULL)
-                               printk("Rx Received short ABORT packet\n");
-
-                       _proto("Rx Received Call ABORT { data=%d }",
-                              (dp ? ntohl(*dp) : 0));
-
-                       spin_lock(&call->lock);
-                       call->app_call_state    = RXRPC_CSTATE_ERROR;
-                       call->app_err_state     = RXRPC_ESTATE_PEER_ABORT;
-                       call->app_abort_code    = (dp ? ntohl(*dp) : 0);
-                       call->app_errno         = -ECONNABORTED;
-                       call->app_mark          = RXRPC_APP_MARK_EOF;
-                       call->app_read_buf      = NULL;
-                       call->app_async_read    = 0;
-
-                       /* ask the app to translate the error code */
-                       call->app_aemap_func(call);
-                       _state(call);
-                       spin_unlock(&call->lock);
-                       call->app_error_func(call);
-                       break;
-               }
-               default:
-                       /* deal with other packet types */
-                       _proto("Rx Unsupported packet type %u (#%u)",
-                              msg->hdr.type, msg->seq);
-                       break;
-               }
-
-               rxrpc_put_message(msg);
-       }
-
- out:
-       rxrpc_put_call(call);
-       _leave("");
-} /* end rxrpc_call_receive_packet() */
-
-/*****************************************************************************/
-/*
- * process next data packet
- * - as the next data packet arrives:
- *   - it is queued on app_readyq _if_ it is the next one expected
- *     (app_ready_seq+1)
- *   - it is queued on app_unreadyq _if_ it is not the next one expected
- *   - if a packet placed on app_readyq completely fills a hole leading up to
- *     the first packet on app_unreadyq, then packets now in sequence are
- *     transferred to app_readyq
- * - the application layer can only see packets on app_readyq
- *   (app_ready_qty bytes)
- * - the application layer is prodded every time a new packet arrives
- */
-static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
-                                          struct rxrpc_message *msg)
-{
-       const struct rxrpc_operation *optbl, *op;
-       struct rxrpc_message *pmsg;
-       struct list_head *_p;
-       int ret, lo, hi, rmtimo;
-       __be32 opid;
-
-       _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq);
-
-       rxrpc_get_message(msg);
-
-       /* add to the unready queue if we'd have to create a hole in the ready
-        * queue otherwise */
-       if (msg->seq != call->app_ready_seq + 1) {
-               _debug("Call add packet %d to unreadyq", msg->seq);
-
-               /* insert in seq order */
-               list_for_each(_p, &call->app_unreadyq) {
-                       pmsg = list_entry(_p, struct rxrpc_message, link);
-                       if (pmsg->seq > msg->seq)
-                               break;
-               }
-
-               list_add_tail(&msg->link, _p);
-
-               _leave(" [unreadyq]");
-               return;
-       }
-
-       /* next in sequence - simply append into the call's ready queue */
-       _debug("Call add packet %d to readyq (+%Zd => %Zd bytes)",
-              msg->seq, msg->dsize, call->app_ready_qty);
-
-       spin_lock(&call->lock);
-       call->app_ready_seq = msg->seq;
-       call->app_ready_qty += msg->dsize;
-       list_add_tail(&msg->link, &call->app_readyq);
-
-       /* move unready packets to the readyq if we got rid of a hole */
-       while (!list_empty(&call->app_unreadyq)) {
-               pmsg = list_entry(call->app_unreadyq.next,
-                                 struct rxrpc_message, link);
-
-               if (pmsg->seq != call->app_ready_seq + 1)
-                       break;
-
-               /* next in sequence - just move list-to-list */
-               _debug("Call transfer packet %d to readyq (+%Zd => %Zd bytes)",
-                      pmsg->seq, pmsg->dsize, call->app_ready_qty);
-
-               call->app_ready_seq = pmsg->seq;
-               call->app_ready_qty += pmsg->dsize;
-               list_move_tail(&pmsg->link, &call->app_readyq);
-       }
-
-       /* see if we've got the last packet yet */
-       if (!list_empty(&call->app_readyq)) {
-               pmsg = list_entry(call->app_readyq.prev,
-                                 struct rxrpc_message, link);
-               if (pmsg->hdr.flags & RXRPC_LAST_PACKET) {
-                       call->app_last_rcv = 1;
-                       _debug("Last packet on readyq");
-               }
-       }
-
-       switch (call->app_call_state) {
-               /* do nothing if call already aborted */
-       case RXRPC_CSTATE_ERROR:
-               spin_unlock(&call->lock);
-               _leave(" [error]");
-               return;
-
-               /* extract the operation ID from an incoming call if that hasn't
-                * yet been done */
-       case RXRPC_CSTATE_SRVR_RCV_OPID:
-               spin_unlock(&call->lock);
-
-               /* handle as yet insufficient data for the operation ID */
-               if (call->app_ready_qty < 4) {
-                       if (call->app_last_rcv)
-                               /* trouble - last packet seen */
-                               rxrpc_call_abort(call, -EINVAL);
-
-                       _leave("");
-                       return;
-               }
-
-               /* pull the operation ID out of the buffer */
-               ret = rxrpc_call_read_data(call, &opid, sizeof(opid), 0);
-               if (ret < 0) {
-                       printk("Unexpected error from read-data: %d\n", ret);
-                       if (call->app_call_state != RXRPC_CSTATE_ERROR)
-                               rxrpc_call_abort(call, ret);
-                       _leave("");
-                       return;
-               }
-               call->app_opcode = ntohl(opid);
-
-               /* locate the operation in the available ops table */
-               optbl = call->conn->service->ops_begin;
-               lo = 0;
-               hi = call->conn->service->ops_end - optbl;
-
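-               /* binary search the table (assumed to be sorted by op ID) */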
-               while (lo < hi) {
-                       int mid = (hi + lo) / 2;
-                       op = &optbl[mid];
-                       if (call->app_opcode == op->id)
-                               goto found_op;
-                       if (call->app_opcode > op->id)
-                               lo = mid + 1;
-                       else
-                               hi = mid;
-               }
-
-               /* search failed */
-               kproto("Rx Client requested operation %d from %s service",
-                      call->app_opcode, call->conn->service->name);
-               rxrpc_call_abort(call, -EINVAL);
-               _leave(" [inval]");
-               return;
-
-       found_op:
-               _proto("Rx Client requested operation %s from %s service",
-                      op->name, call->conn->service->name);
-
-               /* we're now waiting for the argument block (unless the call
-                * was aborted) */
-               spin_lock(&call->lock);
-               if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_OPID ||
-                   call->app_call_state == RXRPC_CSTATE_SRVR_SND_REPLY) {
-                       if (!call->app_last_rcv)
-                               call->app_call_state =
-                                       RXRPC_CSTATE_SRVR_RCV_ARGS;
-                       else if (call->app_ready_qty > 0)
-                               call->app_call_state =
-                                       RXRPC_CSTATE_SRVR_GOT_ARGS;
-                       else
-                               call->app_call_state =
-                                       RXRPC_CSTATE_SRVR_SND_REPLY;
-                       call->app_mark = op->asize;
-                       call->app_user = op->user;
-               }
-               spin_unlock(&call->lock);
-
-               _state(call);
-               break;
-
-       case RXRPC_CSTATE_SRVR_RCV_ARGS:
-               /* change state if just received last packet of arg block */
-               if (call->app_last_rcv)
-                       call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS;
-               spin_unlock(&call->lock);
-
-               _state(call);
-               break;
-
-       case RXRPC_CSTATE_CLNT_RCV_REPLY:
-               /* change state if just received last packet of reply block */
-               rmtimo = 0;
-               if (call->app_last_rcv) {
-                       call->app_call_state = RXRPC_CSTATE_CLNT_GOT_REPLY;
-                       rmtimo = 1;
-               }
-               spin_unlock(&call->lock);
-
-               if (rmtimo) {
-                       del_timer_sync(&call->acks_timeout);
-                       del_timer_sync(&call->rcv_timeout);
-                       del_timer_sync(&call->ackr_dfr_timo);
-               }
-
-               _state(call);
-               break;
-
-       default:
-               /* deal with data reception in an unexpected state */
-               printk("Unexpected state [[[ %u ]]]\n", call->app_call_state);
-               __rxrpc_call_abort(call, -EBADMSG);
-               _leave("");
-               return;
-       }
-
-       if (call->app_call_state == RXRPC_CSTATE_CLNT_RCV_REPLY &&
-           call->app_last_rcv)
-               BUG();
-
-       /* otherwise just invoke the data function whenever we can satisfy its
-        * desire for more data
-        */
-       _proto("Rx Received Op Data: st=%u qty=%Zu mk=%Zu%s",
-              call->app_call_state, call->app_ready_qty, call->app_mark,
-              call->app_last_rcv ? " last-rcvd" : "");
-
-       spin_lock(&call->lock);
-
-       ret = __rxrpc_call_read_data(call);
-       switch (ret) {
-       case 0:
-               spin_unlock(&call->lock);
-               call->app_attn_func(call);
-               break;
-       case -EAGAIN:
-               spin_unlock(&call->lock);
-               break;
-       case -ECONNABORTED:
-               spin_unlock(&call->lock);
-               break;
-       default:
-               __rxrpc_call_abort(call, ret);
-               break;
-       }
-
-       _state(call);
-
-       _leave("");
-
-} /* end rxrpc_call_receive_data_packet() */
-
-/*****************************************************************************/
-/*
- * received an ACK packet
- */
-static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call,
-                                         struct rxrpc_message *msg)
-{
-       struct rxrpc_ackpacket _ack, *ap;
-       rxrpc_serial_net_t serial;
-       rxrpc_seq_t seq;
-       int ret;
-
-       _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq);
-
-       /* extract the basic ACK record */
-       ap = skb_header_pointer(msg->pkt, msg->offset, sizeof(_ack), &_ack);
-       if (ap == NULL) {
-               printk("Rx Received short ACK packet\n");
-               return;
-       }
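-       /* step past the ACK header so the ACK array can be read next */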
-       msg->offset += sizeof(_ack);
-
-       serial = ap->serial;
-       seq = ntohl(ap->firstPacket);
-
-       _proto("Rx Received ACK %%%d { b=%hu m=%hu f=%u p=%u s=%u r=%s n=%u }",
-              ntohl(msg->hdr.serial),
-              ntohs(ap->bufferSpace),
-              ntohs(ap->maxSkew),
-              seq,
-              ntohl(ap->previousPacket),
-              ntohl(serial),
-              rxrpc_acks[ap->reason],
-              call->ackr.nAcks
-              );
-
-       /* check the other side isn't ACK'ing a sequence number I haven't sent
-        * yet */
-       if (ap->nAcks > 0 &&
-           (seq > call->snd_seq_count ||
-            seq + ap->nAcks - 1 > call->snd_seq_count)) {
-               printk("Received ACK (#%u-#%u) for unsent packet\n",
-                      seq, seq + ap->nAcks - 1);
-               rxrpc_call_abort(call, -EINVAL);
-               _leave("");
-               return;
-       }
-
-       /* deal with RTT calculation */
-       if (serial) {
-               struct rxrpc_message *rttmsg;
-
-               /* find the prompting packet */
-               spin_lock(&call->lock);
-               if (call->snd_ping && call->snd_ping->hdr.serial == serial) {
-                       /* it was a ping packet */
-                       rttmsg = call->snd_ping;
-                       call->snd_ping = NULL;
-                       spin_unlock(&call->lock);
-
-                       if (rttmsg) {
-                               rttmsg->rttdone = 1;
-                               rxrpc_peer_calculate_rtt(call->conn->peer,
-                                                        rttmsg, msg);
-                               rxrpc_put_message(rttmsg);
-                       }
-               }
-               else {
-                       struct list_head *_p;
-
-                       /* it ought to be a data packet - look in the pending
-                        * ACK list */
-                       list_for_each(_p, &call->acks_pendq) {
-                               rttmsg = list_entry(_p, struct rxrpc_message,
-                                                   link);
-                               if (rttmsg->hdr.serial == serial) {
-                                       if (rttmsg->rttdone)
-                                               /* never do RTT twice without
-                                                * resending */
-                                               break;
-
-                                       rttmsg->rttdone = 1;
-                                       rxrpc_peer_calculate_rtt(
-                                               call->conn->peer, rttmsg, msg);
-                                       break;
-                               }
-                       }
-                       spin_unlock(&call->lock);
-               }
-       }
-
-       switch (ap->reason) {
-               /* deal with negative/positive acknowledgement of data
-                * packets */
-       case RXRPC_ACK_REQUESTED:
-       case RXRPC_ACK_DELAY:
-       case RXRPC_ACK_IDLE:
-               rxrpc_call_definitively_ACK(call, seq - 1);
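-               /* fall through to record the accompanying ACK array */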
-
-       case RXRPC_ACK_DUPLICATE:
-       case RXRPC_ACK_OUT_OF_SEQUENCE:
-       case RXRPC_ACK_EXCEEDS_WINDOW:
-               call->snd_resend_cnt = 0;
-               ret = rxrpc_call_record_ACK(call, msg, seq, ap->nAcks);
-               if (ret < 0)
-                       rxrpc_call_abort(call, ret);
-               break;
-
-               /* respond to ping packets immediately */
-       case RXRPC_ACK_PING:
-               rxrpc_call_generate_ACK(call, &msg->hdr, ap);
-               break;
-
-               /* only record RTT on ping response packets */
-       case RXRPC_ACK_PING_RESPONSE:
-               if (call->snd_ping) {
-                       struct rxrpc_message *rttmsg;
-
-                       /* only do RTT stuff if the response matches the
-                        * retained ping */
-                       rttmsg = NULL;
-                       spin_lock(&call->lock);
-                       if (call->snd_ping &&
-                           call->snd_ping->hdr.serial == ap->serial) {
-                               rttmsg = call->snd_ping;
-                               call->snd_ping = NULL;
-                       }
-                       spin_unlock(&call->lock);
-
-                       if (rttmsg) {
-                               rttmsg->rttdone = 1;
-                               rxrpc_peer_calculate_rtt(call->conn->peer,
-                                                        rttmsg, msg);
-                               rxrpc_put_message(rttmsg);
-                       }
-               }
-               break;
-
-       default:
-               printk("Unsupported ACK reason %u\n", ap->reason);
-               break;
-       }
-
-       _leave("");
-} /* end rxrpc_call_receive_ack_packet() */
-
-/*****************************************************************************/
-/*
- * record definitive ACKs for all messages up to and including the one with the
- * 'highest' seq
- */
-static void rxrpc_call_definitively_ACK(struct rxrpc_call *call,
-                                       rxrpc_seq_t highest)
-{
-       struct rxrpc_message *msg;
-       int now_complete;
-
-       _enter("%p{ads=%u},%u", call, call->acks_dftv_seq, highest);
-
-       while (call->acks_dftv_seq < highest) {
-               call->acks_dftv_seq++;
-
-               _proto("Definitive ACK on packet #%u", call->acks_dftv_seq);
-
-               /* discard those at front of queue until message with highest
-                * ACK is found */
-               spin_lock(&call->lock);
-               msg = NULL;
-               if (!list_empty(&call->acks_pendq)) {
-                       msg = list_entry(call->acks_pendq.next,
-                                        struct rxrpc_message, link);
-                       list_del_init(&msg->link); /* dequeue */
-                       if (msg->state == RXRPC_MSG_SENT)
-                               call->acks_pend_cnt--;
-               }
-               spin_unlock(&call->lock);
-
-               /* insanity check */
-               if (!msg)
-                       panic("%s(): acks_pendq unexpectedly empty\n",
-                             __FUNCTION__);
-
-               if (msg->seq != call->acks_dftv_seq)
-                       panic("%s(): Packet #%u expected at front of acks_pendq"
-                             " (#%u found)\n",
-                             __FUNCTION__, call->acks_dftv_seq, msg->seq);
-
-               /* discard the message */
-               msg->state = RXRPC_MSG_DONE;
-               rxrpc_put_message(msg);
-       }
-
-       /* if all sent packets are definitively ACK'd then prod any sleepers
-        * just in case */
-       now_complete = 0;
-       spin_lock(&call->lock);
-       if (call->acks_dftv_seq == call->snd_seq_count) {
-               if (call->app_call_state != RXRPC_CSTATE_COMPLETE) {
-                       call->app_call_state = RXRPC_CSTATE_COMPLETE;
-                       _state(call);
-                       now_complete = 1;
-               }
-       }
-       spin_unlock(&call->lock);
-
-       if (now_complete) {
-               del_timer_sync(&call->acks_timeout);
-               del_timer_sync(&call->rcv_timeout);
-               del_timer_sync(&call->ackr_dfr_timo);
-               call->app_attn_func(call);
-       }
-
-       _leave("");
-} /* end rxrpc_call_definitively_ACK() */
-
-/*****************************************************************************/
-/*
- * record the specified amount of ACKs/NAKs
- */
-static int rxrpc_call_record_ACK(struct rxrpc_call *call,
-                                struct rxrpc_message *msg,
-                                rxrpc_seq_t seq,
-                                size_t count)
-{
-       struct rxrpc_message *dmsg;
-       struct list_head *_p;
-       rxrpc_seq_t highest;
-       unsigned ix;
-       size_t chunk;
-       char resend, now_complete;
-       u8 acks[16];
-
-       _enter("%p{apc=%u ads=%u},%p,%u,%Zu",
-              call, call->acks_pend_cnt, call->acks_dftv_seq,
-              msg, seq, count);
-
-       /* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order
-        * ACKs) */
-       if (seq <= call->acks_dftv_seq) {
-               unsigned delta = call->acks_dftv_seq - seq;
-
-               if (count <= delta) {
-                       _leave(" = 0 [all definitively ACK'd]");
-                       return 0;
-               }
-
-               seq += delta;
-               count -= delta;
-               msg->offset += delta;
-       }
-
-       highest = seq + count - 1;
-       resend = 0;
-       while (count > 0) {
-               /* extract up to 16 ACK slots at a time */
-               chunk = min(count, sizeof(acks));
-               count -= chunk;
-
-               memset(acks, 2, sizeof(acks));
-
-               if (skb_copy_bits(msg->pkt, msg->offset, &acks, chunk) < 0) {
-                       printk("Rx Received short ACK packet\n");
-                       _leave(" = -EINVAL");
-                       return -EINVAL;
-               }
-               msg->offset += chunk;
-
-               /* check that the ACK set is valid */
-               for (ix = 0; ix < chunk; ix++) {
-                       switch (acks[ix]) {
-                       case RXRPC_ACK_TYPE_ACK:
-                               break;
-                       case RXRPC_ACK_TYPE_NACK:
-                               resend = 1;
-                               break;
-                       default:
-                               printk("Rx Received unsupported ACK state"
-                                      " %u\n", acks[ix]);
-                               _leave(" = -EINVAL");
-                               return -EINVAL;
-                       }
-               }
-
-               _proto("Rx ACK of packets #%u-#%u "
-                      "[%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)",
-                      seq, (unsigned) (seq + chunk - 1),
-                      _acktype[acks[0x0]],
-                      _acktype[acks[0x1]],
-                      _acktype[acks[0x2]],
-                      _acktype[acks[0x3]],
-                      _acktype[acks[0x4]],
-                      _acktype[acks[0x5]],
-                      _acktype[acks[0x6]],
-                      _acktype[acks[0x7]],
-                      _acktype[acks[0x8]],
-                      _acktype[acks[0x9]],
-                      _acktype[acks[0xA]],
-                      _acktype[acks[0xB]],
-                      _acktype[acks[0xC]],
-                      _acktype[acks[0xD]],
-                      _acktype[acks[0xE]],
-                      _acktype[acks[0xF]],
-                      call->acks_pend_cnt
-                      );
-
-               /* mark the packets in the ACK queue as being provisionally
-                * ACK'd */
-               ix = 0;
-               spin_lock(&call->lock);
-
-               /* find the first packet ACK'd/NAK'd here */
-               list_for_each(_p, &call->acks_pendq) {
-                       dmsg = list_entry(_p, struct rxrpc_message, link);
-                       if (dmsg->seq == seq)
-                               goto found_first;
-                       _debug("- %u: skipping #%u", ix, dmsg->seq);
-               }
-               goto bad_queue;
-
-       found_first:
-               do {
-                       _debug("- %u: processing #%u (%c) apc=%u",
-                              ix, dmsg->seq, _acktype[acks[ix]],
-                              call->acks_pend_cnt);
-
-                       if (acks[ix] == RXRPC_ACK_TYPE_ACK) {
-                               if (dmsg->state == RXRPC_MSG_SENT)
-                                       call->acks_pend_cnt--;
-                               dmsg->state = RXRPC_MSG_ACKED;
-                       }
-                       else {
-                               if (dmsg->state == RXRPC_MSG_ACKED)
-                                       call->acks_pend_cnt++;
-                               dmsg->state = RXRPC_MSG_SENT;
-                       }
-                       ix++;
-                       seq++;
-
-                       _p = dmsg->link.next;
-                       dmsg = list_entry(_p, struct rxrpc_message, link);
-               } while (ix < chunk &&
-                       _p != &call->acks_pendq &&
-                       dmsg->seq == seq);
-
-               if (ix < chunk)
-                       goto bad_queue;
-
-               spin_unlock(&call->lock);
-       }
-
-       if (resend)
-               rxrpc_call_resend(call, highest);
-
-       /* if all packets are provisionally ACK'd, then wake up anyone who's
-        * waiting for that */
-       now_complete = 0;
-       spin_lock(&call->lock);
-       if (call->acks_pend_cnt == 0) {
-               if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) {
-                       call->app_call_state = RXRPC_CSTATE_COMPLETE;
-                       _state(call);
-               }
-               now_complete = 1;
-       }
-       spin_unlock(&call->lock);
-
-       if (now_complete) {
-               _debug("- wake up waiters");
-               del_timer_sync(&call->acks_timeout);
-               del_timer_sync(&call->rcv_timeout);
-               del_timer_sync(&call->ackr_dfr_timo);
-               call->app_attn_func(call);
-       }
-
-       _leave(" = 0 (apc=%u)", call->acks_pend_cnt);
-       return 0;
-
- bad_queue:
-       panic("%s(): acks_pendq in bad state (packet #%u absent)\n",
-             __FUNCTION__, seq);
-
-} /* end rxrpc_call_record_ACK() */
-
-/*****************************************************************************/
-/*
- * transfer data from the ready packet queue to the asynchronous read buffer
- * - since this func is the only one going to look at packets queued on
- *   app_readyq, we don't need a lock to modify or access them, only to modify
- *   the queue pointers
- * - called with call->lock held
- * - the buffer must be in kernel space
- * - returns:
- *     0 if buffer filled
- *     -EAGAIN if buffer not filled and more data to come
- *     -EBADMSG if last packet received and insufficient data left
- *     -ECONNABORTED if the call is in an error state
- */
-static int __rxrpc_call_read_data(struct rxrpc_call *call)
-{
-       struct rxrpc_message *msg;
-       size_t qty;
-       int ret;
-
-       _enter("%p{as=%d buf=%p qty=%Zu/%Zu}",
-              call,
-              call->app_async_read, call->app_read_buf,
-              call->app_ready_qty, call->app_mark);
-
-       /* check the state */
-       switch (call->app_call_state) {
-       case RXRPC_CSTATE_SRVR_RCV_ARGS:
-       case RXRPC_CSTATE_CLNT_RCV_REPLY:
-               if (call->app_last_rcv) {
-                       printk("%s(%p,%p,%Zd):"
-                              " Inconsistent call state (%s, last pkt)",
-                              __FUNCTION__,
-                              call, call->app_read_buf, call->app_mark,
-                              rxrpc_call_states[call->app_call_state]);
-                       BUG();
-               }
-               break;
-
-       case RXRPC_CSTATE_SRVR_RCV_OPID:
-       case RXRPC_CSTATE_SRVR_GOT_ARGS:
-       case RXRPC_CSTATE_CLNT_GOT_REPLY:
-               break;
-
-       case RXRPC_CSTATE_SRVR_SND_REPLY:
-               if (!call->app_last_rcv) {
-                       printk("%s(%p,%p,%Zd):"
-                              " Inconsistent call state (%s, not last pkt)",
-                              __FUNCTION__,
-                              call, call->app_read_buf, call->app_mark,
-                              rxrpc_call_states[call->app_call_state]);
-                       BUG();
-               }
-               _debug("Trying to read data from call in SND_REPLY state");
-               break;
-
-       case RXRPC_CSTATE_ERROR:
-               _leave(" = -ECONNABORTED");
-               return -ECONNABORTED;
-
-       default:
-               printk("reading in unexpected state [[[ %u ]]]\n",
-                      call->app_call_state);
-               BUG();
-       }
-
-       /* handle the case of not having an async buffer */
-       if (!call->app_async_read) {
-               if (call->app_mark == RXRPC_APP_MARK_EOF) {
-                       ret = call->app_last_rcv ? 0 : -EAGAIN;
-               }
-               else {
-                       if (call->app_mark >= call->app_ready_qty) {
-                               call->app_mark = RXRPC_APP_MARK_EOF;
-                               ret = 0;
-                       }
-                       else {
-                               ret = call->app_last_rcv ? -EBADMSG : -EAGAIN;
-                       }
-               }
-
-               _leave(" = %d [no buf]", ret);
-               return ret;
-       }
-
-       while (!list_empty(&call->app_readyq) && call->app_mark > 0) {
-               msg = list_entry(call->app_readyq.next,
-                                struct rxrpc_message, link);
-
-               /* drag as much data as we need out of this packet */
-               qty = min(call->app_mark, msg->dsize);
-
-               _debug("reading %Zu from skb=%p off=%lu",
-                      qty, msg->pkt, msg->offset);
-
-               if (call->app_read_buf)
-                       if (skb_copy_bits(msg->pkt, msg->offset,
-                                         call->app_read_buf, qty) < 0)
-                               panic("%s: Failed to copy data from packet:"
-                                     " (%p,%p,%Zd)",
-                                     __FUNCTION__,
-                                     call, call->app_read_buf, qty);
-
-               /* if that packet is now empty, discard it */
-               call->app_ready_qty -= qty;
-               msg->dsize -= qty;
-
-               if (msg->dsize == 0) {
-                       list_del_init(&msg->link);
-                       rxrpc_put_message(msg);
-               }
-               else {
-                       msg->offset += qty;
-               }
-
-               call->app_mark -= qty;
-               if (call->app_read_buf)
-                       call->app_read_buf += qty;
-       }
-
-       if (call->app_mark == 0) {
-               call->app_async_read = 0;
-               call->app_mark = RXRPC_APP_MARK_EOF;
-               call->app_read_buf = NULL;
-
-               /* adjust the state if used up all packets */
-               if (list_empty(&call->app_readyq) && call->app_last_rcv) {
-                       switch (call->app_call_state) {
-                       case RXRPC_CSTATE_SRVR_RCV_OPID:
-                               call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
-                               call->app_mark = RXRPC_APP_MARK_EOF;
-                               _state(call);
-                               del_timer_sync(&call->rcv_timeout);
-                               break;
-                       case RXRPC_CSTATE_SRVR_GOT_ARGS:
-                               call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY;
-                               _state(call);
-                               del_timer_sync(&call->rcv_timeout);
-                               break;
-                       default:
-                               call->app_call_state = RXRPC_CSTATE_COMPLETE;
-                               _state(call);
-                               del_timer_sync(&call->acks_timeout);
-                               del_timer_sync(&call->ackr_dfr_timo);
-                               del_timer_sync(&call->rcv_timeout);
-                               break;
-                       }
-               }
-
-               _leave(" = 0");
-               return 0;
-       }
-
-       if (call->app_last_rcv) {
-               _debug("Insufficient data (%Zu/%Zu)",
-                      call->app_ready_qty, call->app_mark);
-               call->app_async_read = 0;
-               call->app_mark = RXRPC_APP_MARK_EOF;
-               call->app_read_buf = NULL;
-
-               _leave(" = -EBADMSG");
-               return -EBADMSG;
-       }
-
-       _leave(" = -EAGAIN");
-       return -EAGAIN;
-} /* end __rxrpc_call_read_data() */
-
-/*****************************************************************************/
-/*
- * attempt to read the specified amount of data from the call's ready queue
- * into the buffer provided
- * - since this func is the only one going to look at packets queued on
- *   app_readyq, we don't need a lock to modify or access them, only to modify
- *   the queue pointers
- * - if the buffer pointer is NULL, then data is merely drained, not copied
- * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there is
- *   enough data or an error will be generated
- *   - note that the caller must have added the calling task to the call's wait
- *     queue beforehand
- * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this
- *   function doesn't read all available data
- */
-int rxrpc_call_read_data(struct rxrpc_call *call,
-                        void *buffer, size_t size, int flags)
-{
-       int ret;
-
-       _enter("%p{arq=%Zu},%p,%Zd,%x",
-              call, call->app_ready_qty, buffer, size, flags);
-
-       spin_lock(&call->lock);
-
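-       /* only one read may be outstanding on a call at any one time */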
-       if (unlikely(!!call->app_read_buf)) {
-               spin_unlock(&call->lock);
-               _leave(" = -EBUSY");
-               return -EBUSY;
-       }
-
-       call->app_mark = size;
-       call->app_read_buf = buffer;
-       call->app_async_read = 1;
-       call->app_read_count++;
-
-       /* read as much data as possible */
-       ret = __rxrpc_call_read_data(call);
-       switch (ret) {
-       case 0:
-               if (flags & RXRPC_CALL_READ_ALL &&
-                   (!call->app_last_rcv || call->app_ready_qty > 0)) {
-                       _leave(" = -EBADMSG");
-                       __rxrpc_call_abort(call, -EBADMSG);
-                       return -EBADMSG;
-               }
-
-               spin_unlock(&call->lock);
-               call->app_attn_func(call);
-               _leave(" = 0");
-               return ret;
-
-       case -ECONNABORTED:
-               spin_unlock(&call->lock);
-               _leave(" = %d [aborted]", ret);
-               return ret;
-
-       default:
-               __rxrpc_call_abort(call, ret);
-               _leave(" = %d", ret);
-               return ret;
-
-       case -EAGAIN:
-               spin_unlock(&call->lock);
-
-               if (!(flags & RXRPC_CALL_READ_BLOCK)) {
-                       _leave(" = -EAGAIN");
-                       return -EAGAIN;
-               }
-
-               /* wait for the data to arrive */
-               _debug("blocking for data arrival");
-
-               for (;;) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       if (!call->app_async_read || signal_pending(current))
-                               break;
-                       schedule();
-               }
-               set_current_state(TASK_RUNNING);
-
-               if (signal_pending(current)) {
-                       _leave(" = -EINTR");
-                       return -EINTR;
-               }
-
-               if (call->app_call_state == RXRPC_CSTATE_ERROR) {
-                       _leave(" = -ECONNABORTED");
-                       return -ECONNABORTED;
-               }
-
-               _leave(" = 0");
-               return 0;
-       }
-
-} /* end rxrpc_call_read_data() */
-
-/*****************************************************************************/
-/*
- * write data to a call
- * - the data may not be sent immediately if it doesn't fill a buffer
- * - if we can't queue all the data for buffering now, siov[] will have been
- *   adjusted to take account of what has been sent
- */
-int rxrpc_call_write_data(struct rxrpc_call *call,
-                         size_t sioc,
-                         struct kvec *siov,
-                         u8 rxhdr_flags,
-                         gfp_t alloc_flags,
-                         int dup_data,
-                         size_t *size_sent)
-{
-       struct rxrpc_message *msg;
-       struct kvec *sptr;
-       size_t space, size, chunk, tmp;
-       char *buf;
-       int ret;
-
-       _enter("%p,%Zu,%p,%02x,%x,%d,%p",
-              call, sioc, siov, rxhdr_flags, alloc_flags, dup_data,
-              size_sent);
-
-       *size_sent = 0;
-       size = 0;
-       ret = -EINVAL;
-
-       /* can't send more if we've sent last packet from this end */
-       switch (call->app_call_state) {
-       case RXRPC_CSTATE_SRVR_SND_REPLY:
-       case RXRPC_CSTATE_CLNT_SND_ARGS:
-               break;
-       case RXRPC_CSTATE_ERROR:
-               ret = call->app_errno;
-       default:
-               goto out;
-       }
-
-       /* calculate how much data we've been given */
-       sptr = siov;
-       for (; sioc > 0; sptr++, sioc--) {
-               if (!sptr->iov_len)
-                       continue;
-
-               if (!sptr->iov_base)
-                       goto out;
-
-               size += sptr->iov_len;
-       }
-
-       _debug("- size=%Zu mtu=%Zu", size, call->conn->mtu_size);
-
-       do {
-               /* make sure there's a message under construction */
-               if (!call->snd_nextmsg) {
-                       /* no - allocate a message with no data yet attached */
-                       ret = rxrpc_conn_newmsg(call->conn, call,
-                                               RXRPC_PACKET_TYPE_DATA,
-                                               0, NULL, alloc_flags,
-                                               &call->snd_nextmsg);
-                       if (ret < 0)
-                               goto out;
-                       _debug("- allocated new message [ds=%Zu]",
-                              call->snd_nextmsg->dsize);
-               }
-
-               msg = call->snd_nextmsg;
-               msg->hdr.flags |= rxhdr_flags;
-
-               /* deal with zero-length terminal packet */
-               if (size == 0) {
-                       if (rxhdr_flags & RXRPC_LAST_PACKET) {
-                               ret = rxrpc_call_flush(call);
-                               if (ret < 0)
-                                       goto out;
-                       }
-                       break;
-               }
-
-               /* work out how much space current packet has available */
-               space = call->conn->mtu_size - msg->dsize;
-               chunk = min(space, size);
-
-               _debug("- [before] space=%Zu chunk=%Zu", space, chunk);
-
-               while (!siov->iov_len)
-                       siov++;
-
-               /* if we are going to have to duplicate the data then coalesce
-                * it too */
-               if (dup_data) {
-                       /* don't allocate more than 1 page at a time */
-                       if (chunk > PAGE_SIZE)
-                               chunk = PAGE_SIZE;
-
-                       /* allocate a data buffer and attach to the message */
-                       buf = kmalloc(chunk, alloc_flags);
-                       if (unlikely(!buf)) {
-                               if (msg->dsize ==
-                                   sizeof(struct rxrpc_header)) {
-                                       /* discard an empty msg and wind back
-                                        * the seq counter */
-                                       rxrpc_put_message(msg);
-                                       call->snd_nextmsg = NULL;
-                                       call->snd_seq_count--;
-                               }
-
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-
-                       tmp = msg->dcount++;
-                       set_bit(tmp, &msg->dfree);
-                       msg->data[tmp].iov_base = buf;
-                       msg->data[tmp].iov_len = chunk;
-                       msg->dsize += chunk;
-                       *size_sent += chunk;
-                       size -= chunk;
-
-                       /* load the buffer with data */
-                       while (chunk > 0) {
-                               tmp = min(chunk, siov->iov_len);
-                               memcpy(buf, siov->iov_base, tmp);
-                               buf += tmp;
-                               siov->iov_base += tmp;
-                               siov->iov_len -= tmp;
-                               if (!siov->iov_len)
-                                       siov++;
-                               chunk -= tmp;
-                       }
-               }
-               else {
-                       /* we want to attach the supplied buffers directly */
-                       while (chunk > 0 &&
-                              msg->dcount < RXRPC_MSG_MAX_IOCS) {
-                               tmp = msg->dcount++;
-                               msg->data[tmp].iov_base = siov->iov_base;
-                               msg->data[tmp].iov_len = siov->iov_len;
-                               msg->dsize += siov->iov_len;
-                               *size_sent += siov->iov_len;
-                               size -= siov->iov_len;
-                               chunk -= siov->iov_len;
-                               siov++;
-                       }
-               }
-
-               _debug("- [loaded] chunk=%Zu size=%Zu", chunk, size);
-
-               /* dispatch the message when full, final or requesting ACK */
-               if (msg->dsize >= call->conn->mtu_size || rxhdr_flags) {
-                       ret = rxrpc_call_flush(call);
-                       if (ret < 0)
-                               goto out;
-               }
-
-       } while (size > 0);
-
-       ret = 0;
- out:
-       _leave(" = %d (%Zd queued, %Zd rem)", ret, *size_sent, size);
-       return ret;
-
-} /* end rxrpc_call_write_data() */
-
-/*****************************************************************************/
-/*
- * flush outstanding packets to the network
- */
-static int rxrpc_call_flush(struct rxrpc_call *call)
-{
-       struct rxrpc_message *msg;
-       int ret = 0;
-
-       _enter("%p", call);
-
-       rxrpc_get_call(call);
-
-       /* if there's a packet under construction, then dispatch it now */
-       if (call->snd_nextmsg) {
-               msg = call->snd_nextmsg;
-               call->snd_nextmsg = NULL;
-
-               if (msg->hdr.flags & RXRPC_LAST_PACKET) {
-                       msg->hdr.flags &= ~RXRPC_MORE_PACKETS;
-                       if (call->app_call_state != RXRPC_CSTATE_CLNT_SND_ARGS)
-                               msg->hdr.flags |= RXRPC_REQUEST_ACK;
-               }
-               else {
-                       msg->hdr.flags |= RXRPC_MORE_PACKETS;
-               }
-
-               _proto("Sending DATA message { ds=%Zu dc=%u df=%02lu }",
-                      msg->dsize, msg->dcount, msg->dfree);
-
-               /* queue and adjust call state */
-               spin_lock(&call->lock);
-               list_add_tail(&msg->link, &call->acks_pendq);
-
-               /* decide what to do depending on current state and if this is
-                * the last packet */
-               ret = -EINVAL;
-               switch (call->app_call_state) {
-               case RXRPC_CSTATE_SRVR_SND_REPLY:
-                       if (msg->hdr.flags & RXRPC_LAST_PACKET) {
-                               call->app_call_state =
-                                       RXRPC_CSTATE_SRVR_RCV_FINAL_ACK;
-                               _state(call);
-                       }
-                       break;
-
-               case RXRPC_CSTATE_CLNT_SND_ARGS:
-                       if (msg->hdr.flags & RXRPC_LAST_PACKET) {
-                               call->app_call_state =
-                                       RXRPC_CSTATE_CLNT_RCV_REPLY;
-                               _state(call);
-                       }
-                       break;
-
-               case RXRPC_CSTATE_ERROR:
-                       ret = call->app_errno;
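-                       /* fall through to unlock and bail out with the call's saved error */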
-               default:
-                       spin_unlock(&call->lock);
-                       goto out;
-               }
-
-               call->acks_pend_cnt++;
-
-               mod_timer(&call->acks_timeout,
-                         __rxrpc_rtt_based_timeout(call,
-                                                   rxrpc_call_acks_timeout));
-
-               spin_unlock(&call->lock);
-
-               ret = rxrpc_conn_sendmsg(call->conn, msg);
-               if (ret == 0)
-                       call->pkt_snd_count++;
-       }
-
- out:
-       rxrpc_put_call(call);
-
-       _leave(" = %d", ret);
-       return ret;
-
-} /* end rxrpc_call_flush() */
-
-/*****************************************************************************/
-/*
- * resend NAK'd or unacknowledged packets up to the highest one specified
- */
-static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest)
-{
-       struct rxrpc_message *msg;
-       struct list_head *_p;
-       rxrpc_seq_t seq = 0;
-
-       _enter("%p,%u", call, highest);
-
-       _proto("Rx Resend required");
-
-       /* handle too many resends */
-       if (call->snd_resend_cnt >= rxrpc_call_max_resend) {
-               _debug("Aborting due to too many resends (rcv=%d)",
-                      call->pkt_rcv_count);
-               rxrpc_call_abort(call,
-                                call->pkt_rcv_count > 0 ? -EIO : -ETIMEDOUT);
-               _leave("");
-               return;
-       }
-
-       spin_lock(&call->lock);
-       call->snd_resend_cnt++;
-       for (;;) {
-               /* determine the next packet we might need to ACK */
-               if (seq <= call->acks_dftv_seq)
-                       seq = call->acks_dftv_seq;
-               seq++;
-
-               if (seq > highest)
-                       break;
-
-               /* look for the packet in the pending-ACK queue */
-               list_for_each(_p, &call->acks_pendq) {
-                       msg = list_entry(_p, struct rxrpc_message, link);
-                       if (msg->seq == seq)
-                               goto found_msg;
-               }
-
-               panic("%s(%p,%d):"
-                     " Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n",
-                     __FUNCTION__, call, highest,
-                     call->acks_dftv_seq, call->snd_seq_count, seq);
-
-       found_msg:
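-               /* note: 'continue' below resumes the outer for(;;) loop,
-                * advancing to the next sequence number */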
-               if (msg->state != RXRPC_MSG_SENT)
-                       continue; /* only un-ACK'd packets */
-
-               rxrpc_get_message(msg);
-               spin_unlock(&call->lock);
-
-               /* send each message again (and ignore any errors we might
-                * incur) */
-               _proto("Resending DATA message { ds=%Zu dc=%u df=%02lu }",
-                      msg->dsize, msg->dcount, msg->dfree);
-
-               if (rxrpc_conn_sendmsg(call->conn, msg) == 0)
-                       call->pkt_snd_count++;
-
-               rxrpc_put_message(msg);
-
-               spin_lock(&call->lock);
-       }
-
-       /* reset the timeout */
-       mod_timer(&call->acks_timeout,
-                 __rxrpc_rtt_based_timeout(call, rxrpc_call_acks_timeout));
-
-       spin_unlock(&call->lock);
-
-       _leave("");
-} /* end rxrpc_call_resend() */
-
-/*****************************************************************************/
-/*
- * handle an ICMP error being applied to a call
- */
-void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno)
-{
-       _enter("%p{%u},%d", call, ntohl(call->call_id), errno);
-
-       /* if this call is already aborted, then just wake up any waiters */
-       if (call->app_call_state == RXRPC_CSTATE_ERROR) {
-               call->app_error_func(call);
-       }
-       else {
-               /* tell the app layer what happened */
-               spin_lock(&call->lock);
-               call->app_call_state = RXRPC_CSTATE_ERROR;
-               _state(call);
-               if (local)
-                       call->app_err_state = RXRPC_ESTATE_LOCAL_ERROR;
-               else
-                       call->app_err_state = RXRPC_ESTATE_REMOTE_ERROR;
-               call->app_errno         = errno;
-               call->app_mark          = RXRPC_APP_MARK_EOF;
-               call->app_read_buf      = NULL;
-               call->app_async_read    = 0;
-
-               /* map the error */
-               call->app_aemap_func(call);
-
-               del_timer_sync(&call->acks_timeout);
-               del_timer_sync(&call->rcv_timeout);
-               del_timer_sync(&call->ackr_dfr_timo);
-
-               spin_unlock(&call->lock);
-
-               call->app_error_func(call);
-       }
-
-       _leave("");
-} /* end rxrpc_call_handle_error() */
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
deleted file mode 100644 (file)
index a7c929a..0000000
+++ /dev/null
@@ -1,777 +0,0 @@
-/* connection.c: Rx connection routines
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/call.h>
-#include <rxrpc/message.h>
-#include <linux/udp.h>
-#include <linux/ip.h>
-#include <net/sock.h>
-#include <asm/uaccess.h>
-#include "internal.h"
-
-__RXACCT_DECL(atomic_t rxrpc_connection_count);
-
-LIST_HEAD(rxrpc_conns);
-DECLARE_RWSEM(rxrpc_conns_sem);
-unsigned long rxrpc_conn_timeout = 60 * 60;
-
-static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn);
-
-static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
-{
-       struct rxrpc_connection *conn =
-               list_entry(timer, struct rxrpc_connection, timeout);
-
-       _debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage));
-
-       rxrpc_conn_do_timeout(conn);
-}
-
-static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
-       .timed_out      = __rxrpc_conn_timeout,
-};
-
-/*****************************************************************************/
-/*
- * create a new connection record
- */
-static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
-                                           struct rxrpc_connection **_conn)
-{
-       struct rxrpc_connection *conn;
-
-       _enter("%p",peer);
-
-       /* allocate and initialise a connection record */
-       conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
-       if (!conn) {
-               _leave(" = -ENOMEM");
-               return -ENOMEM;
-       }
-
-       atomic_set(&conn->usage, 1);
-
-       INIT_LIST_HEAD(&conn->link);
-       INIT_LIST_HEAD(&conn->id_link);
-       init_waitqueue_head(&conn->chanwait);
-       spin_lock_init(&conn->lock);
-       rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops);
-
-       do_gettimeofday(&conn->atime);
-       conn->mtu_size = 1024;
-       conn->peer = peer;
-       conn->trans = peer->trans;
-
-       __RXACCT(atomic_inc(&rxrpc_connection_count));
-       *_conn = conn;
-       _leave(" = 0 (%p)", conn);
-
-       return 0;
-} /* end __rxrpc_create_connection() */
-
-/*****************************************************************************/
-/*
- * create a new connection record for outgoing connections
- */
-int rxrpc_create_connection(struct rxrpc_transport *trans,
-                           __be16 port,
-                           __be32 addr,
-                           uint16_t service_id,
-                           void *security,
-                           struct rxrpc_connection **_conn)
-{
-       struct rxrpc_connection *candidate, *conn;
-       struct rxrpc_peer *peer;
-       struct list_head *_p;
-       __be32 connid;
-       int ret;
-
-       _enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id);
-
-       /* get a peer record */
-       ret = rxrpc_peer_lookup(trans, addr, &peer);
-       if (ret < 0) {
-               _leave(" = %d", ret);
-               return ret;
-       }
-
-       /* allocate and initialise a connection record */
-       ret = __rxrpc_create_connection(peer, &candidate);
-       if (ret < 0) {
-               rxrpc_put_peer(peer);
-               _leave(" = %d", ret);
-               return ret;
-       }
-
-       /* fill in the specific bits */
-       candidate->addr.sin_family      = AF_INET;
-       candidate->addr.sin_port        = port;
-       candidate->addr.sin_addr.s_addr = addr;
-
-       candidate->in_epoch             = rxrpc_epoch;
-       candidate->out_epoch            = rxrpc_epoch;
-       candidate->in_clientflag        = 0;
-       candidate->out_clientflag       = RXRPC_CLIENT_INITIATED;
-       candidate->service_id           = htons(service_id);
-
-       /* invent a unique connection ID */
-       write_lock(&peer->conn_idlock);
-
- try_next_id:
-       connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK);
-       peer->conn_idcounter += RXRPC_MAXCALLS;
-
-       list_for_each(_p, &peer->conn_idlist) {
-               conn = list_entry(_p, struct rxrpc_connection, id_link);
-               if (connid == conn->conn_id)
-                       goto try_next_id;
-               if (connid > conn->conn_id)
-                       break;
-       }
-
-       _debug("selected candidate conn ID %x.%u",
-              ntohl(peer->addr.s_addr), ntohl(connid));
-
-       candidate->conn_id = connid;
-       list_add_tail(&candidate->id_link, _p);
-
-       write_unlock(&peer->conn_idlock);
-
-       /* attach to peer */
-       candidate->peer = peer;
-
-       write_lock(&peer->conn_lock);
-
-       /* search the peer's transport graveyard list */
-       spin_lock(&peer->conn_gylock);
-       list_for_each(_p, &peer->conn_graveyard) {
-               conn = list_entry(_p, struct rxrpc_connection, link);
-               if (conn->addr.sin_port == candidate->addr.sin_port     &&
-                   conn->security_ix   == candidate->security_ix       &&
-                   conn->service_id    == candidate->service_id        &&
-                   conn->in_clientflag == 0)
-                       goto found_in_graveyard;
-       }
-       spin_unlock(&peer->conn_gylock);
-
-       /* pick the new candidate */
-       _debug("created connection: {%08x} [out]", ntohl(candidate->conn_id));
-       atomic_inc(&peer->conn_count);
-       conn = candidate;
-       candidate = NULL;
-
- make_active:
-       list_add_tail(&conn->link, &peer->conn_active);
-       write_unlock(&peer->conn_lock);
-
-       if (candidate) {
-               write_lock(&peer->conn_idlock);
-               list_del(&candidate->id_link);
-               write_unlock(&peer->conn_idlock);
-
-               __RXACCT(atomic_dec(&rxrpc_connection_count));
-               kfree(candidate);
-       }
-       else {
-               down_write(&rxrpc_conns_sem);
-               list_add_tail(&conn->proc_link, &rxrpc_conns);
-               up_write(&rxrpc_conns_sem);
-       }
-
-       *_conn = conn;
-       _leave(" = 0 (%p)", conn);
-
-       return 0;
-
-       /* handle resurrecting a connection from the graveyard */
- found_in_graveyard:
-       _debug("resurrecting connection: {%08x} [out]", ntohl(conn->conn_id));
-       rxrpc_get_connection(conn);
-       rxrpc_krxtimod_del_timer(&conn->timeout);
-       list_del_init(&conn->link);
-       spin_unlock(&peer->conn_gylock);
-       goto make_active;
-} /* end rxrpc_create_connection() */
-
-/*****************************************************************************/
-/*
- * lookup the connection for an incoming packet
- * - create a new connection record for unrecorded incoming connections
- */
-int rxrpc_connection_lookup(struct rxrpc_peer *peer,
-                           struct rxrpc_message *msg,
-                           struct rxrpc_connection **_conn)
-{
-       struct rxrpc_connection *conn, *candidate = NULL;
-       struct list_head *_p;
-       struct sk_buff *pkt = msg->pkt;
-       int ret, fresh = 0;
-       __be32 x_epoch, x_connid;
-       __be16 x_port, x_servid;
-       __u32 x_secix;
-       u8 x_clflag;
-
-       _enter("%p{{%hu}},%u,%hu",
-              peer,
-              peer->trans->port,
-              ntohs(pkt->h.uh->source),
-              ntohs(msg->hdr.serviceId));
-
-       x_port          = pkt->h.uh->source;
-       x_epoch         = msg->hdr.epoch;
-       x_clflag        = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
-       x_connid        = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
-       x_servid        = msg->hdr.serviceId;
-       x_secix         = msg->hdr.securityIndex;
-
-       /* [common case] search the transport's active list first */
-       read_lock(&peer->conn_lock);
-       list_for_each(_p, &peer->conn_active) {
-               conn = list_entry(_p, struct rxrpc_connection, link);
-               if (conn->addr.sin_port         == x_port       &&
-                   conn->in_epoch              == x_epoch      &&
-                   conn->conn_id               == x_connid     &&
-                   conn->security_ix           == x_secix      &&
-                   conn->service_id            == x_servid     &&
-                   conn->in_clientflag         == x_clflag)
-                       goto found_active;
-       }
-       read_unlock(&peer->conn_lock);
-
-       /* [uncommon case] not active
-        * - create a candidate for a new record if an inbound connection
-        * - only examine the graveyard for an outbound connection
-        */
-       if (x_clflag) {
-               ret = __rxrpc_create_connection(peer, &candidate);
-               if (ret < 0) {
-                       _leave(" = %d", ret);
-                       return ret;
-               }
-
-               /* fill in the specifics */
-               candidate->addr.sin_family      = AF_INET;
-               candidate->addr.sin_port        = x_port;
-               candidate->addr.sin_addr.s_addr = pkt->nh.iph->saddr;
-               candidate->in_epoch             = x_epoch;
-               candidate->out_epoch            = x_epoch;
-               candidate->in_clientflag        = RXRPC_CLIENT_INITIATED;
-               candidate->out_clientflag       = 0;
-               candidate->conn_id              = x_connid;
-               candidate->service_id           = x_servid;
-               candidate->security_ix          = x_secix;
-       }
-
-       /* search the active list again, just in case it appeared whilst we
-        * were busy */
-       write_lock(&peer->conn_lock);
-       list_for_each(_p, &peer->conn_active) {
-               conn = list_entry(_p, struct rxrpc_connection, link);
-               if (conn->addr.sin_port         == x_port       &&
-                   conn->in_epoch              == x_epoch      &&
-                   conn->conn_id               == x_connid     &&
-                   conn->security_ix           == x_secix      &&
-                   conn->service_id            == x_servid     &&
-                   conn->in_clientflag         == x_clflag)
-                       goto found_active_second_chance;
-       }
-
-       /* search the transport's graveyard list */
-       spin_lock(&peer->conn_gylock);
-       list_for_each(_p, &peer->conn_graveyard) {
-               conn = list_entry(_p, struct rxrpc_connection, link);
-               if (conn->addr.sin_port         == x_port       &&
-                   conn->in_epoch              == x_epoch      &&
-                   conn->conn_id               == x_connid     &&
-                   conn->security_ix           == x_secix      &&
-                   conn->service_id            == x_servid     &&
-                   conn->in_clientflag         == x_clflag)
-                       goto found_in_graveyard;
-       }
-       spin_unlock(&peer->conn_gylock);
-
-       /* outbound connections aren't created here */
-       if (!x_clflag) {
-               write_unlock(&peer->conn_lock);
-               _leave(" = -ENOENT");
-               return -ENOENT;
-       }
-
-       /* we can now add the new candidate to the list */
-       _debug("created connection: {%08x} [in]", ntohl(candidate->conn_id));
-       rxrpc_get_peer(peer);
-       conn = candidate;
-       candidate = NULL;
-       atomic_inc(&peer->conn_count);
-       fresh = 1;
-
- make_active:
-       list_add_tail(&conn->link, &peer->conn_active);
-
- success_uwfree:
-       write_unlock(&peer->conn_lock);
-
-       if (candidate) {
-               write_lock(&peer->conn_idlock);
-               list_del(&candidate->id_link);
-               write_unlock(&peer->conn_idlock);
-
-               __RXACCT(atomic_dec(&rxrpc_connection_count));
-               kfree(candidate);
-       }
-
-       if (fresh) {
-               down_write(&rxrpc_conns_sem);
-               list_add_tail(&conn->proc_link, &rxrpc_conns);
-               up_write(&rxrpc_conns_sem);
-       }
-
- success:
-       *_conn = conn;
-       _leave(" = 0 (%p)", conn);
-       return 0;
-
-       /* handle the connection being found in the active list straight off */
- found_active:
-       rxrpc_get_connection(conn);
-       read_unlock(&peer->conn_lock);
-       goto success;
-
-       /* handle resurrecting a connection from the graveyard */
- found_in_graveyard:
-       _debug("resurrecting connection: {%08x} [in]", ntohl(conn->conn_id));
-       rxrpc_get_peer(peer);
-       rxrpc_get_connection(conn);
-       rxrpc_krxtimod_del_timer(&conn->timeout);
-       list_del_init(&conn->link);
-       spin_unlock(&peer->conn_gylock);
-       goto make_active;
-
-       /* handle finding the connection on the second time through the active
-        * list */
- found_active_second_chance:
-       rxrpc_get_connection(conn);
-       goto success_uwfree;
-
-} /* end rxrpc_connection_lookup() */
-
-/*****************************************************************************/
-/*
- * finish using a connection record
- * - it will be transferred to the peer's connection graveyard when refcount
- *   reaches 0
- */
-void rxrpc_put_connection(struct rxrpc_connection *conn)
-{
-       struct rxrpc_peer *peer;
-
-       if (!conn)
-               return;
-
-       _enter("%p{u=%d p=%hu}",
-              conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));
-
-       peer = conn->peer;
-       spin_lock(&peer->conn_gylock);
-
-       /* sanity check */
-       if (atomic_read(&conn->usage) <= 0)
-               BUG();
-
-       if (likely(!atomic_dec_and_test(&conn->usage))) {
-               spin_unlock(&peer->conn_gylock);
-               _leave("");
-               return;
-       }
-
-       /* move to graveyard queue */
-       _debug("burying connection: {%08x}", ntohl(conn->conn_id));
-       list_move_tail(&conn->link, &peer->conn_graveyard);
-
-       rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);
-
-       spin_unlock(&peer->conn_gylock);
-
-       rxrpc_put_peer(conn->peer);
-
-       _leave(" [killed]");
-} /* end rxrpc_put_connection() */
-
-/*****************************************************************************/
-/*
- * free a connection record
- */
-static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
-{
-       struct rxrpc_peer *peer;
-
-       _enter("%p{u=%d p=%hu}",
-              conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));
-
-       peer = conn->peer;
-
-       if (atomic_read(&conn->usage) < 0)
-               BUG();
-
-       /* remove from graveyard if still dead */
-       spin_lock(&peer->conn_gylock);
-       if (atomic_read(&conn->usage) == 0) {
-               list_del_init(&conn->link);
-       }
-       else {
-               conn = NULL;
-       }
-       spin_unlock(&peer->conn_gylock);
-
-       if (!conn) {
-               _leave("");
-               return; /* resurrected */
-       }
-
-       _debug("--- Destroying Connection %p{%08x} ---",
-              conn, ntohl(conn->conn_id));
-
-       down_write(&rxrpc_conns_sem);
-       list_del(&conn->proc_link);
-       up_write(&rxrpc_conns_sem);
-
-       write_lock(&peer->conn_idlock);
-       list_del(&conn->id_link);
-       write_unlock(&peer->conn_idlock);
-
-       __RXACCT(atomic_dec(&rxrpc_connection_count));
-       kfree(conn);
-
-       /* if the graveyard is now empty, wake up anyone waiting for that */
-       if (atomic_dec_and_test(&peer->conn_count))
-               wake_up(&peer->conn_gy_waitq);
-
-       _leave(" [destroyed]");
-} /* end rxrpc_conn_do_timeout() */
-
-/*****************************************************************************/
-/*
- * clear all connection records from a peer endpoint
- */
-void rxrpc_conn_clearall(struct rxrpc_peer *peer)
-{
-       DECLARE_WAITQUEUE(myself, current);
-
-       struct rxrpc_connection *conn;
-       int err;
-
-       _enter("%p", peer);
-
-       /* there shouldn't be any active conns remaining */
-       if (!list_empty(&peer->conn_active))
-               BUG();
-
-       /* manually timeout all conns in the graveyard */
-       spin_lock(&peer->conn_gylock);
-       while (!list_empty(&peer->conn_graveyard)) {
-               conn = list_entry(peer->conn_graveyard.next,
-                                 struct rxrpc_connection, link);
-               err = rxrpc_krxtimod_del_timer(&conn->timeout);
-               spin_unlock(&peer->conn_gylock);
-
-               if (err == 0)
-                       rxrpc_conn_do_timeout(conn);
-
-               spin_lock(&peer->conn_gylock);
-       }
-       spin_unlock(&peer->conn_gylock);
-
-       /* wait for the conn graveyard to be completely cleared */
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       add_wait_queue(&peer->conn_gy_waitq, &myself);
-
-       while (atomic_read(&peer->conn_count) != 0) {
-               schedule();
-               set_current_state(TASK_UNINTERRUPTIBLE);
-       }
-
-       remove_wait_queue(&peer->conn_gy_waitq, &myself);
-       set_current_state(TASK_RUNNING);
-
-       _leave("");
-} /* end rxrpc_conn_clearall() */
-
-/*****************************************************************************/
-/*
- * allocate and prepare a message for sending out through the transport
- * endpoint
- */
-int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
-                     struct rxrpc_call *call,
-                     uint8_t type,
-                     int dcount,
-                     struct kvec diov[],
-                     gfp_t alloc_flags,
-                     struct rxrpc_message **_msg)
-{
-       struct rxrpc_message *msg;
-       int loop;
-
-       _enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type);
-
-       if (dcount > 3) {
-               _leave(" = -EINVAL");
-               return -EINVAL;
-       }
-
-       msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags);
-       if (!msg) {
-               _leave(" = -ENOMEM");
-               return -ENOMEM;
-       }
-
-       atomic_set(&msg->usage, 1);
-
-       INIT_LIST_HEAD(&msg->link);
-
-       msg->state = RXRPC_MSG_PREPARED;
-
-       msg->hdr.epoch          = conn->out_epoch;
-       msg->hdr.cid            = conn->conn_id | (call ? call->chan_ix : 0);
-       msg->hdr.callNumber     = call ? call->call_id : 0;
-       msg->hdr.type           = type;
-       msg->hdr.flags          = conn->out_clientflag;
-       msg->hdr.securityIndex  = conn->security_ix;
-       msg->hdr.serviceId      = conn->service_id;
-
-       /* generate sequence numbers for data packets */
-       if (call) {
-               switch (type) {
-               case RXRPC_PACKET_TYPE_DATA:
-                       msg->seq = ++call->snd_seq_count;
-                       msg->hdr.seq = htonl(msg->seq);
-                       break;
-               case RXRPC_PACKET_TYPE_ACK:
-                       /* ACK sequence numbers are complicated. The following
-                        * may be wrong:
-                        * - jumbo packet ACKs should have a seq number
-                        * - normal ACKs should not
-                        */
-               default:
-                       break;
-               }
-       }
-
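-       /* iovec slot 0 carries the wire header; the caller's buffers follow
-        * in slots 1..dcount */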
-       msg->dcount = dcount + 1;
-       msg->dsize = sizeof(msg->hdr);
-       msg->data[0].iov_len = sizeof(msg->hdr);
-       msg->data[0].iov_base = &msg->hdr;
-
-       for (loop = 0; loop < dcount; loop++) {
-               msg->dsize += diov[loop].iov_len;
-               msg->data[loop+1].iov_len  = diov[loop].iov_len;
-               msg->data[loop+1].iov_base = diov[loop].iov_base;
-       }
-
-       __RXACCT(atomic_inc(&rxrpc_message_count));
-       *_msg = msg;
-       _leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count));
-       return 0;
-} /* end rxrpc_conn_newmsg() */
-
-/*****************************************************************************/
-/*
- * free a message
- */
-void __rxrpc_put_message(struct rxrpc_message *msg)
-{
-       int loop;
-
-       _enter("%p #%d", msg, atomic_read(&rxrpc_message_count));
-
-       if (msg->pkt)
-               kfree_skb(msg->pkt);
-       rxrpc_put_connection(msg->conn);
-
-       for (loop = 0; loop < 8; loop++)
-               if (test_bit(loop, &msg->dfree))
-                       kfree(msg->data[loop].iov_base);
-
-       __RXACCT(atomic_dec(&rxrpc_message_count));
-       kfree(msg);
-
-       _leave("");
-} /* end __rxrpc_put_message() */
-
-/*****************************************************************************/
-/*
- * send a message out through the transport endpoint
- */
-int rxrpc_conn_sendmsg(struct rxrpc_connection *conn,
-                      struct rxrpc_message *msg)
-{
-       struct msghdr msghdr;
-       int ret;
-
-       _enter("%p{%d}", conn, ntohs(conn->addr.sin_port));
-
-       /* fill in some fields in the header */
-       spin_lock(&conn->lock);
-       msg->hdr.serial = htonl(++conn->serial_counter);
-       msg->rttdone = 0;
-       spin_unlock(&conn->lock);
-
-       /* set up the message to be transmitted */
-       msghdr.msg_name         = &conn->addr;
-       msghdr.msg_namelen      = sizeof(conn->addr);
-       msghdr.msg_control      = NULL;
-       msghdr.msg_controllen   = 0;
-       msghdr.msg_flags        = MSG_CONFIRM | MSG_DONTWAIT;
-
-       _net("Sending message type %d of %Zd bytes to %08x:%d",
-            msg->hdr.type,
-            msg->dsize,
-            ntohl(conn->addr.sin_addr.s_addr),
-            ntohs(conn->addr.sin_port));
-
-       /* send the message */
-       ret = kernel_sendmsg(conn->trans->socket, &msghdr,
-                            msg->data, msg->dcount, msg->dsize);
-       if (ret < 0) {
-               msg->state = RXRPC_MSG_ERROR;
-       } else {
-               msg->state = RXRPC_MSG_SENT;
-               ret = 0;
-
-               spin_lock(&conn->lock);
-               do_gettimeofday(&conn->atime);
-               msg->stamp = conn->atime;
-               spin_unlock(&conn->lock);
-       }
-
-       _leave(" = %d", ret);
-
-       return ret;
-} /* end rxrpc_conn_sendmsg() */
-
-/*****************************************************************************/
-/*
- * deal with a subsequent call packet
- */
-int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
-                                  struct rxrpc_call *call,
-                                  struct rxrpc_message *msg)
-{
-       struct rxrpc_message *pmsg;
-       struct dst_entry *dst;
-       struct list_head *_p;
-       unsigned cix, seq;
-       int ret = 0;
-
-       _enter("%p,%p,%p", conn, call, msg);
-
-       if (!call) {
-               cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;
-
-               spin_lock(&conn->lock);
-               call = conn->channels[cix];
-
-               if (!call || call->call_id != msg->hdr.callNumber) {
-                       spin_unlock(&conn->lock);
-                       rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT);
-                       goto out;
-               }
-               else {
-                       rxrpc_get_call(call);
-                       spin_unlock(&conn->lock);
-               }
-       }
-       else {
-               rxrpc_get_call(call);
-       }
-
-       _proto("Received packet %%%u [%u] on call %hu:%u:%u",
-              ntohl(msg->hdr.serial),
-              ntohl(msg->hdr.seq),
-              ntohs(msg->hdr.serviceId),
-              ntohl(conn->conn_id),
-              ntohl(call->call_id));
-
-       call->pkt_rcv_count++;
-
-       dst = msg->pkt->dst;
-       if (dst && dst->dev)
-               conn->peer->if_mtu =
-                       dst->dev->mtu - dst->dev->hard_header_len;
-
-       /* queue on the call in seq order */
-       rxrpc_get_message(msg);
-       seq = msg->seq;
-
-       spin_lock(&call->lock);
-       list_for_each(_p, &call->rcv_receiveq) {
-               pmsg = list_entry(_p, struct rxrpc_message, link);
-               if (pmsg->seq > seq)
-                       break;
-       }
-       list_add_tail(&msg->link, _p);
-
-       /* reset the activity timeout */
-       call->flags |= RXRPC_CALL_RCV_PKT;
-       mod_timer(&call->rcv_timeout, jiffies + rxrpc_call_rcv_timeout * HZ);
-
-       spin_unlock(&call->lock);
-
-       rxrpc_krxiod_queue_call(call);
-
-       rxrpc_put_call(call);
- out:
-       _leave(" = %d", ret);
-       return ret;
-} /* end rxrpc_conn_receive_call_packet() */
-
-/*****************************************************************************/
-/*
- * handle an ICMP error being applied to a connection
- */
-void rxrpc_conn_handle_error(struct rxrpc_connection *conn,
-                            int local, int errno)
-{
-       struct rxrpc_call *calls[4];
-       int loop;
-
-       _enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno);
-
-       /* get a ref to all my calls in one go */
-       memset(calls, 0, sizeof(calls));
-       spin_lock(&conn->lock);
-
-       for (loop = 3; loop >= 0; loop--) {
-               if (conn->channels[loop]) {
-                       calls[loop] = conn->channels[loop];
-                       rxrpc_get_call(calls[loop]);
-               }
-       }
-
-       spin_unlock(&conn->lock);
-
-       /* now kick them all */
-       for (loop = 3; loop >= 0; loop--) {
-               if (calls[loop]) {
-                       rxrpc_call_handle_error(calls[loop], local, errno);
-                       rxrpc_put_call(calls[loop]);
-               }
-       }
-
-       _leave("");
-} /* end rxrpc_conn_handle_error() */
diff --git a/net/rxrpc/internal.h b/net/rxrpc/internal.h
deleted file mode 100644 (file)
index cc0c579..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/* internal.h: internal Rx RPC stuff
- *
- * Copyright (c) 2002   David Howells (dhowells@redhat.com).
- */
-
-#ifndef RXRPC_INTERNAL_H
-#define RXRPC_INTERNAL_H
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-
-/*
- * debug accounting
- */
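-/* setting the #if below to 0 compiles the __RXACCT accounting counters out */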
-#if 1
-#define __RXACCT_DECL(X) X
-#define __RXACCT(X) do { X; } while(0)
-#else
-#define __RXACCT_DECL(X)
-#define __RXACCT(X) do { } while(0)
-#endif
-
-__RXACCT_DECL(extern atomic_t rxrpc_transport_count);
-__RXACCT_DECL(extern atomic_t rxrpc_peer_count);
-__RXACCT_DECL(extern atomic_t rxrpc_connection_count);
-__RXACCT_DECL(extern atomic_t rxrpc_call_count);
-__RXACCT_DECL(extern atomic_t rxrpc_message_count);
-
-/*
- * debug tracing
- */
-#define kenter(FMT, a...)      printk("==> %s("FMT")\n",__FUNCTION__ , ##a)
-#define kleave(FMT, a...)      printk("<== %s()"FMT"\n",__FUNCTION__ , ##a)
-#define kdebug(FMT, a...)      printk("    "FMT"\n" , ##a)
-#define kproto(FMT, a...)      printk("### "FMT"\n" , ##a)
-#define knet(FMT, a...)                printk("    "FMT"\n" , ##a)
-
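-/* the #if 0 below selects the run-time gated variants (checked against
- * rxrpc_ktrace and friends); change to 1 for unconditional debug output */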
-#if 0
-#define _enter(FMT, a...)      kenter(FMT , ##a)
-#define _leave(FMT, a...)      kleave(FMT , ##a)
-#define _debug(FMT, a...)      kdebug(FMT , ##a)
-#define _proto(FMT, a...)      kproto(FMT , ##a)
-#define _net(FMT, a...)                knet(FMT , ##a)
-#else
-#define _enter(FMT, a...)      do { if (rxrpc_ktrace) kenter(FMT , ##a); } while(0)
-#define _leave(FMT, a...)      do { if (rxrpc_ktrace) kleave(FMT , ##a); } while(0)
-#define _debug(FMT, a...)      do { if (rxrpc_kdebug) kdebug(FMT , ##a); } while(0)
-#define _proto(FMT, a...)      do { if (rxrpc_kproto) kproto(FMT , ##a); } while(0)
-#define _net(FMT, a...)                do { if (rxrpc_knet)   knet  (FMT , ##a); } while(0)
-#endif
-
-static inline void rxrpc_discard_my_signals(void)
-{
-       while (signal_pending(current)) {
-               siginfo_t sinfo;
-
-               spin_lock_irq(&current->sighand->siglock);
-               dequeue_signal(current, &current->blocked, &sinfo);
-               spin_unlock_irq(&current->sighand->siglock);
-       }
-}
-
-/*
- * call.c
- */
-extern struct list_head rxrpc_calls;
-extern struct rw_semaphore rxrpc_calls_sem;
-
-/*
- * connection.c
- */
-extern struct list_head rxrpc_conns;
-extern struct rw_semaphore rxrpc_conns_sem;
-extern unsigned long rxrpc_conn_timeout;
-
-extern void rxrpc_conn_clearall(struct rxrpc_peer *peer);
-
-/*
- * peer.c
- */
-extern struct list_head rxrpc_peers;
-extern struct rw_semaphore rxrpc_peers_sem;
-extern unsigned long rxrpc_peer_timeout;
-
-extern void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
-                                    struct rxrpc_message *msg,
-                                    struct rxrpc_message *resp);
-
-extern void rxrpc_peer_clearall(struct rxrpc_transport *trans);
-
-
-/*
- * proc.c
- */
-#ifdef CONFIG_PROC_FS
-extern int rxrpc_proc_init(void);
-extern void rxrpc_proc_cleanup(void);
-#endif
-
-/*
- * transport.c
- */
-extern struct list_head rxrpc_proc_transports;
-extern struct rw_semaphore rxrpc_proc_transports_sem;
-
-#endif /* RXRPC_INTERNAL_H */
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c
deleted file mode 100644 (file)
index bbbcd6c..0000000
+++ /dev/null
@@ -1,262 +0,0 @@
-/* krxiod.c: Rx I/O daemon
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/freezer.h>
-#include <rxrpc/krxiod.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/call.h>
-#include "internal.h"
-
-static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
-static DECLARE_COMPLETION(rxrpc_krxiod_dead);
-
-static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);
-
-static LIST_HEAD(rxrpc_krxiod_transportq);
-static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock);
-
-static LIST_HEAD(rxrpc_krxiod_callq);
-static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock);
-
-static volatile int rxrpc_krxiod_die;
-
-/*****************************************************************************/
-/*
- * Rx I/O daemon
- */
-static int rxrpc_krxiod(void *arg)
-{
-       DECLARE_WAITQUEUE(krxiod, current);
-
-       printk("Started krxiod %d\n",current->pid);
-
-       daemonize("krxiod");
-
-       /* loop around waiting for work to do */
-       do {
-               /* wait for work or to be told to exit */
-               _debug("### Begin Wait");
-               if (!atomic_read(&rxrpc_krxiod_qcount)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-
-                       add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
-
-                       for (;;) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               if (atomic_read(&rxrpc_krxiod_qcount) ||
-                                   rxrpc_krxiod_die ||
-                                   signal_pending(current))
-                                       break;
-
-                               schedule();
-                       }
-
-                       remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
-                       set_current_state(TASK_RUNNING);
-               }
-               _debug("### End Wait");
-
-               /* do work if we've been given some to do */
-               _debug("### Begin Work");
-
-               /* see if there's a transport in need of attention */
-               if (!list_empty(&rxrpc_krxiod_transportq)) {
-                       struct rxrpc_transport *trans = NULL;
-
-                       spin_lock_irq(&rxrpc_krxiod_transportq_lock);
-
-                       if (!list_empty(&rxrpc_krxiod_transportq)) {
-                               trans = list_entry(
-                                       rxrpc_krxiod_transportq.next,
-                                       struct rxrpc_transport,
-                                       krxiodq_link);
-
-                               list_del_init(&trans->krxiodq_link);
-                               atomic_dec(&rxrpc_krxiod_qcount);
-
-                               /* make sure it hasn't gone away and won't go
-                                * away while we use it */
-                               if (atomic_read(&trans->usage) > 0)
-                                       rxrpc_get_transport(trans);
-                               else
-                                       trans = NULL;
-                       }
-
-                       spin_unlock_irq(&rxrpc_krxiod_transportq_lock);
-
-                       if (trans) {
-                               rxrpc_trans_receive_packet(trans);
-                               rxrpc_put_transport(trans);
-                       }
-               }
-
-               /* see if there's a call in need of attention */
-               if (!list_empty(&rxrpc_krxiod_callq)) {
-                       struct rxrpc_call *call = NULL;
-
-                       spin_lock_irq(&rxrpc_krxiod_callq_lock);
-
-                       if (!list_empty(&rxrpc_krxiod_callq)) {
-                               call = list_entry(rxrpc_krxiod_callq.next,
-                                                 struct rxrpc_call,
-                                                 rcv_krxiodq_lk);
-                               list_del_init(&call->rcv_krxiodq_lk);
-                               atomic_dec(&rxrpc_krxiod_qcount);
-
-                               /* make sure it hasn't gone away and won't go
-                                * away while we use it */
-                               if (atomic_read(&call->usage) > 0) {
-                                       _debug("@@@ KRXIOD"
-                                              " Begin Attend Call %p", call);
-                                       rxrpc_get_call(call);
-                               }
-                               else {
-                                       call = NULL;
-                               }
-                       }
-
-                       spin_unlock_irq(&rxrpc_krxiod_callq_lock);
-
-                       if (call) {
-                               rxrpc_call_do_stuff(call);
-                               rxrpc_put_call(call);
-                               _debug("@@@ KRXIOD End Attend Call %p", call);
-                       }
-               }
-
-               _debug("### End Work");
-
-               try_to_freeze();
-
-               /* discard pending signals */
-               rxrpc_discard_my_signals();
-
-       } while (!rxrpc_krxiod_die);
-
-       /* and that's all */
-       complete_and_exit(&rxrpc_krxiod_dead, 0);
-
-} /* end rxrpc_krxiod() */
-
-/*****************************************************************************/
-/*
- * start up a krxiod daemon
- */
-int __init rxrpc_krxiod_init(void)
-{
-       return kernel_thread(rxrpc_krxiod, NULL, 0);
-
-} /* end rxrpc_krxiod_init() */
-
-/*****************************************************************************/
-/*
- * kill the krxiod daemon and wait for it to complete
- */
-void rxrpc_krxiod_kill(void)
-{
-       rxrpc_krxiod_die = 1;
-       wake_up_all(&rxrpc_krxiod_sleepq);
-       wait_for_completion(&rxrpc_krxiod_dead);
-
-} /* end rxrpc_krxiod_kill() */
-
-/*****************************************************************************/
-/*
- * queue a transport for attention by krxiod
- */
-void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
-{
-       unsigned long flags;
-
-       _enter("");
-
-       if (list_empty(&trans->krxiodq_link)) {
-               spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
-
-               if (list_empty(&trans->krxiodq_link)) {
-                       if (atomic_read(&trans->usage) > 0) {
-                               list_add_tail(&trans->krxiodq_link,
-                                             &rxrpc_krxiod_transportq);
-                               atomic_inc(&rxrpc_krxiod_qcount);
-                       }
-               }
-
-               spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
-               wake_up_all(&rxrpc_krxiod_sleepq);
-       }
-
-       _leave("");
-
-} /* end rxrpc_krxiod_queue_transport() */
-
-/*****************************************************************************/
-/*
- * dequeue a transport from krxiod's attention queue
- */
-void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
-{
-       unsigned long flags;
-
-       _enter("");
-
-       spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
-       if (!list_empty(&trans->krxiodq_link)) {
-               list_del_init(&trans->krxiodq_link);
-               atomic_dec(&rxrpc_krxiod_qcount);
-       }
-       spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
-
-       _leave("");
-
-} /* end rxrpc_krxiod_dequeue_transport() */
-
-/*****************************************************************************/
-/*
- * queue a call for attention by krxiod
- */
-void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
-{
-       unsigned long flags;
-
-       if (list_empty(&call->rcv_krxiodq_lk)) {
-               spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
-               if (atomic_read(&call->usage) > 0) {
-                       list_add_tail(&call->rcv_krxiodq_lk,
-                                     &rxrpc_krxiod_callq);
-                       atomic_inc(&rxrpc_krxiod_qcount);
-               }
-               spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
-       }
-       wake_up_all(&rxrpc_krxiod_sleepq);
-
-} /* end rxrpc_krxiod_queue_call() */
-
-/*****************************************************************************/
-/*
- * dequeue a call from krxiod's attention queue
- */
-void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
-       if (!list_empty(&call->rcv_krxiodq_lk)) {
-               list_del_init(&call->rcv_krxiodq_lk);
-               atomic_dec(&rxrpc_krxiod_qcount);
-       }
-       spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
-
-} /* end rxrpc_krxiod_dequeue_call() */
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
deleted file mode 100644 (file)
index 9a1e7f5..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-/* krxsecd.c: Rx security daemon
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * This daemon deals with:
- * - consulting the application as to whether inbound peers and calls should be authorised
- * - generating security challenges for inbound connections
- * - responding to security challenges on outbound connections
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <rxrpc/krxsecd.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/message.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/call.h>
-#include <linux/udp.h>
-#include <linux/ip.h>
-#include <linux/freezer.h>
-#include <net/sock.h>
-#include "internal.h"
-
-static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxsecd_sleepq);
-static DECLARE_COMPLETION(rxrpc_krxsecd_dead);
-static volatile int rxrpc_krxsecd_die;
-
-static atomic_t rxrpc_krxsecd_qcount;
-
-/* queue of unprocessed inbound messages with seqno #1 and
- * RXRPC_CLIENT_INITIATED flag set */
-static LIST_HEAD(rxrpc_krxsecd_initmsgq);
-static DEFINE_SPINLOCK(rxrpc_krxsecd_initmsgq_lock);
-
-static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg);
-
-/*****************************************************************************/
-/*
- * Rx security daemon
- */
-static int rxrpc_krxsecd(void *arg)
-{
-       DECLARE_WAITQUEUE(krxsecd, current);
-
-       int die;
-
-       printk("Started krxsecd %d\n", current->pid);
-
-       daemonize("krxsecd");
-
-       /* loop around waiting for work to do */
-       do {
-               /* wait for work or to be told to exit */
-               _debug("### Begin Wait");
-               if (!atomic_read(&rxrpc_krxsecd_qcount)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-
-                       add_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd);
-
-                       for (;;) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               if (atomic_read(&rxrpc_krxsecd_qcount) ||
-                                   rxrpc_krxsecd_die ||
-                                   signal_pending(current))
-                                       break;
-
-                               schedule();
-                       }
-
-                       remove_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd);
-                       set_current_state(TASK_RUNNING);
-               }
-               die = rxrpc_krxsecd_die;
-               _debug("### End Wait");
-
-               /* see if there are incoming calls in need of authentication */
-               _debug("### Begin Inbound Calls");
-
-               if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
-                       struct rxrpc_message *msg = NULL;
-
-                       spin_lock(&rxrpc_krxsecd_initmsgq_lock);
-
-                       if (!list_empty(&rxrpc_krxsecd_initmsgq)) {
-                               msg = list_entry(rxrpc_krxsecd_initmsgq.next,
-                                                struct rxrpc_message, link);
-                               list_del_init(&msg->link);
-                               atomic_dec(&rxrpc_krxsecd_qcount);
-                       }
-
-                       spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
-
-                       if (msg) {
-                               rxrpc_krxsecd_process_incoming_call(msg);
-                               rxrpc_put_message(msg);
-                       }
-               }
-
-               _debug("### End Inbound Calls");
-
-               try_to_freeze();
-
-               /* discard pending signals */
-               rxrpc_discard_my_signals();
-
-       } while (!die);
-
-       /* and that's all */
-       complete_and_exit(&rxrpc_krxsecd_dead, 0);
-
-} /* end rxrpc_krxsecd() */
-
-/*****************************************************************************/
-/*
- * start up a krxsecd daemon
- */
-int __init rxrpc_krxsecd_init(void)
-{
-       return kernel_thread(rxrpc_krxsecd, NULL, 0);
-
-} /* end rxrpc_krxsecd_init() */
-
-/*****************************************************************************/
-/*
- * kill the krxsecd daemon and wait for it to complete
- */
-void rxrpc_krxsecd_kill(void)
-{
-       rxrpc_krxsecd_die = 1;
-       wake_up_all(&rxrpc_krxsecd_sleepq);
-       wait_for_completion(&rxrpc_krxsecd_dead);
-
-} /* end rxrpc_krxsecd_kill() */
-
-/*****************************************************************************/
-/*
- * clear all pending incoming calls for the specified transport
- */
-void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
-{
-       LIST_HEAD(tmp);
-
-       struct rxrpc_message *msg;
-       struct list_head *_p, *_n;
-
-       _enter("%p",trans);
-
-       /* move all the messages for this transport onto a temp list */
-       spin_lock(&rxrpc_krxsecd_initmsgq_lock);
-
-       list_for_each_safe(_p, _n, &rxrpc_krxsecd_initmsgq) {
-               msg = list_entry(_p, struct rxrpc_message, link);
-               if (msg->trans == trans) {
-                       list_move_tail(&msg->link, &tmp);
-                       atomic_dec(&rxrpc_krxsecd_qcount);
-               }
-       }
-
-       spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
-
-       /* zap all messages on the temp list */
-       while (!list_empty(&tmp)) {
-               msg = list_entry(tmp.next, struct rxrpc_message, link);
-               list_del_init(&msg->link);
-               rxrpc_put_message(msg);
-       }
-
-       _leave("");
-} /* end rxrpc_krxsecd_clear_transport() */
-
-/*****************************************************************************/
-/*
- * queue a message on the incoming calls list
- */
-void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg)
-{
-       _enter("%p", msg);
-
-       /* queue for processing by krxsecd */
-       spin_lock(&rxrpc_krxsecd_initmsgq_lock);
-
-       if (!rxrpc_krxsecd_die) {
-               rxrpc_get_message(msg);
-               list_add_tail(&msg->link, &rxrpc_krxsecd_initmsgq);
-               atomic_inc(&rxrpc_krxsecd_qcount);
-       }
-
-       spin_unlock(&rxrpc_krxsecd_initmsgq_lock);
-
-       wake_up(&rxrpc_krxsecd_sleepq);
-
-       _leave("");
-} /* end rxrpc_krxsecd_queue_incoming_call() */
-
-/*****************************************************************************/
-/*
- * process the initial message of an incoming call
- */
-void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg)
-{
-       struct rxrpc_transport *trans = msg->trans;
-       struct rxrpc_service *srv;
-       struct rxrpc_call *call;
-       struct list_head *_p;
-       unsigned short sid;
-       int ret;
-
-       _enter("%p{tr=%p}", msg, trans);
-
-       ret = rxrpc_incoming_call(msg->conn, msg, &call);
-       if (ret < 0)
-               goto out;
-
-       /* find the matching service on the transport */
-       sid = ntohs(msg->hdr.serviceId);
-       srv = NULL;
-
-       spin_lock(&trans->lock);
-       list_for_each(_p, &trans->services) {
-               srv = list_entry(_p, struct rxrpc_service, link);
-               if (srv->service_id == sid && try_module_get(srv->owner)) {
-                       /* found a match (made sure it won't vanish) */
-                       _debug("found service '%s'", srv->name);
-                       call->owner = srv->owner;
-                       break;
-               }
-       }
-       spin_unlock(&trans->lock);
-
-       /* report the new connection
-        * - the func must inc the call's usage count to keep it
-        */
-       ret = -ENOENT;
-       if (_p != &trans->services) {
-               /* attempt to accept the call */
-               call->conn->service = srv;
-               call->app_attn_func = srv->attn_func;
-               call->app_error_func = srv->error_func;
-               call->app_aemap_func = srv->aemap_func;
-
-               ret = srv->new_call(call);
-
-               /* send an abort if an error occurred */
-               if (ret < 0) {
-                       rxrpc_call_abort(call, ret);
-               }
-               else {
-                       /* formally receive and ACK the new packet */
-                       ret = rxrpc_conn_receive_call_packet(call->conn,
-                                                            call, msg);
-               }
-       }
-
-       rxrpc_put_call(call);
- out:
-       if (ret < 0)
-               rxrpc_trans_immediate_abort(trans, msg, ret);
-
-       _leave(" (%d)", ret);
-} /* end rxrpc_krxsecd_process_incoming_call() */
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c
deleted file mode 100644 (file)
index 9a9b613..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-/* krxtimod.c: RXRPC timeout daemon
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/freezer.h>
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/krxtimod.h>
-#include <asm/errno.h>
-#include "internal.h"
-
-static DECLARE_COMPLETION(krxtimod_alive);
-static DECLARE_COMPLETION(krxtimod_dead);
-static DECLARE_WAIT_QUEUE_HEAD(krxtimod_sleepq);
-static int krxtimod_die;
-
-static LIST_HEAD(krxtimod_list);
-static DEFINE_SPINLOCK(krxtimod_lock);
-
-static int krxtimod(void *arg);
-
-/*****************************************************************************/
-/*
- * start the timeout daemon
- */
-int rxrpc_krxtimod_start(void)
-{
-       int ret;
-
-       ret = kernel_thread(krxtimod, NULL, 0);
-       if (ret < 0)
-               return ret;
-
-       wait_for_completion(&krxtimod_alive);
-
-       return ret;
-} /* end rxrpc_krxtimod_start() */
-
-/*****************************************************************************/
-/*
- * stop the timeout daemon
- */
-void rxrpc_krxtimod_kill(void)
-{
-       /* get rid of my daemon */
-       krxtimod_die = 1;
-       wake_up(&krxtimod_sleepq);
-       wait_for_completion(&krxtimod_dead);
-
-} /* end rxrpc_krxtimod_kill() */
-
-/*****************************************************************************/
-/*
- * timeout processing daemon
- */
-static int krxtimod(void *arg)
-{
-       DECLARE_WAITQUEUE(myself, current);
-
-       rxrpc_timer_t *timer;
-
-       printk("Started krxtimod %d\n", current->pid);
-
-       daemonize("krxtimod");
-
-       complete(&krxtimod_alive);
-
-       /* loop around looking for things to attend to */
- loop:
-       set_current_state(TASK_INTERRUPTIBLE);
-       add_wait_queue(&krxtimod_sleepq, &myself);
-
-       for (;;) {
-               unsigned long jif;
-               long timeout;
-
-               /* deal with the server being asked to die */
-               if (krxtimod_die) {
-                       remove_wait_queue(&krxtimod_sleepq, &myself);
-                       _leave("");
-                       complete_and_exit(&krxtimod_dead, 0);
-               }
-
-               try_to_freeze();
-
-               /* discard pending signals */
-               rxrpc_discard_my_signals();
-
-               /* work out the time to elapse before the next event */
-               spin_lock(&krxtimod_lock);
-               if (list_empty(&krxtimod_list)) {
-                       timeout = MAX_SCHEDULE_TIMEOUT;
-               }
-               else {
-                       timer = list_entry(krxtimod_list.next,
-                                          rxrpc_timer_t, link);
-                       timeout = timer->timo_jif;
-                       jif = jiffies;
-
-                       if (time_before_eq((unsigned long) timeout, jif))
-                               goto immediate;
-
-                       else {
-                               timeout = (long) timeout - (long) jiffies;
-                       }
-               }
-               spin_unlock(&krxtimod_lock);
-
-               schedule_timeout(timeout);
-
-               set_current_state(TASK_INTERRUPTIBLE);
-       }
-
-       /* the thing on the front of the queue needs processing
-        * - we come here with the lock held and timer pointing to the expired
-        *   entry
-        */
- immediate:
-       remove_wait_queue(&krxtimod_sleepq, &myself);
-       set_current_state(TASK_RUNNING);
-
-       _debug("@@@ Begin Timeout of %p", timer);
-
-       /* dequeue the timer */
-       list_del_init(&timer->link);
-       spin_unlock(&krxtimod_lock);
-
-       /* call the timeout function */
-       timer->ops->timed_out(timer);
-
-       _debug("@@@ End Timeout");
-       goto loop;
-
-} /* end krxtimod() */
-
-/*****************************************************************************/
-/*
- * (re-)queue a timer
- */
-void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout)
-{
-       struct list_head *_p;
-       rxrpc_timer_t *ptimer;
-
-       _enter("%p,%lu", timer, timeout);
-
-       spin_lock(&krxtimod_lock);
-
-       list_del(&timer->link);
-
-       /* the timer was deferred or reset - put it back in the queue at the
-        * right place */
-       timer->timo_jif = jiffies + timeout;
-
-       list_for_each(_p, &krxtimod_list) {
-               ptimer = list_entry(_p, rxrpc_timer_t, link);
-               if (time_before(timer->timo_jif, ptimer->timo_jif))
-                       break;
-       }
-
-       list_add_tail(&timer->link, _p); /* insert before stopping point */
-
-       spin_unlock(&krxtimod_lock);
-
-       wake_up(&krxtimod_sleepq);
-
-       _leave("");
-} /* end rxrpc_krxtimod_add_timer() */
-
-/*****************************************************************************/
-/*
- * dequeue a timer
- * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
- */
-int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer)
-{
-       int ret = 0;
-
-       _enter("%p", timer);
-
-       spin_lock(&krxtimod_lock);
-
-       if (list_empty(&timer->link))
-               ret = -ENOENT;
-       else
-               list_del_init(&timer->link);
-
-       spin_unlock(&krxtimod_lock);
-
-       wake_up(&krxtimod_sleepq);
-
-       _leave(" = %d", ret);
-       return ret;
-} /* end rxrpc_krxtimod_del_timer() */
diff --git a/net/rxrpc/main.c b/net/rxrpc/main.c
deleted file mode 100644 (file)
index baec1f7..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-/* main.c: Rx RPC interface
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/krxiod.h>
-#include <rxrpc/krxsecd.h>
-#include <rxrpc/krxtimod.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/call.h>
-#include <rxrpc/message.h>
-#include "internal.h"
-
-MODULE_DESCRIPTION("Rx RPC implementation");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
-__be32 rxrpc_epoch;
-
-/*****************************************************************************/
-/*
- * initialise the Rx module
- */
-static int __init rxrpc_initialise(void)
-{
-       int ret;
-
-       /* my epoch value */
-       rxrpc_epoch = htonl(xtime.tv_sec);
-
-       /* register the /proc interface */
-#ifdef CONFIG_PROC_FS
-       ret = rxrpc_proc_init();
-       if (ret<0)
-               return ret;
-#endif
-
-       /* register the sysctl files */
-#ifdef CONFIG_SYSCTL
-       ret = rxrpc_sysctl_init();
-       if (ret<0)
-               goto error_proc;
-#endif
-
-       /* start the krxtimod daemon */
-       ret = rxrpc_krxtimod_start();
-       if (ret<0)
-               goto error_sysctl;
-
-       /* start the krxiod daemon */
-       ret = rxrpc_krxiod_init();
-       if (ret<0)
-               goto error_krxtimod;
-
-       /* start the krxsecd daemon */
-       ret = rxrpc_krxsecd_init();
-       if (ret<0)
-               goto error_krxiod;
-
-       kdebug("\n\n");
-
-       return 0;
-
- error_krxiod:
-       rxrpc_krxiod_kill();
- error_krxtimod:
-       rxrpc_krxtimod_kill();
- error_sysctl:
-#ifdef CONFIG_SYSCTL
-       rxrpc_sysctl_cleanup();
- error_proc:
-#endif
-#ifdef CONFIG_PROC_FS
-       rxrpc_proc_cleanup();
-#endif
-       return ret;
-} /* end rxrpc_initialise() */
-
-module_init(rxrpc_initialise);
-
-/*****************************************************************************/
-/*
- * clean up the Rx module
- */
-static void __exit rxrpc_cleanup(void)
-{
-       kenter("");
-
-       __RXACCT(printk("Outstanding Messages   : %d\n",
-                       atomic_read(&rxrpc_message_count)));
-       __RXACCT(printk("Outstanding Calls      : %d\n",
-                       atomic_read(&rxrpc_call_count)));
-       __RXACCT(printk("Outstanding Connections: %d\n",
-                       atomic_read(&rxrpc_connection_count)));
-       __RXACCT(printk("Outstanding Peers      : %d\n",
-                       atomic_read(&rxrpc_peer_count)));
-       __RXACCT(printk("Outstanding Transports : %d\n",
-                       atomic_read(&rxrpc_transport_count)));
-
-       rxrpc_krxsecd_kill();
-       rxrpc_krxiod_kill();
-       rxrpc_krxtimod_kill();
-#ifdef CONFIG_SYSCTL
-       rxrpc_sysctl_cleanup();
-#endif
-#ifdef CONFIG_PROC_FS
-       rxrpc_proc_cleanup();
-#endif
-
-       __RXACCT(printk("Outstanding Messages   : %d\n",
-                       atomic_read(&rxrpc_message_count)));
-       __RXACCT(printk("Outstanding Calls      : %d\n",
-                       atomic_read(&rxrpc_call_count)));
-       __RXACCT(printk("Outstanding Connections: %d\n",
-                       atomic_read(&rxrpc_connection_count)));
-       __RXACCT(printk("Outstanding Peers      : %d\n",
-                       atomic_read(&rxrpc_peer_count)));
-       __RXACCT(printk("Outstanding Transports : %d\n",
-                       atomic_read(&rxrpc_transport_count)));
-
-       kleave("");
-} /* end rxrpc_cleanup() */
-
-module_exit(rxrpc_cleanup);
-
-/*****************************************************************************/
-/*
- * clear the dead space between task_struct and kernel stack
- * - called by supplying -finstrument-functions to gcc
- */
-#if 0
-void __cyg_profile_func_enter (void *this_fn, void *call_site)
-__attribute__((no_instrument_function));
-
-void __cyg_profile_func_enter (void *this_fn, void *call_site)
-{
-       asm volatile("  movl    %%esp,%%edi     \n"
-                   "  andl    %0,%%edi        \n"
-                   "  addl    %1,%%edi        \n"
-                   "  movl    %%esp,%%ecx     \n"
-                   "  subl    %%edi,%%ecx     \n"
-                   "  shrl    $2,%%ecx        \n"
-                   "  movl    $0xedededed,%%eax     \n"
-                   "  rep stosl               \n"
-                   :
-                   : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info))
-                   : "eax", "ecx", "edi", "memory", "cc"
-                   );
-}
-
-void __cyg_profile_func_exit(void *this_fn, void *call_site)
-__attribute__((no_instrument_function));
-
-void __cyg_profile_func_exit(void *this_fn, void *call_site)
-{
-       asm volatile("  movl    %%esp,%%edi     \n"
-                   "  andl    %0,%%edi        \n"
-                   "  addl    %1,%%edi        \n"
-                   "  movl    %%esp,%%ecx     \n"
-                   "  subl    %%edi,%%ecx     \n"
-                   "  shrl    $2,%%ecx        \n"
-                   "  movl    $0xdadadada,%%eax     \n"
-                   "  rep stosl               \n"
-                   :
-                   : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info))
-                   : "eax", "ecx", "edi", "memory", "cc"
-                   );
-}
-#endif
diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c
deleted file mode 100644 (file)
index 8a27515..0000000
+++ /dev/null
@@ -1,398 +0,0 @@
-/* peer.c: Rx RPC peer management
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/call.h>
-#include <rxrpc/message.h>
-#include <linux/udp.h>
-#include <linux/ip.h>
-#include <net/sock.h>
-#include <asm/uaccess.h>
-#include <asm/div64.h>
-#include "internal.h"
-
-__RXACCT_DECL(atomic_t rxrpc_peer_count);
-LIST_HEAD(rxrpc_peers);
-DECLARE_RWSEM(rxrpc_peers_sem);
-unsigned long rxrpc_peer_timeout = 12 * 60 * 60;
-
-static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer);
-
-static void __rxrpc_peer_timeout(rxrpc_timer_t *timer)
-{
-       struct rxrpc_peer *peer =
-               list_entry(timer, struct rxrpc_peer, timeout);
-
-       _debug("Rx PEER TIMEOUT [%p{u=%d}]", peer, atomic_read(&peer->usage));
-
-       rxrpc_peer_do_timeout(peer);
-}
-
-static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = {
-       .timed_out      = __rxrpc_peer_timeout,
-};
-
-/*****************************************************************************/
-/*
- * create a peer record
- */
-static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr,
-                              struct rxrpc_peer **_peer)
-{
-       struct rxrpc_peer *peer;
-
-       _enter("%p,%08x", trans, ntohl(addr));
-
-       /* allocate and initialise a peer record */
-       peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
-       if (!peer) {
-               _leave(" = -ENOMEM");
-               return -ENOMEM;
-       }
-
-       atomic_set(&peer->usage, 1);
-
-       INIT_LIST_HEAD(&peer->link);
-       INIT_LIST_HEAD(&peer->proc_link);
-       INIT_LIST_HEAD(&peer->conn_idlist);
-       INIT_LIST_HEAD(&peer->conn_active);
-       INIT_LIST_HEAD(&peer->conn_graveyard);
-       spin_lock_init(&peer->conn_gylock);
-       init_waitqueue_head(&peer->conn_gy_waitq);
-       rwlock_init(&peer->conn_idlock);
-       rwlock_init(&peer->conn_lock);
-       atomic_set(&peer->conn_count, 0);
-       spin_lock_init(&peer->lock);
-       rxrpc_timer_init(&peer->timeout, &rxrpc_peer_timer_ops);
-
-       peer->addr.s_addr = addr;
-
-       peer->trans = trans;
-       peer->ops = trans->peer_ops;
-
-       __RXACCT(atomic_inc(&rxrpc_peer_count));
-       *_peer = peer;
-       _leave(" = 0 (%p)", peer);
-
-       return 0;
-} /* end __rxrpc_create_peer() */
-
-/*****************************************************************************/
-/*
- * find a peer record on the specified transport
- * - returns (if successful) with peer record usage incremented
- * - resurrects it from the graveyard if found there
- */
-int rxrpc_peer_lookup(struct rxrpc_transport *trans, __be32 addr,
-                     struct rxrpc_peer **_peer)
-{
-       struct rxrpc_peer *peer, *candidate = NULL;
-       struct list_head *_p;
-       int ret;
-
-       _enter("%p{%hu},%08x", trans, trans->port, ntohl(addr));
-
-       /* [common case] search the transport's active list first */
-       read_lock(&trans->peer_lock);
-       list_for_each(_p, &trans->peer_active) {
-               peer = list_entry(_p, struct rxrpc_peer, link);
-               if (peer->addr.s_addr == addr)
-                       goto found_active;
-       }
-       read_unlock(&trans->peer_lock);
-
-       /* [uncommon case] not active - create a candidate for a new record */
-       ret = __rxrpc_create_peer(trans, addr, &candidate);
-       if (ret < 0) {
-               _leave(" = %d", ret);
-               return ret;
-       }
-
-       /* search the active list again, just in case it appeared whilst we
-        * were busy */
-       write_lock(&trans->peer_lock);
-       list_for_each(_p, &trans->peer_active) {
-               peer = list_entry(_p, struct rxrpc_peer, link);
-               if (peer->addr.s_addr == addr)
-                       goto found_active_second_chance;
-       }
-
-       /* search the transport's graveyard list */
-       spin_lock(&trans->peer_gylock);
-       list_for_each(_p, &trans->peer_graveyard) {
-               peer = list_entry(_p, struct rxrpc_peer, link);
-               if (peer->addr.s_addr == addr)
-                       goto found_in_graveyard;
-       }
-       spin_unlock(&trans->peer_gylock);
-
-       /* we can now add the new candidate to the list
-        * - tell the application layer that this peer has been added
-        */
-       rxrpc_get_transport(trans);
-       peer = candidate;
-       candidate = NULL;
-
-       if (peer->ops && peer->ops->adding) {
-               ret = peer->ops->adding(peer);
-               if (ret < 0) {
-                       write_unlock(&trans->peer_lock);
-                       __RXACCT(atomic_dec(&rxrpc_peer_count));
-                       kfree(peer);
-                       rxrpc_put_transport(trans);
-                       _leave(" = %d", ret);
-                       return ret;
-               }
-       }
-
-       atomic_inc(&trans->peer_count);
-
- make_active:
-       list_add_tail(&peer->link, &trans->peer_active);
-
- success_uwfree:
-       write_unlock(&trans->peer_lock);
-
-       if (candidate) {
-               __RXACCT(atomic_dec(&rxrpc_peer_count));
-               kfree(candidate);
-       }
-
-       if (list_empty(&peer->proc_link)) {
-               down_write(&rxrpc_peers_sem);
-               list_add_tail(&peer->proc_link, &rxrpc_peers);
-               up_write(&rxrpc_peers_sem);
-       }
-
- success:
-       *_peer = peer;
-
-       _leave(" = 0 (%p{u=%d cc=%d})",
-              peer,
-              atomic_read(&peer->usage),
-              atomic_read(&peer->conn_count));
-       return 0;
-
-       /* handle the peer being found in the active list straight off */
- found_active:
-       rxrpc_get_peer(peer);
-       read_unlock(&trans->peer_lock);
-       goto success;
-
-       /* handle resurrecting a peer from the graveyard */
- found_in_graveyard:
-       rxrpc_get_peer(peer);
-       rxrpc_get_transport(peer->trans);
-       rxrpc_krxtimod_del_timer(&peer->timeout);
-       list_del_init(&peer->link);
-       spin_unlock(&trans->peer_gylock);
-       goto make_active;
-
-       /* handle finding the peer on the second time through the active
-        * list */
- found_active_second_chance:
-       rxrpc_get_peer(peer);
-       goto success_uwfree;
-
-} /* end rxrpc_peer_lookup() */
-
-/*****************************************************************************/
-/*
- * finish with a peer record
- * - it gets sent to the graveyard from where it can be resurrected or timed
- *   out
- */
-void rxrpc_put_peer(struct rxrpc_peer *peer)
-{
-       struct rxrpc_transport *trans = peer->trans;
-
-       _enter("%p{cc=%d a=%08x}",
-              peer,
-              atomic_read(&peer->conn_count),
-              ntohl(peer->addr.s_addr));
-
-       /* sanity check */
-       if (atomic_read(&peer->usage) <= 0)
-               BUG();
-
-       write_lock(&trans->peer_lock);
-       spin_lock(&trans->peer_gylock);
-       if (likely(!atomic_dec_and_test(&peer->usage))) {
-               spin_unlock(&trans->peer_gylock);
-               write_unlock(&trans->peer_lock);
-               _leave("");
-               return;
-       }
-
-       /* move to graveyard queue */
-       list_del(&peer->link);
-       write_unlock(&trans->peer_lock);
-
-       list_add_tail(&peer->link, &trans->peer_graveyard);
-
-       BUG_ON(!list_empty(&peer->conn_active));
-
-       rxrpc_krxtimod_add_timer(&peer->timeout, rxrpc_peer_timeout * HZ);
-
-       spin_unlock(&trans->peer_gylock);
-
-       rxrpc_put_transport(trans);
-
-       _leave(" [killed]");
-} /* end rxrpc_put_peer() */
-
-/*****************************************************************************/
-/*
- * handle a peer timing out in the graveyard
- * - called from krxtimod
- */
-static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer)
-{
-       struct rxrpc_transport *trans = peer->trans;
-
-       _enter("%p{u=%d cc=%d a=%08x}",
-              peer,
-              atomic_read(&peer->usage),
-              atomic_read(&peer->conn_count),
-              ntohl(peer->addr.s_addr));
-
-       BUG_ON(atomic_read(&peer->usage) < 0);
-
-       /* remove from graveyard if still dead */
-       spin_lock(&trans->peer_gylock);
-       if (atomic_read(&peer->usage) == 0)
-               list_del_init(&peer->link);
-       else
-               peer = NULL;
-       spin_unlock(&trans->peer_gylock);
-
-       if (!peer) {
-               _leave("");
-               return; /* resurrected */
-       }
-
-       /* clear all connections on this peer */
-       rxrpc_conn_clearall(peer);
-
-       BUG_ON(!list_empty(&peer->conn_active));
-       BUG_ON(!list_empty(&peer->conn_graveyard));
-
-       /* inform the application layer */
-       if (peer->ops && peer->ops->discarding)
-               peer->ops->discarding(peer);
-
-       if (!list_empty(&peer->proc_link)) {
-               down_write(&rxrpc_peers_sem);
-               list_del(&peer->proc_link);
-               up_write(&rxrpc_peers_sem);
-       }
-
-       __RXACCT(atomic_dec(&rxrpc_peer_count));
-       kfree(peer);
-
-       /* if the graveyard is now empty, wake up anyone waiting for that */
-       if (atomic_dec_and_test(&trans->peer_count))
-               wake_up(&trans->peer_gy_waitq);
-
-       _leave(" [destroyed]");
-} /* end rxrpc_peer_do_timeout() */
-
-/*****************************************************************************/
-/*
- * clear all peer records from a transport endpoint
- */
-void rxrpc_peer_clearall(struct rxrpc_transport *trans)
-{
-       DECLARE_WAITQUEUE(myself,current);
-
-       struct rxrpc_peer *peer;
-       int err;
-
-       _enter("%p",trans);
-
-       /* there shouldn't be any active peers remaining */
-       BUG_ON(!list_empty(&trans->peer_active));
-
-       /* manually timeout all peers in the graveyard */
-       spin_lock(&trans->peer_gylock);
-       while (!list_empty(&trans->peer_graveyard)) {
-               peer = list_entry(trans->peer_graveyard.next,
-                                 struct rxrpc_peer, link);
-               _debug("Clearing peer %p\n", peer);
-               err = rxrpc_krxtimod_del_timer(&peer->timeout);
-               spin_unlock(&trans->peer_gylock);
-
-               if (err == 0)
-                       rxrpc_peer_do_timeout(peer);
-
-               spin_lock(&trans->peer_gylock);
-       }
-       spin_unlock(&trans->peer_gylock);
-
-       /* wait for the peer graveyard to be completely cleared */
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       add_wait_queue(&trans->peer_gy_waitq, &myself);
-
-       while (atomic_read(&trans->peer_count) != 0) {
-               schedule();
-               set_current_state(TASK_UNINTERRUPTIBLE);
-       }
-
-       remove_wait_queue(&trans->peer_gy_waitq, &myself);
-       set_current_state(TASK_RUNNING);
-
-       _leave("");
-} /* end rxrpc_peer_clearall() */
-
-/*****************************************************************************/
-/*
- * calculate and cache the Round-Trip-Time for a message and its response
- */
-void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
-                             struct rxrpc_message *msg,
-                             struct rxrpc_message *resp)
-{
-       unsigned long long rtt;
-       int loop;
-
-       _enter("%p,%p,%p", peer, msg, resp);
-
-       /* calculate the latest RTT */
-       rtt = resp->stamp.tv_sec - msg->stamp.tv_sec;
-       rtt *= 1000000UL;
-       rtt += resp->stamp.tv_usec - msg->stamp.tv_usec;
-
-       /* add to cache */
-       peer->rtt_cache[peer->rtt_point] = rtt;
-       peer->rtt_point++;
-       peer->rtt_point %= RXRPC_RTT_CACHE_SIZE;
-
-       if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE)
-               peer->rtt_usage++;
-
-       /* recalculate RTT */
-       rtt = 0;
-       for (loop = peer->rtt_usage - 1; loop >= 0; loop--)
-               rtt += peer->rtt_cache[loop];
-
-       do_div(rtt, peer->rtt_usage);
-       peer->rtt = rtt;
-
-       _leave(" RTT=%lu.%lums",
-              (long) (peer->rtt / 1000), (long) (peer->rtt % 1000));
-
-} /* end rxrpc_peer_calculate_rtt() */
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
deleted file mode 100644 (file)
index 8551c87..0000000
+++ /dev/null
@@ -1,617 +0,0 @@
-/* proc.c: /proc interface for RxRPC
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <rxrpc/rxrpc.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/call.h>
-#include <rxrpc/message.h>
-#include "internal.h"
-
-static struct proc_dir_entry *proc_rxrpc;
-
-static int rxrpc_proc_transports_open(struct inode *inode, struct file *file);
-static void *rxrpc_proc_transports_start(struct seq_file *p, loff_t *pos);
-static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos);
-static void rxrpc_proc_transports_stop(struct seq_file *p, void *v);
-static int rxrpc_proc_transports_show(struct seq_file *m, void *v);
-
-static struct seq_operations rxrpc_proc_transports_ops = {
-       .start  = rxrpc_proc_transports_start,
-       .next   = rxrpc_proc_transports_next,
-       .stop   = rxrpc_proc_transports_stop,
-       .show   = rxrpc_proc_transports_show,
-};
-
-static const struct file_operations rxrpc_proc_transports_fops = {
-       .open           = rxrpc_proc_transports_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
-};
-
-static int rxrpc_proc_peers_open(struct inode *inode, struct file *file);
-static void *rxrpc_proc_peers_start(struct seq_file *p, loff_t *pos);
-static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos);
-static void rxrpc_proc_peers_stop(struct seq_file *p, void *v);
-static int rxrpc_proc_peers_show(struct seq_file *m, void *v);
-
-static struct seq_operations rxrpc_proc_peers_ops = {
-       .start  = rxrpc_proc_peers_start,
-       .next   = rxrpc_proc_peers_next,
-       .stop   = rxrpc_proc_peers_stop,
-       .show   = rxrpc_proc_peers_show,
-};
-
-static const struct file_operations rxrpc_proc_peers_fops = {
-       .open           = rxrpc_proc_peers_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
-};
-
-static int rxrpc_proc_conns_open(struct inode *inode, struct file *file);
-static void *rxrpc_proc_conns_start(struct seq_file *p, loff_t *pos);
-static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos);
-static void rxrpc_proc_conns_stop(struct seq_file *p, void *v);
-static int rxrpc_proc_conns_show(struct seq_file *m, void *v);
-
-static struct seq_operations rxrpc_proc_conns_ops = {
-       .start  = rxrpc_proc_conns_start,
-       .next   = rxrpc_proc_conns_next,
-       .stop   = rxrpc_proc_conns_stop,
-       .show   = rxrpc_proc_conns_show,
-};
-
-static const struct file_operations rxrpc_proc_conns_fops = {
-       .open           = rxrpc_proc_conns_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
-};
-
-static int rxrpc_proc_calls_open(struct inode *inode, struct file *file);
-static void *rxrpc_proc_calls_start(struct seq_file *p, loff_t *pos);
-static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos);
-static void rxrpc_proc_calls_stop(struct seq_file *p, void *v);
-static int rxrpc_proc_calls_show(struct seq_file *m, void *v);
-
-static struct seq_operations rxrpc_proc_calls_ops = {
-       .start  = rxrpc_proc_calls_start,
-       .next   = rxrpc_proc_calls_next,
-       .stop   = rxrpc_proc_calls_stop,
-       .show   = rxrpc_proc_calls_show,
-};
-
-static const struct file_operations rxrpc_proc_calls_fops = {
-       .open           = rxrpc_proc_calls_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
-};
-
-static const char *rxrpc_call_states7[] = {
-       "complet",
-       "error  ",
-       "rcv_op ",
-       "rcv_arg",
-       "got_arg",
-       "snd_rpl",
-       "fin_ack",
-       "snd_arg",
-       "rcv_rpl",
-       "got_rpl"
-};
-
-static const char *rxrpc_call_error_states7[] = {
-       "no_err ",
-       "loc_abt",
-       "rmt_abt",
-       "loc_err",
-       "rmt_err"
-};
-
-/*****************************************************************************/
-/*
- * initialise the /proc/net/rxrpc/ directory
- */
-int rxrpc_proc_init(void)
-{
-       struct proc_dir_entry *p;
-
-       proc_rxrpc = proc_mkdir("rxrpc", proc_net);
-       if (!proc_rxrpc)
-               goto error;
-       proc_rxrpc->owner = THIS_MODULE;
-
-       p = create_proc_entry("calls", 0, proc_rxrpc);
-       if (!p)
-               goto error_proc;
-       p->proc_fops = &rxrpc_proc_calls_fops;
-       p->owner = THIS_MODULE;
-
-       p = create_proc_entry("connections", 0, proc_rxrpc);
-       if (!p)
-               goto error_calls;
-       p->proc_fops = &rxrpc_proc_conns_fops;
-       p->owner = THIS_MODULE;
-
-       p = create_proc_entry("peers", 0, proc_rxrpc);
-       if (!p)
-               goto error_calls;
-       p->proc_fops = &rxrpc_proc_peers_fops;
-       p->owner = THIS_MODULE;
-
-       p = create_proc_entry("transports", 0, proc_rxrpc);
-       if (!p)
-               goto error_conns;
-       p->proc_fops = &rxrpc_proc_transports_fops;
-       p->owner = THIS_MODULE;
-
-       return 0;
-
- error_conns:
-       remove_proc_entry("connections", proc_rxrpc);
- error_calls:
-       remove_proc_entry("calls", proc_rxrpc);
- error_proc:
-       remove_proc_entry("rxrpc", proc_net);
- error:
-       return -ENOMEM;
-} /* end rxrpc_proc_init() */
-
-/*****************************************************************************/
-/*
- * clean up the /proc/net/rxrpc/ directory
- */
-void rxrpc_proc_cleanup(void)
-{
-       remove_proc_entry("transports", proc_rxrpc);
-       remove_proc_entry("peers", proc_rxrpc);
-       remove_proc_entry("connections", proc_rxrpc);
-       remove_proc_entry("calls", proc_rxrpc);
-
-       remove_proc_entry("rxrpc", proc_net);
-
-} /* end rxrpc_proc_cleanup() */
-
-/*****************************************************************************/
-/*
- * open "/proc/net/rxrpc/transports" which provides a summary of extant transports
- */
-static int rxrpc_proc_transports_open(struct inode *inode, struct file *file)
-{
-       struct seq_file *m;
-       int ret;
-
-       ret = seq_open(file, &rxrpc_proc_transports_ops);
-       if (ret < 0)
-               return ret;
-
-       m = file->private_data;
-       m->private = PDE(inode)->data;
-
-       return 0;
-} /* end rxrpc_proc_transports_open() */
-
-/*****************************************************************************/
-/*
- * set up the iterator to start reading from the transports list and return the first item
- */
-static void *rxrpc_proc_transports_start(struct seq_file *m, loff_t *_pos)
-{
-       struct list_head *_p;
-       loff_t pos = *_pos;
-
-       /* lock the list against modification */
-       down_read(&rxrpc_proc_transports_sem);
-
-       /* allow for the header line */
-       if (!pos)
-               return SEQ_START_TOKEN;
-       pos--;
-
-       /* find the n'th element in the list */
-       list_for_each(_p, &rxrpc_proc_transports)
-               if (!pos--)
-                       break;
-
-       return _p != &rxrpc_proc_transports ? _p : NULL;
-} /* end rxrpc_proc_transports_start() */
-
-/*****************************************************************************/
-/*
- * move to next transport in transports list
- */
-static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos)
-{
-       struct list_head *_p;
-
-       (*pos)++;
-
-       _p = v;
-       _p = (v == SEQ_START_TOKEN) ? rxrpc_proc_transports.next : _p->next;
-
-       return _p != &rxrpc_proc_transports ? _p : NULL;
-} /* end rxrpc_proc_transports_next() */
-
-/*****************************************************************************/
-/*
- * clean up after reading from the transports list
- */
-static void rxrpc_proc_transports_stop(struct seq_file *p, void *v)
-{
-       up_read(&rxrpc_proc_transports_sem);
-
-} /* end rxrpc_proc_transports_stop() */
-
-/*****************************************************************************/
-/*
- * display a header line followed by a load of transport lines
- */
-static int rxrpc_proc_transports_show(struct seq_file *m, void *v)
-{
-       struct rxrpc_transport *trans =
-               list_entry(v, struct rxrpc_transport, proc_link);
-
-       /* display header on line 1 */
-       if (v == SEQ_START_TOKEN) {
-               seq_puts(m, "LOCAL USE\n");
-               return 0;
-       }
-
-       /* display one transport per line on subsequent lines */
-       seq_printf(m, "%5hu %3d\n",
-                  trans->port,
-                  atomic_read(&trans->usage)
-                  );
-
-       return 0;
-} /* end rxrpc_proc_transports_show() */
-
-/*****************************************************************************/
-/*
- * open "/proc/net/rxrpc/peers" which provides a summary of extant peers
- */
-static int rxrpc_proc_peers_open(struct inode *inode, struct file *file)
-{
-       struct seq_file *m;
-       int ret;
-
-       ret = seq_open(file, &rxrpc_proc_peers_ops);
-       if (ret < 0)
-               return ret;
-
-       m = file->private_data;
-       m->private = PDE(inode)->data;
-
-       return 0;
-} /* end rxrpc_proc_peers_open() */
-
-/*****************************************************************************/
-/*
- * set up the iterator to start reading from the peers list and return the
- * first item
- */
-static void *rxrpc_proc_peers_start(struct seq_file *m, loff_t *_pos)
-{
-       struct list_head *_p;
-       loff_t pos = *_pos;
-
-       /* lock the list against modification */
-       down_read(&rxrpc_peers_sem);
-
-       /* allow for the header line */
-       if (!pos)
-               return SEQ_START_TOKEN;
-       pos--;
-
-       /* find the n'th element in the list */
-       list_for_each(_p, &rxrpc_peers)
-               if (!pos--)
-                       break;
-
-       return _p != &rxrpc_peers ? _p : NULL;
-} /* end rxrpc_proc_peers_start() */
-
-/*****************************************************************************/
-/*
- * move to next peer in peers list
- */
-static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos)
-{
-       struct list_head *_p;
-
-       (*pos)++;
-
-       _p = v;
-       _p = (v == SEQ_START_TOKEN) ? rxrpc_peers.next : _p->next;
-
-       return _p != &rxrpc_peers ? _p : NULL;
-} /* end rxrpc_proc_peers_next() */
-
-/*****************************************************************************/
-/*
- * clean up after reading from the peers list
- */
-static void rxrpc_proc_peers_stop(struct seq_file *p, void *v)
-{
-       up_read(&rxrpc_peers_sem);
-
-} /* end rxrpc_proc_peers_stop() */
-
-/*****************************************************************************/
-/*
- * display a header line followed by a load of peer lines
- */
-static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
-{
-       struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link);
-       long timeout;
-
-       /* display header on line 1 */
-       if (v == SEQ_START_TOKEN) {
-               seq_puts(m, "LOCAL REMOTE   USAGE CONNS  TIMEOUT"
-                        "   MTU RTT(uS)\n");
-               return 0;
-       }
-
-       /* display one peer per line on subsequent lines */
-       timeout = 0;
-       if (!list_empty(&peer->timeout.link))
-               timeout = (long) peer->timeout.timo_jif -
-                       (long) jiffies;
-
-       seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n",
-                  peer->trans->port,
-                  ntohl(peer->addr.s_addr),
-                  atomic_read(&peer->usage),
-                  atomic_read(&peer->conn_count),
-                  timeout,
-                  peer->if_mtu,
-                  (long) peer->rtt
-                  );
-
-       return 0;
-} /* end rxrpc_proc_peers_show() */
-
-/*****************************************************************************/
-/*
- * open "/proc/net/rxrpc/connections" which provides a summary of extant
- * connections
- */
-static int rxrpc_proc_conns_open(struct inode *inode, struct file *file)
-{
-       struct seq_file *m;
-       int ret;
-
-       ret = seq_open(file, &rxrpc_proc_conns_ops);
-       if (ret < 0)
-               return ret;
-
-       m = file->private_data;
-       m->private = PDE(inode)->data;
-
-       return 0;
-} /* end rxrpc_proc_conns_open() */
-
-/*****************************************************************************/
-/*
- * set up the iterator to start reading from the conns list and return the
- * first item
- */
-static void *rxrpc_proc_conns_start(struct seq_file *m, loff_t *_pos)
-{
-       struct list_head *_p;
-       loff_t pos = *_pos;
-
-       /* lock the list against modification */
-       down_read(&rxrpc_conns_sem);
-
-       /* allow for the header line */
-       if (!pos)
-               return SEQ_START_TOKEN;
-       pos--;
-
-       /* find the n'th element in the list */
-       list_for_each(_p, &rxrpc_conns)
-               if (!pos--)
-                       break;
-
-       return _p != &rxrpc_conns ? _p : NULL;
-} /* end rxrpc_proc_conns_start() */
-
-/*****************************************************************************/
-/*
- * move to next conn in conns list
- */
-static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos)
-{
-       struct list_head *_p;
-
-       (*pos)++;
-
-       _p = v;
-       _p = (v == SEQ_START_TOKEN) ? rxrpc_conns.next : _p->next;
-
-       return _p != &rxrpc_conns ? _p : NULL;
-} /* end rxrpc_proc_conns_next() */
-
-/*****************************************************************************/
-/*
- * clean up after reading from the conns list
- */
-static void rxrpc_proc_conns_stop(struct seq_file *p, void *v)
-{
-       up_read(&rxrpc_conns_sem);
-
-} /* end rxrpc_proc_conns_stop() */
-
-/*****************************************************************************/
-/*
- * display a header line followed by a load of conn lines
- */
-static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
-{
-       struct rxrpc_connection *conn;
-       long timeout;
-
-       conn = list_entry(v, struct rxrpc_connection, proc_link);
-
-       /* display header on line 1 */
-       if (v == SEQ_START_TOKEN) {
-               seq_puts(m,
-                        "LOCAL REMOTE   RPORT SRVC CONN     END SERIALNO "
-                        "CALLNO     MTU  TIMEOUT"
-                        "\n");
-               return 0;
-       }
-
-       /* display one conn per line on subsequent lines */
-       timeout = 0;
-       if (!list_empty(&conn->timeout.link))
-               timeout = (long) conn->timeout.timo_jif -
-                       (long) jiffies;
-
-       seq_printf(m,
-                  "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n",
-                  conn->trans->port,
-                  ntohl(conn->addr.sin_addr.s_addr),
-                  ntohs(conn->addr.sin_port),
-                  ntohs(conn->service_id),
-                  ntohl(conn->conn_id),
-                  conn->out_clientflag ? "CLT" : "SRV",
-                  conn->serial_counter,
-                  conn->call_counter,
-                  conn->mtu_size,
-                  timeout
-                  );
-
-       return 0;
-} /* end rxrpc_proc_conns_show() */
-
-/*****************************************************************************/
-/*
- * open "/proc/net/rxrpc/calls" which provides a summary of extant calls
- */
-static int rxrpc_proc_calls_open(struct inode *inode, struct file *file)
-{
-       struct seq_file *m;
-       int ret;
-
-       ret = seq_open(file, &rxrpc_proc_calls_ops);
-       if (ret < 0)
-               return ret;
-
-       m = file->private_data;
-       m->private = PDE(inode)->data;
-
-       return 0;
-} /* end rxrpc_proc_calls_open() */
-
-/*****************************************************************************/
-/*
- * set up the iterator to start reading from the calls list and return the
- * first item
- */
-static void *rxrpc_proc_calls_start(struct seq_file *m, loff_t *_pos)
-{
-       struct list_head *_p;
-       loff_t pos = *_pos;
-
-       /* lock the list against modification */
-       down_read(&rxrpc_calls_sem);
-
-       /* allow for the header line */
-       if (!pos)
-               return SEQ_START_TOKEN;
-       pos--;
-
-       /* find the n'th element in the list */
-       list_for_each(_p, &rxrpc_calls)
-               if (!pos--)
-                       break;
-
-       return _p != &rxrpc_calls ? _p : NULL;
-} /* end rxrpc_proc_calls_start() */
-
-/*****************************************************************************/
-/*
- * move to next call in calls list
- */
-static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos)
-{
-       struct list_head *_p;
-
-       (*pos)++;
-
-       _p = v;
-       _p = (v == SEQ_START_TOKEN) ? rxrpc_calls.next : _p->next;
-
-       return _p != &rxrpc_calls ? _p : NULL;
-} /* end rxrpc_proc_calls_next() */
-
-/*****************************************************************************/
-/*
- * clean up after reading from the calls list
- */
-static void rxrpc_proc_calls_stop(struct seq_file *p, void *v)
-{
-       up_read(&rxrpc_calls_sem);
-
-} /* end rxrpc_proc_calls_stop() */
-
-/*****************************************************************************/
-/*
- * display a header line followed by a load of call lines
- */
-static int rxrpc_proc_calls_show(struct seq_file *m, void *v)
-{
-       struct rxrpc_call *call = list_entry(v, struct rxrpc_call, call_link);
-
-       /* display header on line 1 */
-       if (v == SEQ_START_TOKEN) {
-               seq_puts(m,
-                        "LOCAL REMOT SRVC CONN     CALL     DIR USE "
-                        " L STATE   OPCODE ABORT    ERRNO\n"
-                        );
-               return 0;
-       }
-
-       /* display one call per line on subsequent lines */
-       seq_printf(m,
-                  "%5hu %5hu %04hx %08x %08x %s %3u%c"
-                  " %c %-7.7s %6d %08x %5d\n",
-                  call->conn->trans->port,
-                  ntohs(call->conn->addr.sin_port),
-                  ntohs(call->conn->service_id),
-                  ntohl(call->conn->conn_id),
-                  ntohl(call->call_id),
-                  call->conn->service ? "SVC" : "CLT",
-                  atomic_read(&call->usage),
-                  waitqueue_active(&call->waitq) ? 'w' : ' ',
-                  call->app_last_rcv ? 'Y' : '-',
-                  (call->app_call_state!=RXRPC_CSTATE_ERROR ?
-                   rxrpc_call_states7[call->app_call_state] :
-                   rxrpc_call_error_states7[call->app_err_state]),
-                  call->app_opcode,
-                  call->app_abort_code,
-                  call->app_errno
-                  );
-
-       return 0;
-} /* end rxrpc_proc_calls_show() */
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
new file mode 100644 (file)
index 0000000..1eaf529
--- /dev/null
@@ -0,0 +1,1153 @@
+/* Kerberos-based RxRPC security
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/ctype.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+#define RXKAD_VERSION                  2
+#define MAXKRB5TICKETLEN               1024
+#define RXKAD_TKT_TYPE_KERBEROS_V5     256
+#define ANAME_SZ                       40      /* size of authentication name */
+#define INST_SZ                                40      /* size of principal's instance */
+#define REALM_SZ                       40      /* size of principal's auth domain */
+#define SNAME_SZ                       40      /* size of service name */
+
+unsigned rxrpc_debug;
+module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(rxrpc_debug, "rxkad debugging mask");
+
+struct rxkad_level1_hdr {
+       __be32  data_size;      /* true data size (excluding padding) */
+};
+
+struct rxkad_level2_hdr {
+       __be32  data_size;      /* true data size (excluding padding) */
+       __be32  checksum;       /* decrypted data checksum */
+};
+
+MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos)");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+/*
+ * this holds a pinned cipher so that keventd doesn't get called by the cipher
+ * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
+ * packets
+ */
+static struct crypto_blkcipher *rxkad_ci;
+static DEFINE_MUTEX(rxkad_ci_mutex);
+
+/*
+ * initialise connection security
+ */
+static int rxkad_init_connection_security(struct rxrpc_connection *conn)
+{
+       struct rxrpc_key_payload *payload;
+       struct crypto_blkcipher *ci;
+       int ret;
+
+       _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key));
+
+       payload = conn->key->payload.data;
+       conn->security_ix = payload->k.security_index;
+
+       ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(ci)) {
+               _debug("no cipher");
+               ret = PTR_ERR(ci);
+               goto error;
+       }
+
+       if (crypto_blkcipher_setkey(ci, payload->k.session_key,
+                                   sizeof(payload->k.session_key)) < 0)
+               BUG();
+
+       switch (conn->security_level) {
+       case RXRPC_SECURITY_PLAIN:
+               break;
+       case RXRPC_SECURITY_AUTH:
+               conn->size_align = 8;
+               conn->security_size = sizeof(struct rxkad_level1_hdr);
+               conn->header_size += sizeof(struct rxkad_level1_hdr);
+               break;
+       case RXRPC_SECURITY_ENCRYPT:
+               conn->size_align = 8;
+               conn->security_size = sizeof(struct rxkad_level2_hdr);
+               conn->header_size += sizeof(struct rxkad_level2_hdr);
+               break;
+       default:
+               ret = -EKEYREJECTED;
+               goto error;
+       }
+
+       conn->cipher = ci;
+       ret = 0;
+error:
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * prime the encryption state with the invariant parts of a connection's
+ * description
+ */
+static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
+{
+       struct rxrpc_key_payload *payload;
+       struct blkcipher_desc desc;
+       struct scatterlist sg[2];
+       struct rxrpc_crypt iv;
+       struct {
+               __be32 x[4];
+       } tmpbuf __attribute__((aligned(16))); /* must all be in same page */
+
+       _enter("");
+
+       if (!conn->key)
+               return;
+
+       payload = conn->key->payload.data;
+       memcpy(&iv, payload->k.session_key, sizeof(iv));
+
+       desc.tfm = conn->cipher;
+       desc.info = iv.x;
+       desc.flags = 0;
+
+       tmpbuf.x[0] = conn->epoch;
+       tmpbuf.x[1] = conn->cid;
+       tmpbuf.x[2] = 0;
+       tmpbuf.x[3] = htonl(conn->security_ix);
+
+       memset(sg, 0, sizeof(sg));
+       sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
+       sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
+       ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]);
+
+       _leave("");
+}
+
+/*
+ * partially encrypt a packet (level 1 security)
+ */
+static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
+                                   struct sk_buff *skb,
+                                   u32 data_size,
+                                   void *sechdr)
+{
+       struct rxrpc_skb_priv *sp;
+       struct blkcipher_desc desc;
+       struct rxrpc_crypt iv;
+       struct scatterlist sg[2];
+       struct {
+               struct rxkad_level1_hdr hdr;
+               __be32  first;  /* first four bytes of data and padding */
+       } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
+       u16 check;
+
+       sp = rxrpc_skb(skb);
+
+       _enter("");
+
+       check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
+       data_size |= (u32) check << 16;
+
+       tmpbuf.hdr.data_size = htonl(data_size);
+       memcpy(&tmpbuf.first, sechdr + 4, sizeof(tmpbuf.first));
+
+       /* start the encryption afresh */
+       memset(&iv, 0, sizeof(iv));
+       desc.tfm = call->conn->cipher;
+       desc.info = iv.x;
+       desc.flags = 0;
+
+       memset(sg, 0, sizeof(sg));
+       sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
+       sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * wholly encrypt a packet (level 2 security)
+ */
+static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
+                                       struct sk_buff *skb,
+                                       u32 data_size,
+                                       void *sechdr)
+{
+       const struct rxrpc_key_payload *payload;
+       struct rxkad_level2_hdr rxkhdr
+               __attribute__((aligned(8))); /* must be all on one page */
+       struct rxrpc_skb_priv *sp;
+       struct blkcipher_desc desc;
+       struct rxrpc_crypt iv;
+       struct scatterlist sg[16];
+       struct sk_buff *trailer;
+       unsigned len;
+       u16 check;
+       int nsg;
+
+       sp = rxrpc_skb(skb);
+
+       _enter("");
+
+       check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
+
+       rxkhdr.data_size = htonl(data_size | (u32) check << 16);
+       rxkhdr.checksum = 0;
+
+       /* encrypt from the session key */
+       payload = call->conn->key->payload.data;
+       memcpy(&iv, payload->k.session_key, sizeof(iv));
+       desc.tfm = call->conn->cipher;
+       desc.info = iv.x;
+       desc.flags = 0;
+
+       memset(sg, 0, sizeof(sg[0]) * 2);
+       sg_set_buf(&sg[0], sechdr, sizeof(rxkhdr));
+       sg_set_buf(&sg[1], &rxkhdr, sizeof(rxkhdr));
+       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));
+
+       /* we want to encrypt the skbuff in-place */
+       nsg = skb_cow_data(skb, 0, &trailer);
+       if (nsg < 0 || nsg > 16)
+               return -ENOMEM;
+
+       len = data_size + call->conn->size_align - 1;
+       len &= ~(call->conn->size_align - 1);
+
+       skb_to_sgvec(skb, sg, 0, len);
+       crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * checksum an RxRPC packet header
+ */
+static int rxkad_secure_packet(const struct rxrpc_call *call,
+                               struct sk_buff *skb,
+                               size_t data_size,
+                               void *sechdr)
+{
+       struct rxrpc_skb_priv *sp;
+       struct blkcipher_desc desc;
+       struct rxrpc_crypt iv;
+       struct scatterlist sg[2];
+       struct {
+               __be32 x[2];
+       } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
+       __be32 x;
+       int ret;
+
+       sp = rxrpc_skb(skb);
+
+       _enter("{%d{%x}},{#%u},%zu,",
+              call->debug_id, key_serial(call->conn->key), ntohl(sp->hdr.seq),
+              data_size);
+
+       if (!call->conn->cipher)
+               return 0;
+
+       ret = key_validate(call->conn->key);
+       if (ret < 0)
+               return ret;
+
+       /* continue encrypting from where we left off */
+       memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
+       desc.tfm = call->conn->cipher;
+       desc.info = iv.x;
+       desc.flags = 0;
+
+       /* calculate the security checksum */
+       x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
+       x |= sp->hdr.seq & __constant_cpu_to_be32(0x3fffffff);
+       tmpbuf.x[0] = sp->hdr.callNumber;
+       tmpbuf.x[1] = x;
+
+       memset(&sg, 0, sizeof(sg));
+       sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
+       sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       x = ntohl(tmpbuf.x[1]);
+       x = (x >> 16) & 0xffff;
+       if (x == 0)
+               x = 1; /* zero checksums are not permitted */
+       sp->hdr.cksum = htons(x);
+
+       switch (call->conn->security_level) {
+       case RXRPC_SECURITY_PLAIN:
+               ret = 0;
+               break;
+       case RXRPC_SECURITY_AUTH:
+               ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr);
+               break;
+       case RXRPC_SECURITY_ENCRYPT:
+               ret = rxkad_secure_packet_encrypt(call, skb, data_size,
+                                                 sechdr);
+               break;
+       default:
+               ret = -EPERM;
+               break;
+       }
+
+       _leave(" = %d [set %hx]", ret, x);
+       return ret;
+}
+
+/*
+ * decrypt partial encryption on a packet (level 1 security)
+ */
+static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
+                                   struct sk_buff *skb,
+                                   u32 *_abort_code)
+{
+       struct rxkad_level1_hdr sechdr;
+       struct rxrpc_skb_priv *sp;
+       struct blkcipher_desc desc;
+       struct rxrpc_crypt iv;
+       struct scatterlist sg[2];
+       struct sk_buff *trailer;
+       u32 data_size, buf;
+       u16 check;
+
+       _enter("");
+
+       sp = rxrpc_skb(skb);
+
+       /* we want to decrypt the skbuff in-place */
+       if (skb_cow_data(skb, 0, &trailer) < 0)
+               goto nomem;
+
+       skb_to_sgvec(skb, sg, 0, 8);
+
+       /* start the decryption afresh */
+       memset(&iv, 0, sizeof(iv));
+       desc.tfm = call->conn->cipher;
+       desc.info = iv.x;
+       desc.flags = 0;
+
+       crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8);
+
+       /* remove the decrypted packet length */
+       if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
+               goto datalen_error;
+       if (!skb_pull(skb, sizeof(sechdr)))
+               BUG();
+
+       buf = ntohl(sechdr.data_size);
+       data_size = buf & 0xffff;
+
+       check = buf >> 16;
+       check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
+       check &= 0xffff;
+       if (check != 0) {
+               *_abort_code = RXKADSEALEDINCON;
+               goto protocol_error;
+       }
+
+       /* shorten the packet to remove the padding */
+       if (data_size > skb->len)
+               goto datalen_error;
+       else if (data_size < skb->len)
+               skb->len = data_size;
+
+       _leave(" = 0 [dlen=%x]", data_size);
+       return 0;
+
+datalen_error:
+       *_abort_code = RXKADDATALEN;
+protocol_error:
+       _leave(" = -EPROTO");
+       return -EPROTO;
+
+nomem:
+       _leave(" = -ENOMEM");
+       return -ENOMEM;
+}
+
+/*
+ * wholly decrypt a packet (level 2 security)
+ */
+static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
+                                      struct sk_buff *skb,
+                                      u32 *_abort_code)
+{
+       const struct rxrpc_key_payload *payload;
+       struct rxkad_level2_hdr sechdr;
+       struct rxrpc_skb_priv *sp;
+       struct blkcipher_desc desc;
+       struct rxrpc_crypt iv;
+       struct scatterlist _sg[4], *sg;
+       struct sk_buff *trailer;
+       u32 data_size, buf;
+       u16 check;
+       int nsg;
+
+       _enter(",{%d}", skb->len);
+
+       sp = rxrpc_skb(skb);
+
+       /* we want to decrypt the skbuff in-place */
+       nsg = skb_cow_data(skb, 0, &trailer);
+       if (nsg < 0)
+               goto nomem;
+
+       sg = _sg;
+       if (unlikely(nsg > 4)) {
+               sg = kmalloc(sizeof(*sg) * nsg, GFP_NOIO);
+               if (!sg)
+                       goto nomem;
+       }
+
+       skb_to_sgvec(skb, sg, 0, skb->len);
+
+       /* decrypt from the session key */
+       payload = call->conn->key->payload.data;
+       memcpy(&iv, payload->k.session_key, sizeof(iv));
+       desc.tfm = call->conn->cipher;
+       desc.info = iv.x;
+       desc.flags = 0;
+
+       crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len);
+       if (sg != _sg)
+               kfree(sg);
+
+       /* remove the decrypted packet length */
+       if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
+               goto datalen_error;
+       if (!skb_pull(skb, sizeof(sechdr)))
+               BUG();
+
+       buf = ntohl(sechdr.data_size);
+       data_size = buf & 0xffff;
+
+       check = buf >> 16;
+       check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
+       check &= 0xffff;
+       if (check != 0) {
+               *_abort_code = RXKADSEALEDINCON;
+               goto protocol_error;
+       }
+
+       /* shorten the packet to remove the padding */
+       if (data_size > skb->len)
+               goto datalen_error;
+       else if (data_size < skb->len)
+               skb->len = data_size;
+
+       _leave(" = 0 [dlen=%x]", data_size);
+       return 0;
+
+datalen_error:
+       *_abort_code = RXKADDATALEN;
+protocol_error:
+       _leave(" = -EPROTO");
+       return -EPROTO;
+
+nomem:
+       _leave(" = -ENOMEM");
+       return -ENOMEM;
+}
+
+/*
+ * verify the security on a received packet
+ */
+static int rxkad_verify_packet(const struct rxrpc_call *call,
+                              struct sk_buff *skb,
+                              u32 *_abort_code)
+{
+       struct blkcipher_desc desc;
+       struct rxrpc_skb_priv *sp;
+       struct rxrpc_crypt iv;
+       struct scatterlist sg[2];
+       struct {
+               __be32 x[2];
+       } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
+       __be32 x;
+       __be16 cksum;
+       int ret;
+
+       sp = rxrpc_skb(skb);
+
+       _enter("{%d{%x}},{#%u}",
+              call->debug_id, key_serial(call->conn->key),
+              ntohl(sp->hdr.seq));
+
+       if (!call->conn->cipher)
+               return 0;
+
+       if (sp->hdr.securityIndex != 2) {
+               *_abort_code = RXKADINCONSISTENCY;
+               _leave(" = -EPROTO [not rxkad]");
+               return -EPROTO;
+       }
+
+       /* continue encrypting from where we left off */
+       memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
+       desc.tfm = call->conn->cipher;
+       desc.info = iv.x;
+       desc.flags = 0;
+
+       /* validate the security checksum */
+       x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
+       x |= sp->hdr.seq & __constant_cpu_to_be32(0x3fffffff);
+       tmpbuf.x[0] = call->call_id;
+       tmpbuf.x[1] = x;
+
+       memset(&sg, 0, sizeof(sg));
+       sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
+       sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       x = ntohl(tmpbuf.x[1]);
+       x = (x >> 16) & 0xffff;
+       if (x == 0)
+               x = 1; /* zero checksums are not permitted */
+
+       cksum = htons(x);
+       if (sp->hdr.cksum != cksum) {
+               *_abort_code = RXKADSEALEDINCON;
+               _leave(" = -EPROTO [csum failed]");
+               return -EPROTO;
+       }
+
+       switch (call->conn->security_level) {
+       case RXRPC_SECURITY_PLAIN:
+               ret = 0;
+               break;
+       case RXRPC_SECURITY_AUTH:
+               ret = rxkad_verify_packet_auth(call, skb, _abort_code);
+               break;
+       case RXRPC_SECURITY_ENCRYPT:
+               ret = rxkad_verify_packet_encrypt(call, skb, _abort_code);
+               break;
+       default:
+               ret = -ENOANO;
+               break;
+       }
+
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/*
+ * issue a challenge
+ */
+static int rxkad_issue_challenge(struct rxrpc_connection *conn)
+{
+       struct rxkad_challenge challenge;
+       struct rxrpc_header hdr;
+       struct msghdr msg;
+       struct kvec iov[2];
+       size_t len;
+       int ret;
+
+       _enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
+
+       ret = key_validate(conn->key);
+       if (ret < 0)
+               return ret;
+
+       get_random_bytes(&conn->security_nonce, sizeof(conn->security_nonce));
+
+       challenge.version       = htonl(2);
+       challenge.nonce         = htonl(conn->security_nonce);
+       challenge.min_level     = htonl(0);
+       challenge.__padding     = 0;
+
+       msg.msg_name    = &conn->trans->peer->srx.transport.sin;
+       msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_flags   = 0;
+
+       hdr.epoch       = conn->epoch;
+       hdr.cid         = conn->cid;
+       hdr.callNumber  = 0;
+       hdr.seq         = 0;
+       hdr.type        = RXRPC_PACKET_TYPE_CHALLENGE;
+       hdr.flags       = conn->out_clientflag;
+       hdr.userStatus  = 0;
+       hdr.securityIndex = conn->security_ix;
+       hdr._rsvd       = 0;
+       hdr.serviceId   = conn->service_id;
+
+       iov[0].iov_base = &hdr;
+       iov[0].iov_len  = sizeof(hdr);
+       iov[1].iov_base = &challenge;
+       iov[1].iov_len  = sizeof(challenge);
+
+       len = iov[0].iov_len + iov[1].iov_len;
+
+       hdr.serial = htonl(atomic_inc_return(&conn->serial));
+       _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
+
+       ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
+       if (ret < 0) {
+               _debug("sendmsg failed: %d", ret);
+               return -EAGAIN;
+       }
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * send a Kerberos security response
+ */
+static int rxkad_send_response(struct rxrpc_connection *conn,
+                              struct rxrpc_header *hdr,
+                              struct rxkad_response *resp,
+                              const struct rxkad_key *s2)
+{
+       struct msghdr msg;
+       struct kvec iov[3];
+       size_t len;
+       int ret;
+
+       _enter("");
+
+       msg.msg_name    = &conn->trans->peer->srx.transport.sin;
+       msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_flags   = 0;
+
+       hdr->epoch      = conn->epoch;
+       hdr->seq        = 0;
+       hdr->type       = RXRPC_PACKET_TYPE_RESPONSE;
+       hdr->flags      = conn->out_clientflag;
+       hdr->userStatus = 0;
+       hdr->_rsvd      = 0;
+
+       iov[0].iov_base = hdr;
+       iov[0].iov_len  = sizeof(*hdr);
+       iov[1].iov_base = resp;
+       iov[1].iov_len  = sizeof(*resp);
+       iov[2].iov_base = (void *) s2->ticket;
+       iov[2].iov_len  = s2->ticket_len;
+
+       len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
+
+       hdr->serial = htonl(atomic_inc_return(&conn->serial));
+       _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
+
+       ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+       if (ret < 0) {
+               _debug("sendmsg failed: %d", ret);
+               return -EAGAIN;
+       }
+
+       _leave(" = 0");
+       return 0;
+}
+
+/*
+ * calculate the response checksum
+ */
+static void rxkad_calc_response_checksum(struct rxkad_response *response)
+{
+       u32 csum = 1000003;
+       int loop;
+       u8 *p = (u8 *) response;
+
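+       /* fold every byte of the response into a simple multiplicative
+        * checksum */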
+       for (loop = sizeof(*response); loop > 0; loop--)
+               csum = csum * 0x10204081 + *p++;
+
+       response->encrypted.checksum = htonl(csum);
+}
+
+/*
+ * load a scatterlist with a potentially split-page buffer
+ */
+static void rxkad_sg_set_buf2(struct scatterlist sg[2],
+                             void *buf, size_t buflen)
+{
+
+       memset(sg, 0, sizeof(sg[0]) * 2);
+
+       sg_set_buf(&sg[0], buf, buflen);
+       if (sg[0].offset + buflen > PAGE_SIZE) {
+               /* the buffer was split over two pages */
+               sg[0].length = PAGE_SIZE - sg[0].offset;
+               sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length);
+       }
+
+       ASSERTCMP(sg[0].length + sg[1].length, ==, buflen);
+}
+
+/*
+ * encrypt the response packet
+ */
+static void rxkad_encrypt_response(struct rxrpc_connection *conn,
+                                  struct rxkad_response *resp,
+                                  const struct rxkad_key *s2)
+{
+       struct blkcipher_desc desc;
+       struct rxrpc_crypt iv;
+       struct scatterlist ssg[2], dsg[2];
+
+       /* encrypt from the session key */
+       memcpy(&iv, s2->session_key, sizeof(iv));
+       desc.tfm = conn->cipher;
+       desc.info = iv.x;
+       desc.flags = 0;
+
+       rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted));
+       memcpy(dsg, ssg, sizeof(dsg));
+       crypto_blkcipher_encrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted));
+}
+
+/*
+ * respond to a challenge packet
+ */
+static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
+                                     struct sk_buff *skb,
+                                     u32 *_abort_code)
+{
+       const struct rxrpc_key_payload *payload;
+       struct rxkad_challenge challenge;
+       struct rxkad_response resp
+               __attribute__((aligned(8))); /* must be aligned for crypto */
+       struct rxrpc_skb_priv *sp;
+       u32 version, nonce, min_level, abort_code;
+       int ret;
+
+       _enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
+
+       if (!conn->key) {
+               _leave(" = -EPROTO [no key]");
+               return -EPROTO;
+       }
+
+       ret = key_validate(conn->key);
+       if (ret < 0) {
+               *_abort_code = RXKADEXPIRED;
+               return ret;
+       }
+
+       abort_code = RXKADPACKETSHORT;
+       sp = rxrpc_skb(skb);
+       if (skb_copy_bits(skb, 0, &challenge, sizeof(challenge)) < 0)
+               goto protocol_error;
+
+       version = ntohl(challenge.version);
+       nonce = ntohl(challenge.nonce);
+       min_level = ntohl(challenge.min_level);
+
+       _proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }",
+              ntohl(sp->hdr.serial), version, nonce, min_level);
+
+       abort_code = RXKADINCONSISTENCY;
+       if (version != RXKAD_VERSION)
+               goto protocol_error;
+
+       abort_code = RXKADLEVELFAIL;
+       if (conn->security_level < min_level)
+               goto protocol_error;
+
+       payload = conn->key->payload.data;
+
+       /* build the response packet */
+       memset(&resp, 0, sizeof(resp));
+
+       resp.version = RXKAD_VERSION;
+       resp.encrypted.epoch = conn->epoch;
+       resp.encrypted.cid = conn->cid;
+       resp.encrypted.securityIndex = htonl(conn->security_ix);
+       resp.encrypted.call_id[0] =
+               (conn->channels[0] ? conn->channels[0]->call_id : 0);
+       resp.encrypted.call_id[1] =
+               (conn->channels[1] ? conn->channels[1]->call_id : 0);
+       resp.encrypted.call_id[2] =
+               (conn->channels[2] ? conn->channels[2]->call_id : 0);
+       resp.encrypted.call_id[3] =
+               (conn->channels[3] ? conn->channels[3]->call_id : 0);
+       resp.encrypted.inc_nonce = htonl(nonce + 1);
+       resp.encrypted.level = htonl(conn->security_level);
+       resp.kvno = htonl(payload->k.kvno);
+       resp.ticket_len = htonl(payload->k.ticket_len);
+
+       /* calculate the response checksum and then do the encryption */
+       rxkad_calc_response_checksum(&resp);
+       rxkad_encrypt_response(conn, &resp, &payload->k);
+       return rxkad_send_response(conn, &sp->hdr, &resp, &payload->k);
+
+protocol_error:
+       *_abort_code = abort_code;
+       _leave(" = -EPROTO [%d]", abort_code);
+       return -EPROTO;
+}
+
+/*
+ * decrypt the kerberos IV ticket in the response
+ */
+static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
+                               void *ticket, size_t ticket_len,
+                               struct rxrpc_crypt *_session_key,
+                               time_t *_expiry,
+                               u32 *_abort_code)
+{
+       struct blkcipher_desc desc;
+       struct rxrpc_crypt iv, key;
+       struct scatterlist ssg[1], dsg[1];
+       struct in_addr addr;
+       unsigned life;
+       time_t issue, now;
+       bool little_endian;
+       int ret;
+       u8 *p, *q, *name, *end;
+
+       _enter("{%d},{%x}", conn->debug_id, key_serial(conn->server_key));
+
+       *_expiry = 0;
+
+       ret = key_validate(conn->server_key);
+       if (ret < 0) {
+               switch (ret) {
+               case -EKEYEXPIRED:
+                       *_abort_code = RXKADEXPIRED;
+                       goto error;
+               default:
+                       *_abort_code = RXKADNOAUTH;
+                       goto error;
+               }
+       }
+
+       ASSERT(conn->server_key->payload.data != NULL);
+       ASSERTCMP((unsigned long) ticket & 7UL, ==, 0);
+
+       memcpy(&iv, &conn->server_key->type_data, sizeof(iv));
+
+       desc.tfm = conn->server_key->payload.data;
+       desc.info = iv.x;
+       desc.flags = 0;
+
+       sg_init_one(&ssg[0], ticket, ticket_len);
+       memcpy(dsg, ssg, sizeof(dsg));
+       crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, ticket_len);
+
+       p = ticket;
+       end = p + ticket_len;
+
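+       /* Z() extracts the next NUL-terminated, printable string of at most
+        * "size" bytes from the ticket, advancing p past its terminator */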
+#define Z(size)                                                \
+       ({                                              \
+               u8 *__str = p;                          \
+               q = memchr(p, 0, end - p);              \
+               if (!q || q - p > (size))               \
+                       goto bad_ticket;                \
+               for (; p < q; p++)                      \
+                       if (!isprint(*p))               \
+                               goto bad_ticket;        \
+               p++;                                    \
+               __str;                                  \
+       })
+
+       /* extract the ticket flags */
+       _debug("KIV FLAGS: %x", *p);
+       little_endian = *p & 1;
+       p++;
+
+       /* extract the authentication name */
+       name = Z(ANAME_SZ);
+       _debug("KIV ANAME: %s", name);
+
+       /* extract the principal's instance */
+       name = Z(INST_SZ);
+       _debug("KIV INST : %s", name);
+
+       /* extract the principal's authentication domain */
+       name = Z(REALM_SZ);
+       _debug("KIV REALM: %s", name);
+
+       if (end - p < 4 + 8 + 4 + 2)
+               goto bad_ticket;
+
+       /* get the IPv4 address of the entity that requested the ticket */
+       memcpy(&addr, p, sizeof(addr));
+       p += 4;
+       _debug("KIV ADDR : "NIPQUAD_FMT, NIPQUAD(addr));
+
+       /* get the session key from the ticket */
+       memcpy(&key, p, sizeof(key));
+       p += 8;
+       _debug("KIV KEY  : %08x %08x", ntohl(key.n[0]), ntohl(key.n[1]));
+       memcpy(_session_key, &key, sizeof(key));
+
+       /* get the ticket's lifetime */
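+       /* (the lifetime byte counts units of five minutes) */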
+       life = *p++ * 5 * 60;
+       _debug("KIV LIFE : %u", life);
+
+       /* get the issue time of the ticket */
+       if (little_endian) {
+               __le32 stamp;
+               memcpy(&stamp, p, 4);
+               issue = le32_to_cpu(stamp);
+       } else {
+               __be32 stamp;
+               memcpy(&stamp, p, 4);
+               issue = be32_to_cpu(stamp);
+       }
+       p += 4;
+       now = xtime.tv_sec;
+       _debug("KIV ISSUE: %lx [%lx]", issue, now);
+
+       /* check the ticket is in date */
+       if (issue > now) {
+               *_abort_code = RXKADNOAUTH;
+               ret = -EKEYREJECTED;
+               goto error;
+       }
+
+       if (issue < now - life) {
+               *_abort_code = RXKADEXPIRED;
+               ret = -EKEYEXPIRED;
+               goto error;
+       }
+
+       *_expiry = issue + life;
+
+       /* get the service name */
+       name = Z(SNAME_SZ);
+       _debug("KIV SNAME: %s", name);
+
+       /* get the service instance name */
+       name = Z(INST_SZ);
+       _debug("KIV SINST: %s", name);
+
+       ret = 0;
+error:
+       _leave(" = %d", ret);
+       return ret;
+
+bad_ticket:
+       *_abort_code = RXKADBADTICKET;
+       ret = -EBADMSG;
+       goto error;
+}
+
+/*
+ * decrypt the response packet
+ */
+static void rxkad_decrypt_response(struct rxrpc_connection *conn,
+                                  struct rxkad_response *resp,
+                                  const struct rxrpc_crypt *session_key)
+{
+       struct blkcipher_desc desc;
+       struct scatterlist ssg[2], dsg[2];
+       struct rxrpc_crypt iv;
+
+       _enter(",,%08x%08x",
+              ntohl(session_key->n[0]), ntohl(session_key->n[1]));
+
+       ASSERT(rxkad_ci != NULL);
+
+       mutex_lock(&rxkad_ci_mutex);
+       if (crypto_blkcipher_setkey(rxkad_ci, session_key->x,
+                                   sizeof(*session_key)) < 0)
+               BUG();
+
+       memcpy(&iv, session_key, sizeof(iv));
+       desc.tfm = rxkad_ci;
+       desc.info = iv.x;
+       desc.flags = 0;
+
+       rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted));
+       memcpy(dsg, ssg, sizeof(dsg));
+       crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted));
+       mutex_unlock(&rxkad_ci_mutex);
+
+       _leave("");
+}
+
+/*
+ * verify a response
+ */
+static int rxkad_verify_response(struct rxrpc_connection *conn,
+                                struct sk_buff *skb,
+                                u32 *_abort_code)
+{
+       struct rxkad_response response
+               __attribute__((aligned(8))); /* must be aligned for crypto */
+       struct rxrpc_skb_priv *sp;
+       struct rxrpc_crypt session_key;
+       time_t expiry;
+       void *ticket;
+       u32 abort_code, version, kvno, ticket_len, csum, level;
+       int ret;
+
+       _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
+
+       abort_code = RXKADPACKETSHORT;
+       if (skb_copy_bits(skb, 0, &response, sizeof(response)) < 0)
+               goto protocol_error;
+       if (!pskb_pull(skb, sizeof(response)))
+               BUG();
+
+       version = ntohl(response.version);
+       ticket_len = ntohl(response.ticket_len);
+       kvno = ntohl(response.kvno);
+       sp = rxrpc_skb(skb);
+       _proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
+              ntohl(sp->hdr.serial), version, kvno, ticket_len);
+
+       abort_code = RXKADINCONSISTENCY;
+       if (version != RXKAD_VERSION)
+               goto protocol_error;
+
+       abort_code = RXKADTICKETLEN;
+       if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN)
+               goto protocol_error;
+
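+       /* kvno values of RXKAD_TKT_TYPE_KERBEROS_V5 and above denote a
+        * Kerberos 5 ticket, which isn't handled here */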
+       abort_code = RXKADUNKNOWNKEY;
+       if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5)
+               goto protocol_error;
+
+       /* extract the kerberos ticket and decrypt and decode it */
+       ticket = kmalloc(ticket_len, GFP_NOFS);
+       if (!ticket)
+               return -ENOMEM;
+
+       abort_code = RXKADPACKETSHORT;
+       if (skb_copy_bits(skb, 0, ticket, ticket_len) < 0)
+               goto protocol_error_free;
+
+       ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key,
+                                  &expiry, &abort_code);
+       if (ret < 0) {
+               *_abort_code = abort_code;
+               kfree(ticket);
+               return ret;
+       }
+
+       /* use the session key from inside the ticket to decrypt the
+        * response */
+       rxkad_decrypt_response(conn, &response, &session_key);
+
+       abort_code = RXKADSEALEDINCON;
+       if (response.encrypted.epoch != conn->epoch)
+               goto protocol_error_free;
+       if (response.encrypted.cid != conn->cid)
+               goto protocol_error_free;
+       if (ntohl(response.encrypted.securityIndex) != conn->security_ix)
+               goto protocol_error_free;
+       csum = response.encrypted.checksum;
+       response.encrypted.checksum = 0;
+       rxkad_calc_response_checksum(&response);
+       if (response.encrypted.checksum != csum)
+               goto protocol_error_free;
+
+       if (ntohl(response.encrypted.call_id[0]) > INT_MAX ||
+           ntohl(response.encrypted.call_id[1]) > INT_MAX ||
+           ntohl(response.encrypted.call_id[2]) > INT_MAX ||
+           ntohl(response.encrypted.call_id[3]) > INT_MAX)
+               goto protocol_error_free;
+
+       abort_code = RXKADOUTOFSEQUENCE;
+       if (response.encrypted.inc_nonce != htonl(conn->security_nonce + 1))
+               goto protocol_error_free;
+
+       abort_code = RXKADLEVELFAIL;
+       level = ntohl(response.encrypted.level);
+       if (level > RXRPC_SECURITY_ENCRYPT)
+               goto protocol_error_free;
+       conn->security_level = level;
+
+       /* create a key to hold the security data and expiration time - after
+        * this the connection security can be handled in exactly the same way
+        * as for a client connection */
+       ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno);
+       if (ret < 0) {
+               kfree(ticket);
+               return ret;
+       }
+
+       kfree(ticket);
+       _leave(" = 0");
+       return 0;
+
+protocol_error_free:
+       kfree(ticket);
+protocol_error:
+       *_abort_code = abort_code;
+       _leave(" = -EPROTO [%d]", abort_code);
+       return -EPROTO;
+}
+
+/*
+ * clear the connection security
+ */
+static void rxkad_clear(struct rxrpc_connection *conn)
+{
+       _enter("");
+
+       if (conn->cipher)
+               crypto_free_blkcipher(conn->cipher);
+}
+
+/*
+ * RxRPC Kerberos-based security
+ */
+static struct rxrpc_security rxkad = {
+       .owner                          = THIS_MODULE,
+       .name                           = "rxkad",
+       .security_index                 = RXKAD_VERSION,
+       .init_connection_security       = rxkad_init_connection_security,
+       .prime_packet_security          = rxkad_prime_packet_security,
+       .secure_packet                  = rxkad_secure_packet,
+       .verify_packet                  = rxkad_verify_packet,
+       .issue_challenge                = rxkad_issue_challenge,
+       .respond_to_challenge           = rxkad_respond_to_challenge,
+       .verify_response                = rxkad_verify_response,
+       .clear                          = rxkad_clear,
+};
+
+static __init int rxkad_init(void)
+{
+       _enter("");
+
+       /* pin the cipher we need so that the crypto layer doesn't invoke
+        * keventd to go get it */
+       rxkad_ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(rxkad_ci))
+               return PTR_ERR(rxkad_ci);
+
+       return rxrpc_register_security(&rxkad);
+}
+
+module_init(rxkad_init);
+
+static __exit void rxkad_exit(void)
+{
+       _enter("");
+
+       rxrpc_unregister_security(&rxkad);
+       crypto_free_blkcipher(rxkad_ci);
+}
+
+module_exit(rxkad_exit);
diff --git a/net/rxrpc/rxrpc_syms.c b/net/rxrpc/rxrpc_syms.c
deleted file mode 100644 (file)
index 9896fd8..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/* rxrpc_syms.c: exported Rx RPC layer interface symbols
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-
-#include <rxrpc/transport.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/call.h>
-#include <rxrpc/krxiod.h>
-
-/* call.c */
-EXPORT_SYMBOL(rxrpc_create_call);
-EXPORT_SYMBOL(rxrpc_put_call);
-EXPORT_SYMBOL(rxrpc_call_abort);
-EXPORT_SYMBOL(rxrpc_call_read_data);
-EXPORT_SYMBOL(rxrpc_call_write_data);
-
-/* connection.c */
-EXPORT_SYMBOL(rxrpc_create_connection);
-EXPORT_SYMBOL(rxrpc_put_connection);
-
-/* transport.c */
-EXPORT_SYMBOL(rxrpc_create_transport);
-EXPORT_SYMBOL(rxrpc_put_transport);
-EXPORT_SYMBOL(rxrpc_add_service);
-EXPORT_SYMBOL(rxrpc_del_service);
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
deleted file mode 100644 (file)
index 8842907..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/* sysctl.c: Rx RPC control
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/sysctl.h>
-#include <rxrpc/types.h>
-#include <rxrpc/rxrpc.h>
-#include <asm/errno.h>
-#include "internal.h"
-
-int rxrpc_ktrace;
-int rxrpc_kdebug;
-int rxrpc_kproto;
-int rxrpc_knet;
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *rxrpc_sysctl = NULL;
-
-static ctl_table rxrpc_sysctl_table[] = {
-       {
-               .ctl_name       = 1,
-               .procname       = "kdebug",
-               .data           = &rxrpc_kdebug,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec
-       },
-       {
-               .ctl_name       = 2,
-               .procname       = "ktrace",
-               .data           = &rxrpc_ktrace,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec
-       },
-       {
-               .ctl_name       = 3,
-               .procname       = "kproto",
-               .data           = &rxrpc_kproto,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec
-       },
-       {
-               .ctl_name       = 4,
-               .procname       = "knet",
-               .data           = &rxrpc_knet,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec
-       },
-       {
-               .ctl_name       = 5,
-               .procname       = "peertimo",
-               .data           = &rxrpc_peer_timeout,
-               .maxlen         = sizeof(unsigned long),
-               .mode           = 0644,
-               .proc_handler   = &proc_doulongvec_minmax
-       },
-       {
-               .ctl_name       = 6,
-               .procname       = "conntimo",
-               .data           = &rxrpc_conn_timeout,
-               .maxlen         = sizeof(unsigned long),
-               .mode           = 0644,
-               .proc_handler   = &proc_doulongvec_minmax
-       },
-       { .ctl_name = 0 }
-};
-
-static ctl_table rxrpc_dir_sysctl_table[] = {
-       {
-               .ctl_name       = 1,
-               .procname       = "rxrpc",
-               .maxlen         = 0,
-               .mode           = 0555,
-               .child          = rxrpc_sysctl_table
-       },
-       { .ctl_name = 0 }
-};
-#endif /* CONFIG_SYSCTL */
-
-/*****************************************************************************/
-/*
- * initialise the sysctl stuff for Rx RPC
- */
-int rxrpc_sysctl_init(void)
-{
-#ifdef CONFIG_SYSCTL
-       rxrpc_sysctl = register_sysctl_table(rxrpc_dir_sysctl_table);
-       if (!rxrpc_sysctl)
-               return -ENOMEM;
-#endif /* CONFIG_SYSCTL */
-
-       return 0;
-} /* end rxrpc_sysctl_init() */
-
-/*****************************************************************************/
-/*
- * clean up the sysctl stuff for Rx RPC
- */
-void rxrpc_sysctl_cleanup(void)
-{
-#ifdef CONFIG_SYSCTL
-       if (rxrpc_sysctl) {
-               unregister_sysctl_table(rxrpc_sysctl);
-               rxrpc_sysctl = NULL;
-       }
-#endif /* CONFIG_SYSCTL */
-
-} /* end rxrpc_sysctl_cleanup() */
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
deleted file mode 100644 (file)
index 8e57be2..0000000
+++ /dev/null
@@ -1,846 +0,0 @@
-/* transport.c: Rx Transport routines
- *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <rxrpc/transport.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/connection.h>
-#include <rxrpc/call.h>
-#include <rxrpc/message.h>
-#include <rxrpc/krxiod.h>
-#include <rxrpc/krxsecd.h>
-#include <linux/udp.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/icmp.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-#include <linux/ipv6.h>        /* this should _really_ be in errqueue.h.. */
-#endif
-#include <linux/errqueue.h>
-#include <asm/uaccess.h>
-#include "internal.h"
-
-struct errormsg {
-       struct cmsghdr                  cmsg;           /* control message header */
-       struct sock_extended_err        ee;             /* extended error information */
-       struct sockaddr_in              icmp_src;       /* ICMP packet source address */
-};
-
-static DEFINE_SPINLOCK(rxrpc_transports_lock);
-static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);
-
-__RXACCT_DECL(atomic_t rxrpc_transport_count);
-LIST_HEAD(rxrpc_proc_transports);
-DECLARE_RWSEM(rxrpc_proc_transports_sem);
-
-static void rxrpc_data_ready(struct sock *sk, int count);
-static void rxrpc_error_report(struct sock *sk);
-static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
-                                       struct list_head *msgq);
-static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);
-
-/*****************************************************************************/
-/*
- * create a new transport endpoint using the specified UDP port
- */
-int rxrpc_create_transport(unsigned short port,
-                          struct rxrpc_transport **_trans)
-{
-       struct rxrpc_transport *trans;
-       struct sockaddr_in sin;
-       mm_segment_t oldfs;
-       struct sock *sock;
-       int ret, opt;
-
-       _enter("%hu", port);
-
-       trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
-       if (!trans)
-               return -ENOMEM;
-
-       atomic_set(&trans->usage, 1);
-       INIT_LIST_HEAD(&trans->services);
-       INIT_LIST_HEAD(&trans->link);
-       INIT_LIST_HEAD(&trans->krxiodq_link);
-       spin_lock_init(&trans->lock);
-       INIT_LIST_HEAD(&trans->peer_active);
-       INIT_LIST_HEAD(&trans->peer_graveyard);
-       spin_lock_init(&trans->peer_gylock);
-       init_waitqueue_head(&trans->peer_gy_waitq);
-       rwlock_init(&trans->peer_lock);
-       atomic_set(&trans->peer_count, 0);
-       trans->port = port;
-
-       /* create a UDP socket to be my actual transport endpoint */
-       ret = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket);
-       if (ret < 0)
-               goto error;
-
-       /* use the specified port */
-       if (port) {
-               memset(&sin, 0, sizeof(sin));
-               sin.sin_family = AF_INET;
-               sin.sin_port = htons(port);
-               ret = trans->socket->ops->bind(trans->socket,
-                                              (struct sockaddr *) &sin,
-                                              sizeof(sin));
-               if (ret < 0)
-                       goto error;
-       }
-
-       opt = 1;
-       oldfs = get_fs();
-       set_fs(KERNEL_DS);
-       ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR,
-                                            (char *) &opt, sizeof(opt));
-       set_fs(oldfs);
-
-       spin_lock(&rxrpc_transports_lock);
-       list_add(&trans->link, &rxrpc_transports);
-       spin_unlock(&rxrpc_transports_lock);
-
-       /* set the socket up */
-       sock = trans->socket->sk;
-       sock->sk_user_data      = trans;
-       sock->sk_data_ready     = rxrpc_data_ready;
-       sock->sk_error_report   = rxrpc_error_report;
-
-       down_write(&rxrpc_proc_transports_sem);
-       list_add_tail(&trans->proc_link, &rxrpc_proc_transports);
-       up_write(&rxrpc_proc_transports_sem);
-
-       __RXACCT(atomic_inc(&rxrpc_transport_count));
-
-       *_trans = trans;
-       _leave(" = 0 (%p)", trans);
-       return 0;
-
- error:
-       /* finish cleaning up the transport (not really needed here, but...) */
-       if (trans->socket)
-               trans->socket->ops->shutdown(trans->socket, 2);
-
-       /* close the socket */
-       if (trans->socket) {
-               trans->socket->sk->sk_user_data = NULL;
-               sock_release(trans->socket);
-               trans->socket = NULL;
-       }
-
-       kfree(trans);
-
-
-       _leave(" = %d", ret);
-       return ret;
-} /* end rxrpc_create_transport() */
-
-/*****************************************************************************/
-/*
- * destroy a transport endpoint
- */
-void rxrpc_put_transport(struct rxrpc_transport *trans)
-{
-       _enter("%p{u=%d p=%hu}",
-              trans, atomic_read(&trans->usage), trans->port);
-
-       BUG_ON(atomic_read(&trans->usage) <= 0);
-
-       /* to prevent a race, the decrement and the dequeue must be
-        * effectively atomic */
-       spin_lock(&rxrpc_transports_lock);
-       if (likely(!atomic_dec_and_test(&trans->usage))) {
-               spin_unlock(&rxrpc_transports_lock);
-               _leave("");
-               return;
-       }
-
-       list_del(&trans->link);
-       spin_unlock(&rxrpc_transports_lock);
-
-       /* finish cleaning up the transport */
-       if (trans->socket)
-               trans->socket->ops->shutdown(trans->socket, 2);
-
-       rxrpc_krxsecd_clear_transport(trans);
-       rxrpc_krxiod_dequeue_transport(trans);
-
-       /* discard all peer information */
-       rxrpc_peer_clearall(trans);
-
-       down_write(&rxrpc_proc_transports_sem);
-       list_del(&trans->proc_link);
-       up_write(&rxrpc_proc_transports_sem);
-       __RXACCT(atomic_dec(&rxrpc_transport_count));
-
-       /* close the socket */
-       if (trans->socket) {
-               trans->socket->sk->sk_user_data = NULL;
-               sock_release(trans->socket);
-               trans->socket = NULL;
-       }
-
-       kfree(trans);
-
-       _leave("");
-} /* end rxrpc_put_transport() */
-
-/*****************************************************************************/
-/*
- * add a service to a transport to be listened upon
- */
-int rxrpc_add_service(struct rxrpc_transport *trans,
-                     struct rxrpc_service *newsrv)
-{
-       struct rxrpc_service *srv;
-       struct list_head *_p;
-       int ret = -EEXIST;
-
-       _enter("%p{%hu},%p{%hu}",
-              trans, trans->port, newsrv, newsrv->service_id);
-
-       /* verify that the service ID is not already present */
-       spin_lock(&trans->lock);
-
-       list_for_each(_p, &trans->services) {
-               srv = list_entry(_p, struct rxrpc_service, link);
-               if (srv->service_id == newsrv->service_id)
-                       goto out;
-       }
-
-       /* okay - add the transport to the list */
-       list_add_tail(&newsrv->link, &trans->services);
-       rxrpc_get_transport(trans);
-       ret = 0;
-
- out:
-       spin_unlock(&trans->lock);
-
-       _leave("= %d", ret);
-       return ret;
-} /* end rxrpc_add_service() */
-
-/*****************************************************************************/
-/*
- * remove a service from a transport
- */
-void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
-{
-       _enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id);
-
-       spin_lock(&trans->lock);
-       list_del(&srv->link);
-       spin_unlock(&trans->lock);
-
-       rxrpc_put_transport(trans);
-
-       _leave("");
-} /* end rxrpc_del_service() */
-
-/*****************************************************************************/
-/*
- * INET callback when data has been received on the socket.
- */
-static void rxrpc_data_ready(struct sock *sk, int count)
-{
-       struct rxrpc_transport *trans;
-
-       _enter("%p{t=%p},%d", sk, sk->sk_user_data, count);
-
-       /* queue the transport for attention by krxiod */
-       trans = (struct rxrpc_transport *) sk->sk_user_data;
-       if (trans)
-               rxrpc_krxiod_queue_transport(trans);
-
-       /* wake up anyone waiting on the socket */
-       if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-               wake_up_interruptible(sk->sk_sleep);
-
-       _leave("");
-} /* end rxrpc_data_ready() */
-
-/*****************************************************************************/
-/*
- * INET callback when an ICMP error packet is received
- * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE)
- */
-static void rxrpc_error_report(struct sock *sk)
-{
-       struct rxrpc_transport *trans;
-
-       _enter("%p{t=%p}", sk, sk->sk_user_data);
-
-       /* queue the transport for attention by krxiod */
-       trans = (struct rxrpc_transport *) sk->sk_user_data;
-       if (trans) {
-               trans->error_rcvd = 1;
-               rxrpc_krxiod_queue_transport(trans);
-       }
-
-       /* wake up anyone waiting on the socket */
-       if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-               wake_up_interruptible(sk->sk_sleep);
-
-       _leave("");
-} /* end rxrpc_error_report() */
-
-/*****************************************************************************/
-/*
- * split a message up, allocating message records and filling them in
- * from the contents of a socket buffer
- */
-static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
-                             struct sk_buff *pkt,
-                             struct list_head *msgq)
-{
-       struct rxrpc_message *msg;
-       int ret;
-
-       _enter("");
-
-       msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
-       if (!msg) {
-               _leave(" = -ENOMEM");
-               return -ENOMEM;
-       }
-
-       atomic_set(&msg->usage, 1);
-       list_add_tail(&msg->link,msgq);
-
-       /* dig out the Rx routing parameters */
-       if (skb_copy_bits(pkt, sizeof(struct udphdr),
-                         &msg->hdr, sizeof(msg->hdr)) < 0) {
-               ret = -EBADMSG;
-               goto error;
-       }
-
-       msg->trans = trans;
-       msg->state = RXRPC_MSG_RECEIVED;
-       skb_get_timestamp(pkt, &msg->stamp);
-       if (msg->stamp.tv_sec == 0) {
-               do_gettimeofday(&msg->stamp);
-               if (pkt->sk)
-                       sock_enable_timestamp(pkt->sk);
-       }
-       msg->seq = ntohl(msg->hdr.seq);
-
-       /* attach the packet */
-       skb_get(pkt);
-       msg->pkt = pkt;
-
-       msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header);
-       msg->dsize = msg->pkt->len - msg->offset;
-
-       _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
-            msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
-            ntohl(msg->hdr.epoch),
-            (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
-            ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
-            ntohl(msg->hdr.callNumber),
-            rxrpc_pkts[msg->hdr.type],
-            msg->hdr.flags,
-            ntohs(msg->hdr.serviceId),
-            msg->hdr.securityIndex);
-
-       __RXACCT(atomic_inc(&rxrpc_message_count));
-
-       /* split off jumbo packets */
-       while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
-              msg->hdr.flags & RXRPC_JUMBO_PACKET
-              ) {
-               struct rxrpc_jumbo_header jumbo;
-               struct rxrpc_message *jumbomsg = msg;
-
-               _debug("split jumbo packet");
-
-               /* quick sanity check */
-               ret = -EBADMSG;
-               if (msg->dsize <
-                   RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
-                       goto error;
-               if (msg->hdr.flags & RXRPC_LAST_PACKET)
-                       goto error;
-
-               /* dig out the secondary header */
-               if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN,
-                                 &jumbo, sizeof(jumbo)) < 0)
-                       goto error;
-
-               /* allocate a new message record */
-               ret = -ENOMEM;
-               msg = kmemdup(jumbomsg, sizeof(struct rxrpc_message), GFP_KERNEL);
-               if (!msg)
-                       goto error;
-
-               list_add_tail(&msg->link, msgq);
-
-               /* adjust the jumbo packet */
-               jumbomsg->dsize = RXRPC_JUMBO_DATALEN;
-
-               /* attach the packet here too */
-               skb_get(pkt);
-
-               /* adjust the parameters */
-               msg->seq++;
-               msg->hdr.seq = htonl(msg->seq);
-               msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
-               msg->offset += RXRPC_JUMBO_DATALEN +
-                       sizeof(struct rxrpc_jumbo_header);
-               msg->dsize -= RXRPC_JUMBO_DATALEN +
-                       sizeof(struct rxrpc_jumbo_header);
-               msg->hdr.flags = jumbo.flags;
-               msg->hdr._rsvd = jumbo._rsvd;
-
-               _net("Rx Split jumbo packet from %s"
-                    " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
-                    msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
-                    ntohl(msg->hdr.epoch),
-                    (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
-                    ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
-                    ntohl(msg->hdr.callNumber),
-                    rxrpc_pkts[msg->hdr.type],
-                    msg->hdr.flags,
-                    ntohs(msg->hdr.serviceId),
-                    msg->hdr.securityIndex);
-
-               __RXACCT(atomic_inc(&rxrpc_message_count));
-       }
-
-       _leave(" = 0 #%d", atomic_read(&rxrpc_message_count));
-       return 0;
-
- error:
-       while (!list_empty(msgq)) {
-               msg = list_entry(msgq->next, struct rxrpc_message, link);
-               list_del_init(&msg->link);
-
-               rxrpc_put_message(msg);
-       }
-
-       _leave(" = %d", ret);
-       return ret;
-} /* end rxrpc_incoming_msg() */
-
-/*****************************************************************************/
-/*
- * accept a new call
- * - called from krxiod in process context
- */
-void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
-{
-       struct rxrpc_message *msg;
-       struct rxrpc_peer *peer;
-       struct sk_buff *pkt;
-       int ret;
-       __be32 addr;
-       __be16 port;
-
-       LIST_HEAD(msgq);
-
-       _enter("%p{%d}", trans, trans->port);
-
-       for (;;) {
-               /* deal with outstanding errors first */
-               if (trans->error_rcvd)
-                       rxrpc_trans_receive_error_report(trans);
-
-               /* attempt to receive a packet */
-               pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret);
-               if (!pkt) {
-                       if (ret == -EAGAIN) {
-                               _leave(" EAGAIN");
-                               return;
-                       }
-
-                       /* an icmp error may have occurred */
-                       rxrpc_krxiod_queue_transport(trans);
-                       _leave(" error %d\n", ret);
-                       return;
-               }
-
-               /* we'll probably need to checksum it (didn't call
-                * sock_recvmsg) */
-               if (skb_checksum_complete(pkt)) {
-                       kfree_skb(pkt);
-                       rxrpc_krxiod_queue_transport(trans);
-                       _leave(" CSUM failed");
-                       return;
-               }
-
-               addr = pkt->nh.iph->saddr;
-               port = pkt->h.uh->source;
-
-               _net("Rx Received UDP packet from %08x:%04hu",
-                    ntohl(addr), ntohs(port));
-
-               /* unmarshall the Rx parameters and split jumbo packets */
-               ret = rxrpc_incoming_msg(trans, pkt, &msgq);
-               if (ret < 0) {
-                       kfree_skb(pkt);
-                       rxrpc_krxiod_queue_transport(trans);
-                       _leave(" bad packet");
-                       return;
-               }
-
-               BUG_ON(list_empty(&msgq));
-
-               msg = list_entry(msgq.next, struct rxrpc_message, link);
-
-               /* locate the record for the peer from which it
-                * originated */
-               ret = rxrpc_peer_lookup(trans, addr, &peer);
-               if (ret < 0) {
-                       kdebug("Rx No connections from that peer");
-                       rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
-                       goto finished_msg;
-               }
-
-               /* try and find a matching connection */
-               ret = rxrpc_connection_lookup(peer, msg, &msg->conn);
-               if (ret < 0) {
-                       kdebug("Rx Unknown Connection");
-                       rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
-                       rxrpc_put_peer(peer);
-                       goto finished_msg;
-               }
-               rxrpc_put_peer(peer);
-
-               /* deal with the first packet of a new call */
-               if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
-                   msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
-                   ntohl(msg->hdr.seq) == 1
-                   ) {
-                       _debug("Rx New server call");
-                       rxrpc_trans_receive_new_call(trans, &msgq);
-                       goto finished_msg;
-               }
-
-               /* deal with subsequent packet(s) of call */
-               _debug("Rx Call packet");
-               while (!list_empty(&msgq)) {
-                       msg = list_entry(msgq.next, struct rxrpc_message, link);
-                       list_del_init(&msg->link);
-
-                       ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg);
-                       if (ret < 0) {
-                               rxrpc_trans_immediate_abort(trans, msg, ret);
-                               rxrpc_put_message(msg);
-                               goto finished_msg;
-                       }
-
-                       rxrpc_put_message(msg);
-               }
-
-               goto finished_msg;
-
-               /* dispose of the packets */
-       finished_msg:
-               while (!list_empty(&msgq)) {
-                       msg = list_entry(msgq.next, struct rxrpc_message, link);
-                       list_del_init(&msg->link);
-
-                       rxrpc_put_message(msg);
-               }
-               kfree_skb(pkt);
-       }
-
-       _leave("");
-
-} /* end rxrpc_trans_receive_packet() */
-
-/*****************************************************************************/
-/*
- * accept a new call from a client trying to connect to one of my services
- * - called in process context
- */
-static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
-                                       struct list_head *msgq)
-{
-       struct rxrpc_message *msg;
-
-       _enter("");
-
-       /* only bother with the first packet */
-       msg = list_entry(msgq->next, struct rxrpc_message, link);
-       list_del_init(&msg->link);
-       rxrpc_krxsecd_queue_incoming_call(msg);
-       rxrpc_put_message(msg);
-
-       _leave(" = 0");
-
-       return 0;
-} /* end rxrpc_trans_receive_new_call() */
-
-/*****************************************************************************/
-/*
- * perform an immediate abort without connection or call structures
- */
-int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
-                               struct rxrpc_message *msg,
-                               int error)
-{
-       struct rxrpc_header ahdr;
-       struct sockaddr_in sin;
-       struct msghdr msghdr;
-       struct kvec iov[2];
-       __be32 _error;
-       int len, ret;
-
-       _enter("%p,%p,%d", trans, msg, error);
-
-       /* don't abort an abort packet */
-       if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) {
-               _leave(" = 0");
-               return 0;
-       }
-
-       _error = htonl(-error);
-
-       /* set up the message to be transmitted */
-       memcpy(&ahdr, &msg->hdr, sizeof(ahdr));
-       ahdr.epoch      = msg->hdr.epoch;
-       ahdr.serial     = htonl(1);
-       ahdr.seq        = 0;
-       ahdr.type       = RXRPC_PACKET_TYPE_ABORT;
-       ahdr.flags      = RXRPC_LAST_PACKET;
-       ahdr.flags      |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED;
-
-       iov[0].iov_len  = sizeof(ahdr);
-       iov[0].iov_base = &ahdr;
-       iov[1].iov_len  = sizeof(_error);
-       iov[1].iov_base = &_error;
-
-       len = sizeof(ahdr) + sizeof(_error);
-
-       memset(&sin,0,sizeof(sin));
-       sin.sin_family          = AF_INET;
-       sin.sin_port            = msg->pkt->h.uh->source;
-       sin.sin_addr.s_addr     = msg->pkt->nh.iph->saddr;
-
-       msghdr.msg_name         = &sin;
-       msghdr.msg_namelen      = sizeof(sin);
-       msghdr.msg_control      = NULL;
-       msghdr.msg_controllen   = 0;
-       msghdr.msg_flags        = MSG_DONTWAIT;
-
-       _net("Sending message type %d of %d bytes to %08x:%d",
-            ahdr.type,
-            len,
-            ntohl(sin.sin_addr.s_addr),
-            ntohs(sin.sin_port));
-
-       /* send the message */
-       ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len);
-
-       _leave(" = %d", ret);
-       return ret;
-} /* end rxrpc_trans_immediate_abort() */
-
-/*****************************************************************************/
-/*
- * receive an ICMP error report and percolate it to all connections
- * heading to the affected host or port
- */
-static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
-{
-       struct rxrpc_connection *conn;
-       struct sockaddr_in sin;
-       struct rxrpc_peer *peer;
-       struct list_head connq, *_p;
-       struct errormsg emsg;
-       struct msghdr msg;
-       __be16 port;
-       int local, err;
-
-       _enter("%p", trans);
-
-       for (;;) {
-               trans->error_rcvd = 0;
-
-               /* try and receive an error message */
-               msg.msg_name    = &sin;
-               msg.msg_namelen = sizeof(sin);
-               msg.msg_control = &emsg;
-               msg.msg_controllen = sizeof(emsg);
-               msg.msg_flags   = 0;
-
-               err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
-                                  MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);
-
-               if (err == -EAGAIN) {
-                       _leave("");
-                       return;
-               }
-
-               if (err < 0) {
-                       printk("%s: unable to recv an error report: %d\n",
-                              __FUNCTION__, err);
-                       _leave("");
-                       return;
-               }
-
-               msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg;
-
-               if (msg.msg_controllen < sizeof(emsg.cmsg) ||
-                   msg.msg_namelen < sizeof(sin)) {
-                       printk("%s: short control message"
-                              " (nlen=%u clen=%Zu fl=%x)\n",
-                              __FUNCTION__,
-                              msg.msg_namelen,
-                              msg.msg_controllen,
-                              msg.msg_flags);
-                       continue;
-               }
-
-               _net("Rx Received control message"
-                    " { len=%Zu level=%u type=%u }",
-                    emsg.cmsg.cmsg_len,
-                    emsg.cmsg.cmsg_level,
-                    emsg.cmsg.cmsg_type);
-
-               if (sin.sin_family != AF_INET) {
-                       printk("Rx Ignoring error report with non-INET address"
-                              " (fam=%u)",
-                              sin.sin_family);
-                       continue;
-               }
-
-               _net("Rx Received message pertaining to host addr=%x port=%hu",
-                    ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));
-
-               if (emsg.cmsg.cmsg_level != SOL_IP ||
-                   emsg.cmsg.cmsg_type != IP_RECVERR) {
-                       printk("Rx Ignoring unknown error report"
-                              " { level=%u type=%u }",
-                              emsg.cmsg.cmsg_level,
-                              emsg.cmsg.cmsg_type);
-                       continue;
-               }
-
-               if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) {
-                       printk("%s: short error message (%Zu)\n",
-                              __FUNCTION__, msg.msg_controllen);
-                       _leave("");
-                       return;
-               }
-
-               port = sin.sin_port;
-
-               switch (emsg.ee.ee_origin) {
-               case SO_EE_ORIGIN_ICMP:
-                       local = 0;
-                       switch (emsg.ee.ee_type) {
-                       case ICMP_DEST_UNREACH:
-                               switch (emsg.ee.ee_code) {
-                               case ICMP_NET_UNREACH:
-                                       _net("Rx Received ICMP Network Unreachable");
-                                       port = 0;
-                                       err = -ENETUNREACH;
-                                       break;
-                               case ICMP_HOST_UNREACH:
-                                       _net("Rx Received ICMP Host Unreachable");
-                                       port = 0;
-                                       err = -EHOSTUNREACH;
-                                       break;
-                               case ICMP_PORT_UNREACH:
-                                       _net("Rx Received ICMP Port Unreachable");
-                                       err = -ECONNREFUSED;
-                                       break;
-                               case ICMP_NET_UNKNOWN:
-                                       _net("Rx Received ICMP Unknown Network");
-                                       port = 0;
-                                       err = -ENETUNREACH;
-                                       break;
-                               case ICMP_HOST_UNKNOWN:
-                                       _net("Rx Received ICMP Unknown Host");
-                                       port = 0;
-                                       err = -EHOSTUNREACH;
-                                       break;
-                               default:
-                                       _net("Rx Received ICMP DestUnreach { code=%u }",
-                                            emsg.ee.ee_code);
-                                       err = emsg.ee.ee_errno;
-                                       break;
-                               }
-                               break;
-
-                       case ICMP_TIME_EXCEEDED:
-                               _net("Rx Received ICMP TTL Exceeded");
-                               err = emsg.ee.ee_errno;
-                               break;
-
-                       default:
-                               _proto("Rx Received ICMP error { type=%u code=%u }",
-                                      emsg.ee.ee_type, emsg.ee.ee_code);
-                               err = emsg.ee.ee_errno;
-                               break;
-                       }
-                       break;
-
-               case SO_EE_ORIGIN_LOCAL:
-                       _proto("Rx Received local error { error=%d }",
-                              emsg.ee.ee_errno);
-                       local = 1;
-                       err = emsg.ee.ee_errno;
-                       break;
-
-               case SO_EE_ORIGIN_NONE:
-               case SO_EE_ORIGIN_ICMP6:
-               default:
-                       _proto("Rx Received error report { orig=%u }",
-                              emsg.ee.ee_origin);
-                       local = 0;
-                       err = emsg.ee.ee_errno;
-                       break;
-               }
-
-               /* find all the connections between this transport and the
-                * affected destination */
-               INIT_LIST_HEAD(&connq);
-
-               if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr,
-                                     &peer) == 0) {
-                       read_lock(&peer->conn_lock);
-                       list_for_each(_p, &peer->conn_active) {
-                               conn = list_entry(_p, struct rxrpc_connection,
-                                                 link);
-                               if (port && conn->addr.sin_port != port)
-                                       continue;
-                               if (!list_empty(&conn->err_link))
-                                       continue;
-
-                               rxrpc_get_connection(conn);
-                               list_add_tail(&conn->err_link, &connq);
-                       }
-                       read_unlock(&peer->conn_lock);
-
-                       /* service all those connections */
-                       while (!list_empty(&connq)) {
-                               conn = list_entry(connq.next,
-                                                 struct rxrpc_connection,
-                                                 err_link);
-                               list_del(&conn->err_link);
-
-                               rxrpc_conn_handle_error(conn, local, err);
-
-                               rxrpc_put_connection(conn);
-                       }
-
-                       rxrpc_put_peer(peer);
-               }
-       }
-
-       _leave("");
-       return;
-} /* end rxrpc_trans_receive_error_report() */
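The function removed above drains the UDP socket's error queue and maps ICMP reports onto rxrpc connections. For reference, a minimal userspace sketch of the same MSG_ERRQUEUE/IP_RECVERR pattern; this is illustrative only, not part of the patch, and error handling is abbreviated:

#include <arpa/inet.h>
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void drain_error_queue(int fd)
{
	char cbuf[512], dbuf[256];
	struct sockaddr_in sin;
	struct iovec iov = { dbuf, sizeof(dbuf) };
	struct sock_extended_err *ee;
	struct msghdr msg;
	struct cmsghdr *cm;
	int on = 1;

	/* have the kernel queue ICMP-derived errors on this socket */
	setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on));

	for (;;) {
		memset(&msg, 0, sizeof(msg));
		msg.msg_name = &sin;
		msg.msg_namelen = sizeof(sin);
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = cbuf;
		msg.msg_controllen = sizeof(cbuf);

		if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
			return;	/* EAGAIN: queue drained */

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level != SOL_IP || cm->cmsg_type != IP_RECVERR)
				continue;
			ee = (struct sock_extended_err *)CMSG_DATA(cm);
			printf("err from %s: origin=%u type=%u code=%u errno=%d\n",
			       inet_ntoa(sin.sin_addr), ee->ee_origin,
			       ee->ee_type, ee->ee_code, ee->ee_errno);
		}
	}
}

The kernel-side loop above follows the same shape: recvmsg with MSG_ERRQUEUE, check the cmsg for SOL_IP/IP_RECVERR, then branch on ee_origin/ee_type/ee_code to decide which connections to fail.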
index f4544dd..475df84 100644 (file)
@@ -46,62 +46,6 @@ config NET_SCH_FIFO
 
 if NET_SCHED
 
-choice
-       prompt "Packet scheduler clock source"
-       default NET_SCH_CLK_GETTIMEOFDAY
-       ---help---
-         Packet schedulers need a monotonic clock that increments at a static
-         rate. The kernel provides several suitable interfaces, each with
-         different properties:
-         
-         - high resolution (us or better)
-         - fast to read (minimal locking, no i/o access)
-         - synchronized on all processors
-         - handles cpu clock frequency changes
-
-         but nothing provides all of the above.
-
-config NET_SCH_CLK_JIFFIES
-       bool "Timer interrupt"
-       ---help---
-         Say Y here if you want to use the timer interrupt (jiffies) as clock
-         source. This clock source is fast, synchronized on all processors and
-         handles cpu clock frequency changes, but its resolution is too low
-         for accurate shaping except at very low speed.
-
-config NET_SCH_CLK_GETTIMEOFDAY
-       bool "gettimeofday"
-       ---help---
-         Say Y here if you want to use gettimeofday as clock source. This clock
-         source has high resolution, is synchronized on all processors and
-         handles cpu clock frequency changes, but it is slow.
-
-         Choose this if you need a high resolution clock source but can't use
-         the CPU's cycle counter.
-
-# don't allow on SMP x86 because they can have unsynchronized TSCs.
-# gettimeofday is a good alternative
-config NET_SCH_CLK_CPU
-       bool "CPU cycle counter"
-       depends on ((X86_TSC || X86_64) && !SMP) || ALPHA || SPARC64 || PPC64 || IA64
-       ---help---
-         Say Y here if you want to use the CPU's cycle counter as clock source.
-         This is a cheap and high resolution clock source, but on some
-         architectures it is not synchronized on all processors and doesn't
-         handle cpu clock frequency changes.
-
-         The useable cycle counters are:
-
-               x86/x86_64      - Timestamp Counter
-               alpha           - Cycle Counter
-               sparc64         - %ticks register
-               ppc64           - Time base
-               ia64            - Interval Time Counter
-
-         Choose this if your CPU's cycle counter is working properly.
-
-endchoice
-
 comment "Queueing/Scheduling"
 
 config NET_SCH_CBQ
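The clock-source choice above goes away because qdisc timekeeping now sits on the kernel's high-resolution monotonic clock (see the psched_get_time()/hrtimer changes further down), which already has the properties the removed help text asks for. Roughly the same idea expressed as a userspace sketch, for illustration only; the names here are not kernel APIs:

#include <stdint.h>
#include <time.h>

/* high resolution, monotonic, frequency-change safe: provided directly
 * by CLOCK_MONOTONIC, so no per-architecture choice is needed */
static uint64_t mono_time_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000ull + (uint64_t)ts.tv_nsec / 1000;
}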
index cb21617..711dd26 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
 #include <linux/init.h>
 #include <linux/kmod.h>
 #include <net/sock.h>
 #include <net/sch_generic.h>
 #include <net/act_api.h>
+#include <net/netlink.h>
 
 void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
 {
@@ -93,15 +93,15 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
                                continue;
                        a->priv = p;
                        a->order = n_i;
-                       r = (struct rtattr*) skb->tail;
+                       r = (struct rtattr *)skb_tail_pointer(skb);
                        RTA_PUT(skb, a->order, 0, NULL);
                        err = tcf_action_dump_1(skb, a, 0, 0);
                        if (err < 0) {
                                index--;
-                               skb_trim(skb, (u8*)r - skb->data);
+                               nlmsg_trim(skb, r);
                                goto done;
                        }
-                       r->rta_len = skb->tail - (u8*)r;
+                       r->rta_len = skb_tail_pointer(skb) - (u8 *)r;
                        n_i++;
                        if (n_i >= TCA_ACT_MAX_PRIO)
                                goto done;
@@ -114,7 +114,7 @@ done:
        return n_i;
 
 rtattr_failure:
-       skb_trim(skb, (u8*)r - skb->data);
+       nlmsg_trim(skb, r);
        goto done;
 }
 
@@ -125,7 +125,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
        struct rtattr *r ;
        int i= 0, n_i = 0;
 
-       r = (struct rtattr*) skb->tail;
+       r = (struct rtattr *)skb_tail_pointer(skb);
        RTA_PUT(skb, a->order, 0, NULL);
        RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
        for (i = 0; i < (hinfo->hmask + 1); i++) {
@@ -140,11 +140,11 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
                }
        }
        RTA_PUT(skb, TCA_FCNT, 4, &n_i);
-       r->rta_len = skb->tail - (u8*)r;
+       r->rta_len = skb_tail_pointer(skb) - (u8 *)r;
 
        return n_i;
 rtattr_failure:
-       skb_trim(skb, (u8*)r - skb->data);
+       nlmsg_trim(skb, r);
        return -EINVAL;
 }
 
@@ -423,7 +423,7 @@ int
 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
        int err = -EINVAL;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *r;
 
        if (a->ops == NULL || a->ops->dump == NULL)
@@ -432,15 +432,15 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
        if (tcf_action_copy_stats(skb, a, 0))
                goto rtattr_failure;
-       r = (struct rtattr*) skb->tail;
+       r = (struct rtattr *)skb_tail_pointer(skb);
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
-               r->rta_len = skb->tail - (u8*)r;
+               r->rta_len = skb_tail_pointer(skb) - (u8 *)r;
                return err;
        }
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -449,17 +449,17 @@ tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
 {
        struct tc_action *a;
        int err = -EINVAL;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *r ;
 
        while ((a = act) != NULL) {
-               r = (struct rtattr*) skb->tail;
+               r = (struct rtattr *)skb_tail_pointer(skb);
                act = a->next;
                RTA_PUT(skb, a->order, 0, NULL);
                err = tcf_action_dump_1(skb, a, bind, ref);
                if (err < 0)
                        goto errout;
-               r->rta_len = skb->tail - (u8*)r;
+               r->rta_len = skb_tail_pointer(skb) - (u8 *)r;
        }
 
        return 0;
@@ -467,7 +467,7 @@ tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
 rtattr_failure:
        err = -EINVAL;
 errout:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return err;
 }
 
@@ -635,7 +635,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
 {
        struct tcamsg *t;
        struct nlmsghdr *nlh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *x;
 
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
@@ -645,20 +645,20 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;
 
-       x = (struct rtattr*) skb->tail;
+       x = (struct rtattr *)skb_tail_pointer(skb);
        RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
 
        if (tcf_action_dump(skb, a, bind, ref) < 0)
                goto rtattr_failure;
 
-       x->rta_len = skb->tail - (u8*)x;
+       x->rta_len = skb_tail_pointer(skb) - (u8 *)x;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 rtattr_failure:
 nlmsg_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -767,7 +767,7 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
                return -ENOBUFS;
        }
 
-       b = (unsigned char *)skb->tail;
+       b = skb_tail_pointer(skb);
 
        if (rtattr_parse_nested(tb, TCA_ACT_MAX, rta) < 0)
                goto err_out;
@@ -783,16 +783,16 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;
 
-       x = (struct rtattr *) skb->tail;
+       x = (struct rtattr *)skb_tail_pointer(skb);
        RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
 
        err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
        if (err < 0)
                goto rtattr_failure;
 
-       x->rta_len = skb->tail - (u8 *) x;
+       x->rta_len = skb_tail_pointer(skb) - (u8 *)x;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        nlh->nlmsg_flags |= NLM_F_ROOT;
        module_put(a->ops->owner);
        kfree(a);
@@ -884,7 +884,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
        if (!skb)
                return -ENOBUFS;
 
-       b = (unsigned char *)skb->tail;
+       b = skb_tail_pointer(skb);
 
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
        t = NLMSG_DATA(nlh);
@@ -892,15 +892,15 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;
 
-       x = (struct rtattr*) skb->tail;
+       x = (struct rtattr *)skb_tail_pointer(skb);
        RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
 
        if (tcf_action_dump(skb, a, 0, 0) < 0)
                goto rtattr_failure;
 
-       x->rta_len = skb->tail - (u8*)x;
+       x->rta_len = skb_tail_pointer(skb) - (u8 *)x;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        NETLINK_CB(skb).dst_group = RTNLGRP_TC;
 
        err = rtnetlink_send(skb, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
@@ -1015,7 +1015,7 @@ static int
 tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct nlmsghdr *nlh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *x;
        struct tc_action_ops *a_o;
        struct tc_action a;
@@ -1048,7 +1048,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;
 
-       x = (struct rtattr *) skb->tail;
+       x = (struct rtattr *)skb_tail_pointer(skb);
        RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
 
        ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
@@ -1056,12 +1056,12 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
                goto rtattr_failure;
 
        if (ret > 0) {
-               x->rta_len = skb->tail - (u8 *) x;
+               x->rta_len = skb_tail_pointer(skb) - (u8 *)x;
                ret = skb->len;
        } else
-               skb_trim(skb, (u8*)x - skb->data);
+               nlmsg_trim(skb, x);
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        if (NETLINK_CB(cb->skb).pid && ret)
                nlh->nlmsg_flags |= NLM_F_MULTI;
        module_put(a_o->owner);
@@ -1070,20 +1070,15 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
 rtattr_failure:
 nlmsg_failure:
        module_put(a_o->owner);
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return skb->len;
 }
 
 static int __init tc_action_init(void)
 {
-       struct rtnetlink_link *link_p = rtnetlink_links[PF_UNSPEC];
-
-       if (link_p) {
-               link_p[RTM_NEWACTION-RTM_BASE].doit = tc_ctl_action;
-               link_p[RTM_DELACTION-RTM_BASE].doit = tc_ctl_action;
-               link_p[RTM_GETACTION-RTM_BASE].doit = tc_ctl_action;
-               link_p[RTM_GETACTION-RTM_BASE].dumpit = tc_dump_action;
-       }
+       rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL);
+       rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action);
 
        return 0;
 }
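Most hunks in this file follow one idiom: remember the current tail, open a zero-length attribute as a nest header, emit the nested payload, then patch the header's rta_len from the new tail; on failure, roll the skb back to the saved mark. With the new helpers that reads roughly as below (a hypothetical dump helper condensed from the code above, not a function from the tree):

#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>

static int example_dump_nested(struct sk_buff *skb, u32 index)
{
	unsigned char *b = skb_tail_pointer(skb);	/* rollback mark */
	struct rtattr *nest;

	nest = (struct rtattr *)skb_tail_pointer(skb);
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);		/* open the nest */
	RTA_PUT(skb, TCA_ACT_INDEX, sizeof(index), &index);
	nest->rta_len = skb_tail_pointer(skb) - (u8 *)nest;	/* close the nest */
	return skb->len;

rtattr_failure:				/* RTA_PUT jumps here on overflow */
	nlmsg_trim(skb, b);		/* undo any partial attributes */
	return -1;
}

Going through skb_tail_pointer() and nlmsg_trim() instead of touching skb->tail and skb->data directly keeps these call sites working once the tail field stops being a plain pointer.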
index 87d0faf..7517f37 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/proc_fs.h>
+#include <net/netlink.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <linux/tc_act/tc_gact.h>
@@ -155,7 +156,7 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 
 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tc_gact opt;
        struct tcf_gact *gact = a->priv;
        struct tcf_t t;
@@ -181,7 +182,7 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index 47f0b13..00b05f4 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/kmod.h>
+#include <net/netlink.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <linux/tc_act/tc_ipt.h>
@@ -245,7 +246,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
 
 static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tcf_ipt *ipt = a->priv;
        struct ipt_entry_target *t;
        struct tcf_t tm;
@@ -277,7 +278,7 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        kfree(t);
        return -1;
 }
index 3e93683..de21c92 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/proc_fs.h>
+#include <net/netlink.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <linux/tc_act/tc_mirred.h>
@@ -206,7 +207,7 @@ bad_mirred:
 
 static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tcf_mirred *m = a->priv;
        struct tc_mirred opt;
        struct tcf_t t;
@@ -225,7 +226,7 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index 3d6a2fc..45b3cda 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/proc_fs.h>
+#include <net/netlink.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <linux/tc_act/tc_pedit.h>
@@ -136,7 +137,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
                }
        }
 
-       pptr = skb->nh.raw;
+       pptr = skb_network_header(skb);
 
        spin_lock(&p->tcf_lock);
 
@@ -195,7 +196,7 @@ done:
 static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
                          int bind, int ref)
 {
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tcf_pedit *p = a->priv;
        struct tc_pedit *opt;
        struct tcf_t t;
@@ -226,7 +227,7 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        kfree(opt);
        return -1;
 }
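Here and in the classifier changes below, direct skb->nh.raw / skb->nh.iph accesses become accessor calls. A hypothetical match helper using the new spellings (illustrative only):

#include <linux/ip.h>
#include <linux/skbuff.h>

static int example_match_tos(const struct sk_buff *skb, u8 want)
{
	const struct iphdr *iph = ip_hdr(skb);			/* typed view */
	const unsigned char *nh = skb_network_header(skb);	/* raw bytes, same pointer */

	return iph->tos == want && nh[1] == want;	/* byte 1 of an IPv4 header is TOS */
}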
index 10a5a5c..616f465 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <net/sock.h>
 #include <net/act_api.h>
+#include <net/netlink.h>
 
 #define L2T(p,L)   ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log])
 #define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log])
@@ -80,7 +81,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
                                continue;
                        a->priv = p;
                        a->order = index;
-                       r = (struct rtattr*) skb->tail;
+                       r = (struct rtattr *)skb_tail_pointer(skb);
                        RTA_PUT(skb, a->order, 0, NULL);
                        if (type == RTM_DELACTION)
                                err = tcf_action_dump_1(skb, a, 0, 1);
@@ -88,10 +89,10 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
                                err = tcf_action_dump_1(skb, a, 0, 0);
                        if (err < 0) {
                                index--;
-                               skb_trim(skb, (u8*)r - skb->data);
+                               nlmsg_trim(skb, r);
                                goto done;
                        }
-                       r->rta_len = skb->tail - (u8*)r;
+                       r->rta_len = skb_tail_pointer(skb) - (u8 *)r;
                        n_i++;
                }
        }
@@ -102,7 +103,7 @@ done:
        return n_i;
 
 rtattr_failure:
-       skb_trim(skb, (u8*)r - skb->data);
+       nlmsg_trim(skb, r);
        goto done;
 }
 #endif
@@ -240,7 +241,7 @@ override:
        if (ret != ACT_P_CREATED)
                return ret;
 
-       PSCHED_GET_TIME(police->tcfp_t_c);
+       police->tcfp_t_c = psched_get_time();
        police->tcf_index = parm->index ? parm->index :
                tcf_hash_new_index(&police_idx_gen, &police_hash_info);
        h = tcf_hash(police->tcf_index, POL_TAB_MASK);
@@ -295,10 +296,9 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
                        return police->tcfp_result;
                }
 
-               PSCHED_GET_TIME(now);
-
-               toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
-                                        police->tcfp_burst);
+               now = psched_get_time();
+               toks = psched_tdiff_bounded(now, police->tcfp_t_c,
+                                           police->tcfp_burst);
                if (police->tcfp_P_tab) {
                        ptoks = toks + police->tcfp_ptoks;
                        if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
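The PSCHED_GET_TIME/PSCHED_TDIFF_SAFE macros give way to psched_get_time() and psched_tdiff_bounded(); the value they feed is the usual token-bucket refill step. A standalone sketch of that step in plain C with made-up names, not the kernel helpers themselves:

#include <stdint.h>

/* elapsed time since the last update, capped at the burst allowance,
 * tops the token count back up */
static int64_t refill_tokens(int64_t toks, uint64_t now, uint64_t last,
			     int64_t burst)
{
	int64_t elapsed = (int64_t)(now - last);

	if (elapsed > burst)	/* the "bounded" part of psched_tdiff_bounded() */
		elapsed = burst;
	toks += elapsed;
	if (toks > burst)
		toks = burst;
	return toks;
}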
@@ -326,7 +326,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 static int
 tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tcf_police *police = a->priv;
        struct tc_police opt;
 
@@ -355,7 +355,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -494,7 +494,7 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
        }
        if (police->tcfp_P_tab)
                police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
-       PSCHED_GET_TIME(police->tcfp_t_c);
+       police->tcfp_t_c = psched_get_time();
        police->tcf_index = parm->index ? parm->index :
                tcf_police_new_index();
        police->tcf_action = parm->action;
@@ -542,9 +542,9 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *police)
                        return police->tcfp_result;
                }
 
-               PSCHED_GET_TIME(now);
-               toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
-                                        police->tcfp_burst);
+               now = psched_get_time();
+               toks = psched_tdiff_bounded(now, police->tcfp_t_c,
+                                           police->tcfp_burst);
                if (police->tcfp_P_tab) {
                        ptoks = toks + police->tcfp_ptoks;
                        if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
@@ -572,7 +572,7 @@ EXPORT_SYMBOL(tcf_police);
 
 int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
 {
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tc_police opt;
 
        opt.index = police->tcf_index;
@@ -598,7 +598,7 @@ int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index c797118..36e1eda 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
+#include <net/netlink.h>
 #include <net/pkt_sched.h>
 
 #define TCA_ACT_SIMP 22
@@ -155,7 +156,7 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
 static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
                                int bind, int ref)
 {
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tcf_defact *d = a->priv;
        struct tc_defact opt;
        struct tcf_t t;
@@ -173,7 +174,7 @@ static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index 5c6ffdb..ebf94ed 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
 #include <linux/init.h>
 #include <linux/kmod.h>
+#include <linux/netlink.h>
+#include <net/netlink.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -323,7 +324,7 @@ tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
 {
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
        tcm = NLMSG_DATA(nlh);
@@ -340,12 +341,12 @@ tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
                if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0)
                        goto rtattr_failure;
        }
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -399,7 +400,6 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
        if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
                return skb->len;
 
-       read_lock(&qdisc_tree_lock);
        if (!tcm->tcm_parent)
                q = dev->qdisc_sleeping;
        else
@@ -456,7 +456,6 @@ errout:
        if (cl)
                cops->put(q, cl);
 out:
-       read_unlock(&qdisc_tree_lock);
        dev_put(dev);
        return skb->len;
 }
@@ -563,30 +562,30 @@ tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
                 * to work with both old and new modes of entering
                 * tc data even if iproute2  was newer - jhs
                 */
-               struct rtattr * p_rta = (struct rtattr*) skb->tail;
+               struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb);
 
                if (exts->action->type != TCA_OLD_COMPAT) {
                        RTA_PUT(skb, map->action, 0, NULL);
                        if (tcf_action_dump(skb, exts->action, 0, 0) < 0)
                                goto rtattr_failure;
-                       p_rta->rta_len = skb->tail - (u8*)p_rta;
+                       p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
                } else if (map->police) {
                        RTA_PUT(skb, map->police, 0, NULL);
                        if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0)
                                goto rtattr_failure;
-                       p_rta->rta_len = skb->tail - (u8*)p_rta;
+                       p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
                }
        }
 #elif defined CONFIG_NET_CLS_POLICE
        if (map->police && exts->police) {
-               struct rtattr * p_rta = (struct rtattr*) skb->tail;
+               struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb);
 
                RTA_PUT(skb, map->police, 0, NULL);
 
                if (tcf_police_dump(skb, exts->police) < 0)
                        goto rtattr_failure;
 
-               p_rta->rta_len = skb->tail - (u8*)p_rta;
+               p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
        }
 #endif
        return 0;
@@ -614,18 +613,11 @@ rtattr_failure: __attribute__ ((unused))
 
 static int __init tc_filter_init(void)
 {
-       struct rtnetlink_link *link_p = rtnetlink_links[PF_UNSPEC];
+       rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL);
+       rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
+                                                tc_dump_tfilter);
 
-       /* Setup rtnetlink links. It is made here to avoid
-          exporting large number of public symbols.
-        */
-
-       if (link_p) {
-               link_p[RTM_NEWTFILTER-RTM_BASE].doit = tc_ctl_tfilter;
-               link_p[RTM_DELTFILTER-RTM_BASE].doit = tc_ctl_tfilter;
-               link_p[RTM_GETTFILTER-RTM_BASE].doit = tc_ctl_tfilter;
-               link_p[RTM_GETTFILTER-RTM_BASE].dumpit = tc_dump_tfilter;
-       }
        return 0;
 }
 
index 4a91f08..c885412 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
+#include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
 
@@ -245,7 +246,7 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
                      struct sk_buff *skb, struct tcmsg *t)
 {
        struct basic_filter *f = (struct basic_filter *) fh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
 
        if (f == NULL)
@@ -263,11 +264,11 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
            tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
                goto rtattr_failure;
 
-       rta->rta_len = (skb->tail - b);
+       rta->rta_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index 5dbb9d4..bbec4a0 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/notifier.h>
 #include <linux/netfilter.h>
 #include <net/ip.h>
+#include <net/netlink.h>
 #include <net/route.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
@@ -348,7 +349,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
 {
        struct fw_head *head = (struct fw_head *)tp->root;
        struct fw_filter *f = (struct fw_filter*)fh;
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
 
        if (f == NULL)
@@ -374,7 +375,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
        if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
                goto rtattr_failure;
 
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
 
        if (tcf_exts_dump_stats(skb, &f->exts, &fw_ext_map) < 0)
                goto rtattr_failure;
@@ -382,7 +383,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index abc47cc..cc941d0 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/etherdevice.h>
 #include <linux/notifier.h>
 #include <net/ip.h>
+#include <net/netlink.h>
 #include <net/route.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
@@ -88,9 +89,9 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
 static inline
 void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
 {
-       spin_lock_bh(&dev->queue_lock);
+       qdisc_lock_tree(dev);
        memset(head->fastmap, 0, sizeof(head->fastmap));
-       spin_unlock_bh(&dev->queue_lock);
+       qdisc_unlock_tree(dev);
 }
 
 static inline void
@@ -562,7 +563,7 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
                       struct sk_buff *skb, struct tcmsg *t)
 {
        struct route4_filter *f = (struct route4_filter*)fh;
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
        u32 id;
 
@@ -591,7 +592,7 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
        if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
                goto rtattr_failure;
 
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
 
        if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
                goto rtattr_failure;
@@ -599,7 +600,7 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index 1d4a1fb..0a683c0 100644 (file)
@@ -31,6 +31,7 @@
 #include <net/route.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
+#include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
 
index 7853621..22f9ede 100644 (file)
@@ -143,9 +143,9 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
        u8 tunnelid = 0;
        u8 *xprt;
 #if RSVP_DST_LEN == 4
-       struct ipv6hdr *nhptr = skb->nh.ipv6h;
+       struct ipv6hdr *nhptr = ipv6_hdr(skb);
 #else
-       struct iphdr *nhptr = skb->nh.iph;
+       struct iphdr *nhptr = ip_hdr(skb);
 #endif
 
 restart:
@@ -160,7 +160,7 @@ restart:
        dst = &nhptr->daddr;
        protocol = nhptr->protocol;
        xprt = ((u8*)nhptr) + (nhptr->ihl<<2);
-       if (nhptr->frag_off&__constant_htons(IP_MF|IP_OFFSET))
+       if (nhptr->frag_off & htons(IP_MF|IP_OFFSET))
                return -1;
 #endif
 
@@ -593,7 +593,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
 {
        struct rsvp_filter *f = (struct rsvp_filter*)fh;
        struct rsvp_session *s;
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
        struct tc_rsvp_pinfo pinfo;
 
@@ -623,14 +623,14 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
        if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
                goto rtattr_failure;
 
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
 
        if (tcf_exts_dump_stats(skb, &f->exts, &rsvp_ext_map) < 0)
                goto rtattr_failure;
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index a2979d8..93b6abe 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/sock.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
+#include <net/netlink.h>
 
 #define RSVP_DST_LEN   4
 #define RSVP_ID                "rsvp6"
index 7563fdc..47ac0c5 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/netdevice.h>
 #include <net/ip.h>
 #include <net/act_api.h>
+#include <net/netlink.h>
 #include <net/pkt_cls.h>
 #include <net/route.h>
 
@@ -448,7 +449,7 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
 {
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
 
        DPRINTK("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
@@ -463,7 +464,7 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
                RTA_PUT(skb,TCA_TCINDEX_SHIFT,sizeof(p->shift),&p->shift);
                RTA_PUT(skb,TCA_TCINDEX_FALL_THROUGH,sizeof(p->fall_through),
                    &p->fall_through);
-               rta->rta_len = skb->tail-b;
+               rta->rta_len = skb_tail_pointer(skb) - b;
        } else {
                if (p->perfect) {
                        t->tcm_handle = r-p->perfect;
@@ -486,7 +487,7 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
 
                if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
                        goto rtattr_failure;
-               rta->rta_len = skb->tail-b;
+               rta->rta_len = skb_tail_pointer(skb) - b;
 
                if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0)
                        goto rtattr_failure;
@@ -495,7 +496,7 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index 0bcb169..c7a347b 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/notifier.h>
 #include <linux/rtnetlink.h>
 #include <net/ip.h>
+#include <net/netlink.h>
 #include <net/route.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
@@ -119,7 +120,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
        } stack[TC_U32_MAXDEPTH];
 
        struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
-       u8 *ptr = skb->nh.raw;
+       u8 *ptr = skb_network_header(skb);
        struct tc_u_knode *n;
        int sdepth = 0;
        int off2 = 0;
@@ -213,7 +214,7 @@ check_terminal:
                        off2 = 0;
                }
 
-               if (ptr < skb->tail)
+               if (ptr < skb_tail_pointer(skb))
                        goto next_ht;
        }
 
@@ -435,7 +436,7 @@ static void u32_destroy(struct tcf_proto *tp)
                        BUG_TRAP(ht->refcnt == 0);
 
                        kfree(ht);
-               };
+               }
 
                kfree(tp_c);
        }
@@ -718,7 +719,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
 {
        struct tc_u_knode *n = (struct tc_u_knode*)fh;
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
 
        if (n == NULL)
@@ -765,14 +766,14 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
 #endif
        }
 
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
        if (TC_U32_KEY(n->handle))
                if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
                        goto rtattr_failure;
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index cd0600c..0a2a7fe 100644 (file)
@@ -22,7 +22,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
                        struct tcf_pkt_info *info)
 {
        struct tc_u32_key *key = (struct tc_u32_key *) em->data;
-       unsigned char *ptr = skb->nh.raw;
+       const unsigned char *ptr = skb_network_header(skb);
 
        if (info) {
                if (info->ptr)
index 959c306..63146d3 100644 (file)
@@ -418,17 +418,19 @@ void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree)
 int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
 {
        int i;
-       struct rtattr * top_start = (struct rtattr*) skb->tail;
-       struct rtattr * list_start;
+       u8 *tail;
+       struct rtattr *top_start = (struct rtattr *)skb_tail_pointer(skb);
+       struct rtattr *list_start;
 
        RTA_PUT(skb, tlv, 0, NULL);
        RTA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr);
 
-       list_start = (struct rtattr *) skb->tail;
+       list_start = (struct rtattr *)skb_tail_pointer(skb);
        RTA_PUT(skb, TCA_EMATCH_TREE_LIST, 0, NULL);
 
+       tail = skb_tail_pointer(skb);
        for (i = 0; i < tree->hdr.nmatches; i++) {
-               struct rtattr *match_start = (struct rtattr*) skb->tail;
+               struct rtattr *match_start = (struct rtattr *)tail;
                struct tcf_ematch *em = tcf_em_get_match(tree, i);
                struct tcf_ematch_hdr em_hdr = {
                        .kind = em->ops ? em->ops->kind : TCF_EM_CONTAINER,
@@ -447,11 +449,12 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
                } else if (em->datalen > 0)
                        RTA_PUT_NOHDR(skb, em->datalen, (void *) em->data);
 
-               match_start->rta_len = skb->tail - (u8*) match_start;
+               tail = skb_tail_pointer(skb);
+               match_start->rta_len = tail - (u8 *)match_start;
        }
 
-       list_start->rta_len = skb->tail - (u8 *) list_start;
-       top_start->rta_len = skb->tail - (u8 *) top_start;
+       list_start->rta_len = tail - (u8 *)list_start;
+       top_start->rta_len = tail - (u8 *)top_start;
 
        return 0;
 
index ecc988a..8699e70 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmod.h>
 #include <linux/list.h>
 #include <linux/bitops.h>
+#include <linux/hrtimer.h>
 
+#include <net/netlink.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 
@@ -190,7 +191,7 @@ int unregister_qdisc(struct Qdisc_ops *qops)
    (root qdisc, all its children, children of children etc.)
  */
 
-static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
        struct Qdisc *q;
 
@@ -201,16 +202,6 @@ static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
        return NULL;
 }
 
-struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
-{
-       struct Qdisc *q;
-
-       read_lock(&qdisc_tree_lock);
-       q = __qdisc_lookup(dev, handle);
-       read_unlock(&qdisc_tree_lock);
-       return q;
-}
-
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 {
        unsigned long cl;
@@ -291,6 +282,48 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
        }
 }
 
+static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
+{
+       struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
+                                                timer);
+       struct net_device *dev = wd->qdisc->dev;
+
+       wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+       smp_wmb();
+       if (spin_trylock(&dev->queue_lock)) {
+               qdisc_run(dev);
+               spin_unlock(&dev->queue_lock);
+       } else
+               netif_schedule(dev);
+
+       return HRTIMER_NORESTART;
+}
+
+void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
+{
+       hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       wd->timer.function = qdisc_watchdog;
+       wd->qdisc = qdisc;
+}
+EXPORT_SYMBOL(qdisc_watchdog_init);
+
+void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
+{
+       ktime_t time;
+
+       wd->qdisc->flags |= TCQ_F_THROTTLED;
+       time = ktime_set(0, 0);
+       time = ktime_add_ns(time, PSCHED_US2NS(expires));
+       hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
+}
+EXPORT_SYMBOL(qdisc_watchdog_schedule);
+
+void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
+{
+       hrtimer_cancel(&wd->timer);
+       wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+}
+EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
 /* Allocate an unique handle from space managed by kernel */
 
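The new qdisc_watchdog helpers give rate-limiting qdiscs an hrtimer-backed wakeup in place of the old jiffies timers. A hypothetical dequeue path using them; the struct and field names are illustrative, not from a specific qdisc:

#include <net/pkt_sched.h>

struct example_sched_data {
	struct qdisc_watchdog	watchdog;
	psched_time_t		next_send;
};

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	psched_time_t now = psched_get_time();

	if (q->next_send > now) {
		/* head packet not yet due: arm the watchdog at its send time */
		qdisc_watchdog_schedule(&q->watchdog, q->next_send);
		return NULL;
	}
	qdisc_watchdog_cancel(&q->watchdog);
	return __skb_dequeue(&sch->q);
}

Such a qdisc would call qdisc_watchdog_init(&q->watchdog, sch) from its init routine and qdisc_watchdog_cancel() from its reset and destroy paths.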
@@ -362,7 +395,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
        if (n == 0)
                return;
        while ((parentid = sch->parent)) {
-               sch = __qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+               sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
                cops = sch->ops->cl_ops;
                if (cops->qlen_notify) {
                        cl = cops->get(sch, parentid);
@@ -467,12 +500,16 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 
        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
+               sch->stats_lock = &dev->ingress_lock;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
-       } else if (handle == 0) {
-               handle = qdisc_alloc_handle(dev);
-               err = -ENOMEM;
-               if (handle == 0)
-                       goto err_out3;
+       } else {
+               sch->stats_lock = &dev->queue_lock;
+               if (handle == 0) {
+                       handle = qdisc_alloc_handle(dev);
+                       err = -ENOMEM;
+                       if (handle == 0)
+                               goto err_out3;
+               }
        }
 
        sch->handle = handle;
@@ -621,9 +658,9 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        return err;
                if (q) {
                        qdisc_notify(skb, n, clid, q, NULL);
-                       spin_lock_bh(&dev->queue_lock);
+                       qdisc_lock_tree(dev);
                        qdisc_destroy(q);
-                       spin_unlock_bh(&dev->queue_lock);
+                       qdisc_unlock_tree(dev);
                }
        } else {
                qdisc_notify(skb, n, clid, NULL, q);
@@ -756,17 +793,17 @@ graft:
                err = qdisc_graft(dev, p, clid, q, &old_q);
                if (err) {
                        if (q) {
-                               spin_lock_bh(&dev->queue_lock);
+                               qdisc_lock_tree(dev);
                                qdisc_destroy(q);
-                               spin_unlock_bh(&dev->queue_lock);
+                               qdisc_unlock_tree(dev);
                        }
                        return err;
                }
                qdisc_notify(skb, n, clid, old_q, q);
                if (old_q) {
-                       spin_lock_bh(&dev->queue_lock);
+                       qdisc_lock_tree(dev);
                        qdisc_destroy(old_q);
-                       spin_unlock_bh(&dev->queue_lock);
+                       qdisc_unlock_tree(dev);
                }
        }
        return 0;
@@ -777,7 +814,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 {
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct gnet_dump d;
 
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
@@ -811,12 +848,12 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        if (gnet_stats_finish_copy(&d) < 0)
                goto rtattr_failure;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -862,7 +899,6 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
                if (idx > s_idx)
                        s_q_idx = 0;
-               read_lock(&qdisc_tree_lock);
                q_idx = 0;
                list_for_each_entry(q, &dev->qdisc_list, list) {
                        if (q_idx < s_q_idx) {
@@ -870,13 +906,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
                                continue;
                        }
                        if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
-                                         cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
-                               read_unlock(&qdisc_tree_lock);
+                                         cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                                goto done;
-                       }
                        q_idx++;
                }
-               read_unlock(&qdisc_tree_lock);
        }
 
 done:
@@ -1015,7 +1048,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
 {
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct gnet_dump d;
        struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
 
@@ -1040,12 +1073,12 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
        if (gnet_stats_finish_copy(&d) < 0)
                goto rtattr_failure;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1099,7 +1132,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
        s_t = cb->args[0];
        t = 0;
 
-       read_lock(&qdisc_tree_lock);
        list_for_each_entry(q, &dev->qdisc_list, list) {
                if (t < s_t || !q->ops->cl_ops ||
                    (tcm->tcm_parent &&
@@ -1121,7 +1153,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
                        break;
                t++;
        }
-       read_unlock(&qdisc_tree_lock);
 
        cb->args[0] = t;
 
@@ -1146,7 +1177,7 @@ reclassify:
 
        for ( ; tp; tp = tp->next) {
                if ((tp->protocol == protocol ||
-                       tp->protocol == __constant_htons(ETH_P_ALL)) &&
+                       tp->protocol == htons(ETH_P_ALL)) &&
                        (err = tp->classify(skb, tp, res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
                        if ( TC_ACT_RECLASSIFY == err) {
@@ -1175,15 +1206,31 @@ reclassify:
        return -1;
 }
 
-static int psched_us_per_tick = 1;
-static int psched_tick_per_us = 1;
+void tcf_destroy(struct tcf_proto *tp)
+{
+       tp->ops->destroy(tp);
+       module_put(tp->ops->owner);
+       kfree(tp);
+}
+
+void tcf_destroy_chain(struct tcf_proto *fl)
+{
+       struct tcf_proto *tp;
+
+       while ((tp = fl) != NULL) {
+               fl = tp->next;
+               tcf_destroy(tp);
+       }
+}
+EXPORT_SYMBOL(tcf_destroy_chain);
 
 #ifdef CONFIG_PROC_FS
 static int psched_show(struct seq_file *seq, void *v)
 {
        seq_printf(seq, "%08x %08x %08x %08x\n",
-                     psched_tick_per_us, psched_us_per_tick,
-                     1000000, HZ);
+                  (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
+                  1000000,
+                  (u32)NSEC_PER_SEC/(u32)ktime_to_ns(KTIME_MONOTONIC_RES));
 
        return 0;
 }
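tcf_destroy_chain() centralizes the filter-list teardown that individual qdiscs used to open-code (the sch_atm change below is one such caller converted to it). A hypothetical destroy path now just hands over its list head:

#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

struct example_filtered_qdisc {
	struct tcf_proto	*filter_list;
};

static void example_qdisc_destroy(struct Qdisc *sch)
{
	struct example_filtered_qdisc *q = qdisc_priv(sch);

	tcf_destroy_chain(q->filter_list);	/* frees every tcf_proto on the chain */
	q->filter_list = NULL;
}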
@@ -1202,101 +1249,19 @@ static const struct file_operations psched_fops = {
 };
 #endif
 
-#ifdef CONFIG_NET_SCH_CLK_CPU
-psched_tdiff_t psched_clock_per_hz;
-int psched_clock_scale;
-EXPORT_SYMBOL(psched_clock_per_hz);
-EXPORT_SYMBOL(psched_clock_scale);
-
-psched_time_t psched_time_base;
-cycles_t psched_time_mark;
-EXPORT_SYMBOL(psched_time_mark);
-EXPORT_SYMBOL(psched_time_base);
-
-/*
- * Periodically adjust psched_time_base to avoid overflow
- * with 32-bit get_cycles(). Safe up to 4GHz CPU.
- */
-static void psched_tick(unsigned long);
-static DEFINE_TIMER(psched_timer, psched_tick, 0, 0);
-
-static void psched_tick(unsigned long dummy)
-{
-       if (sizeof(cycles_t) == sizeof(u32)) {
-               psched_time_t dummy_stamp;
-               PSCHED_GET_TIME(dummy_stamp);
-               psched_timer.expires = jiffies + 1*HZ;
-               add_timer(&psched_timer);
-       }
-}
-
-int __init psched_calibrate_clock(void)
-{
-       psched_time_t stamp, stamp1;
-       struct timeval tv, tv1;
-       psched_tdiff_t delay;
-       long rdelay;
-       unsigned long stop;
-
-       psched_tick(0);
-       stop = jiffies + HZ/10;
-       PSCHED_GET_TIME(stamp);
-       do_gettimeofday(&tv);
-       while (time_before(jiffies, stop)) {
-               barrier();
-               cpu_relax();
-       }
-       PSCHED_GET_TIME(stamp1);
-       do_gettimeofday(&tv1);
-
-       delay = PSCHED_TDIFF(stamp1, stamp);
-       rdelay = tv1.tv_usec - tv.tv_usec;
-       rdelay += (tv1.tv_sec - tv.tv_sec)*1000000;
-       if (rdelay > delay)
-               return -1;
-       delay /= rdelay;
-       psched_tick_per_us = delay;
-       while ((delay>>=1) != 0)
-               psched_clock_scale++;
-       psched_us_per_tick = 1<<psched_clock_scale;
-       psched_clock_per_hz = (psched_tick_per_us*(1000000/HZ))>>psched_clock_scale;
-       return 0;
-}
-#endif
-
 static int __init pktsched_init(void)
 {
-       struct rtnetlink_link *link_p;
-
-#ifdef CONFIG_NET_SCH_CLK_CPU
-       if (psched_calibrate_clock() < 0)
-               return -1;
-#elif defined(CONFIG_NET_SCH_CLK_JIFFIES)
-       psched_tick_per_us = HZ<<PSCHED_JSCALE;
-       psched_us_per_tick = 1000000;
-#endif
-
-       link_p = rtnetlink_links[PF_UNSPEC];
-
-       /* Setup rtnetlink links. It is made here to avoid
-          exporting large number of public symbols.
-        */
-
-       if (link_p) {
-               link_p[RTM_NEWQDISC-RTM_BASE].doit = tc_modify_qdisc;
-               link_p[RTM_DELQDISC-RTM_BASE].doit = tc_get_qdisc;
-               link_p[RTM_GETQDISC-RTM_BASE].doit = tc_get_qdisc;
-               link_p[RTM_GETQDISC-RTM_BASE].dumpit = tc_dump_qdisc;
-               link_p[RTM_NEWTCLASS-RTM_BASE].doit = tc_ctl_tclass;
-               link_p[RTM_DELTCLASS-RTM_BASE].doit = tc_ctl_tclass;
-               link_p[RTM_GETTCLASS-RTM_BASE].doit = tc_ctl_tclass;
-               link_p[RTM_GETTCLASS-RTM_BASE].dumpit = tc_dump_tclass;
-       }
-
        register_qdisc(&pfifo_qdisc_ops);
        register_qdisc(&bfifo_qdisc_ops);
        proc_net_fops_create("psched", 0, &psched_fops);
 
+       rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
+       rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
+       rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
+       rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);
+
        return 0;
 }
 
index afb3bbd..be7d299 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/netdevice.h>
 #include <linux/rtnetlink.h>
 #include <linux/file.h> /* for fput */
+#include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/sock.h>
 
@@ -157,19 +158,6 @@ static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
        return atm_tc_get(sch,classid);
 }
 
-
-static void destroy_filters(struct atm_flow_data *flow)
-{
-       struct tcf_proto *filter;
-
-       while ((filter = flow->filter_list)) {
-               DPRINTK("destroy_filters: destroying filter %p\n",filter);
-               flow->filter_list = filter->next;
-               tcf_destroy(filter);
-       }
-}
-
-
 /*
  * atm_tc_put handles all destructions, including the ones that are explicitly
  * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
@@ -194,7 +182,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
        *prev = flow->next;
        DPRINTK("atm_tc_put: qdisc %p\n",flow->q);
        qdisc_destroy(flow->q);
-       destroy_filters(flow);
+       tcf_destroy_chain(flow->filter_list);
        if (flow->sock) {
                DPRINTK("atm_tc_put: f_count %d\n",
                    file_count(flow->sock->file));
@@ -503,7 +491,7 @@ static void sch_atm_dequeue(unsigned long data)
                        }
                        D2PRINTK("atm_tc_dequeue: sending on class %p\n",flow);
                        /* remove any LL header somebody else has attached */
-                       skb_pull(skb,(char *) skb->nh.iph-(char *) skb->data);
+                       skb_pull(skb, skb_network_offset(skb));
                        if (skb_headroom(skb) < flow->hdr_len) {
                                struct sk_buff *new;
 
@@ -513,7 +501,7 @@ static void sch_atm_dequeue(unsigned long data)
                                skb = new;
                        }
                        D2PRINTK("sch_atm_dequeue: ip %p, data %p\n",
-                           skb->nh.iph,skb->data);
+                                skb_network_header(skb), skb->data);
                        ATM_SKB(skb)->vcc = flow->vcc;
                        memcpy(skb_push(skb,flow->hdr_len),flow->hdr,
                            flow->hdr_len);
@@ -610,7 +598,7 @@ static void atm_tc_destroy(struct Qdisc *sch)
        DPRINTK("atm_tc_destroy(sch %p,[qdisc %p])\n",sch,p);
        /* races ? */
        while ((flow = p->flows)) {
-               destroy_filters(flow);
+               tcf_destroy_chain(flow->filter_list);
                if (flow->ref > 1)
                        printk(KERN_ERR "atm_destroy: %p->ref = %d\n",flow,
                            flow->ref);
@@ -631,7 +619,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
 {
        struct atm_qdisc_data *p = PRIV(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *) cl;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
 
        DPRINTK("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
@@ -661,11 +649,11 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
 
                RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(zero),&zero);
        }
-       rta->rta_len = skb->tail-b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb,b-skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 static int
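
The sch_atm.c hunks above (and the matching changes in sch_cbq.c, sch_dsmark.c, sch_hfsc.c, sch_htb.c and sch_ingress.c below) drop each qdisc's private "walk and destroy the filter list" helper in favour of a shared tcf_destroy_chain() call. What gets factored out is just a singly-linked-list teardown; the following standalone sketch mirrors the removed destroy_filters() loop, with struct tcf_proto_stub and stub_destroy() invented purely for the example.

/* Illustrative stand-in for the removed destroy_filters()-style helpers:
 * walk a singly linked chain, unlink each node, then destroy it. */
#include <stdio.h>
#include <stdlib.h>

struct tcf_proto_stub {
	struct tcf_proto_stub *next;
	int handle;
};

static void stub_destroy(struct tcf_proto_stub *tp)
{
	printf("destroying filter %d\n", tp->handle);
	free(tp);
}

static void destroy_chain(struct tcf_proto_stub *fl)
{
	struct tcf_proto_stub *tp;

	while ((tp = fl) != NULL) {
		fl = tp->next;        /* unlink before destroying, as the qdiscs did */
		stub_destroy(tp);
	}
}

int main(void)
{
	struct tcf_proto_stub *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct tcf_proto_stub *tp = malloc(sizeof(*tp));

		if (!tp)
			break;
		tp->handle = i;
		tp->next = head;
		head = tp;
	}
	destroy_chain(head);
	return 0;
}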
index 76c92e7..a294542 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/etherdevice.h>
 #include <linux/notifier.h>
 #include <net/ip.h>
+#include <net/netlink.h>
 #include <net/route.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
@@ -112,7 +113,7 @@ struct cbq_class
 
        /* Overlimit strategy parameters */
        void                    (*overlimit)(struct cbq_class *cl);
-       long                    penalty;
+       psched_tdiff_t          penalty;
 
        /* General scheduler (WRR) parameters */
        long                    allot;
@@ -143,7 +144,7 @@ struct cbq_class
        psched_time_t           undertime;
        long                    avgidle;
        long                    deficit;        /* Saved deficit for WRR */
-       unsigned long           penalized;
+       psched_time_t           penalized;
        struct gnet_stats_basic bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
@@ -180,12 +181,12 @@ struct cbq_sched_data
        psched_time_t           now_rt;         /* Cached real time */
        unsigned                pmask;
 
-       struct timer_list       delay_timer;
-       struct timer_list       wd_timer;       /* Watchdog timer,
+       struct hrtimer          delay_timer;
+       struct qdisc_watchdog   watchdog;       /* Watchdog timer,
                                                   started when CBQ has
                                                   backlog, but cannot
                                                   transmit just now */
-       long                    wd_expires;
+       psched_tdiff_t          wd_expires;
        int                     toplevel;
        u32                     hgenerator;
 };
@@ -384,12 +385,12 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
                psched_time_t now;
                psched_tdiff_t incr;
 
-               PSCHED_GET_TIME(now);
-               incr = PSCHED_TDIFF(now, q->now_rt);
-               PSCHED_TADD2(q->now, incr, now);
+               now = psched_get_time();
+               incr = now - q->now_rt;
+               now = q->now + incr;
 
                do {
-                       if (PSCHED_TLESS(cl->undertime, now)) {
+                       if (cl->undertime < now) {
                                q->toplevel = cl->level;
                                return;
                        }
@@ -473,7 +474,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 static void cbq_ovl_classic(struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-       psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
+       psched_tdiff_t delay = cl->undertime - q->now;
 
        if (!cl->delayed) {
                delay += cl->offtime;
@@ -491,7 +492,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
                        cl->avgidle = cl->minidle;
                if (delay <= 0)
                        delay = 1;
-               PSCHED_TADD2(q->now, delay, cl->undertime);
+               cl->undertime = q->now + delay;
 
                cl->xstats.overactions++;
                cl->delayed = 1;
@@ -508,7 +509,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
                psched_tdiff_t base_delay = q->wd_expires;
 
                for (b = cl->borrow; b; b = b->borrow) {
-                       delay = PSCHED_TDIFF(b->undertime, q->now);
+                       delay = b->undertime - q->now;
                        if (delay < base_delay) {
                                if (delay <= 0)
                                        delay = 1;
@@ -546,27 +547,32 @@ static void cbq_ovl_rclassic(struct cbq_class *cl)
 static void cbq_ovl_delay(struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-       psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
+       psched_tdiff_t delay = cl->undertime - q->now;
 
        if (!cl->delayed) {
-               unsigned long sched = jiffies;
+               psched_time_t sched = q->now;
+               ktime_t expires;
 
                delay += cl->offtime;
                if (cl->avgidle < 0)
                        delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
                if (cl->avgidle < cl->minidle)
                        cl->avgidle = cl->minidle;
-               PSCHED_TADD2(q->now, delay, cl->undertime);
+               cl->undertime = q->now + delay;
 
                if (delay > 0) {
-                       sched += PSCHED_US2JIFFIE(delay) + cl->penalty;
+                       sched += delay + cl->penalty;
                        cl->penalized = sched;
                        cl->cpriority = TC_CBQ_MAXPRIO;
                        q->pmask |= (1<<TC_CBQ_MAXPRIO);
-                       if (del_timer(&q->delay_timer) &&
-                           (long)(q->delay_timer.expires - sched) > 0)
-                               q->delay_timer.expires = sched;
-                       add_timer(&q->delay_timer);
+
+                       expires = ktime_set(0, 0);
+                       expires = ktime_add_ns(expires, PSCHED_US2NS(sched));
+                       if (hrtimer_try_to_cancel(&q->delay_timer) &&
+                           ktime_to_ns(ktime_sub(q->delay_timer.expires,
+                                                 expires)) > 0)
+                               q->delay_timer.expires = expires;
+                       hrtimer_restart(&q->delay_timer);
                        cl->delayed = 1;
                        cl->xstats.overactions++;
                        return;
@@ -583,7 +589,7 @@ static void cbq_ovl_lowprio(struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 
-       cl->penalized = jiffies + cl->penalty;
+       cl->penalized = q->now + cl->penalty;
 
        if (cl->cpriority != cl->priority2) {
                cl->cpriority = cl->priority2;
@@ -604,27 +610,19 @@ static void cbq_ovl_drop(struct cbq_class *cl)
        cbq_ovl_classic(cl);
 }
 
-static void cbq_watchdog(unsigned long arg)
-{
-       struct Qdisc *sch = (struct Qdisc*)arg;
-
-       sch->flags &= ~TCQ_F_THROTTLED;
-       netif_schedule(sch->dev);
-}
-
-static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
+static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
+                                      psched_time_t now)
 {
        struct cbq_class *cl;
        struct cbq_class *cl_prev = q->active[prio];
-       unsigned long now = jiffies;
-       unsigned long sched = now;
+       psched_time_t sched = now;
 
        if (cl_prev == NULL)
-               return now;
+               return 0;
 
        do {
                cl = cl_prev->next_alive;
-               if ((long)(now - cl->penalized) > 0) {
+               if (now - cl->penalized > 0) {
                        cl_prev->next_alive = cl->next_alive;
                        cl->next_alive = NULL;
                        cl->cpriority = cl->priority;
@@ -640,30 +638,34 @@ static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
                        }
 
                        cl = cl_prev->next_alive;
-               } else if ((long)(sched - cl->penalized) > 0)
+               } else if (sched - cl->penalized > 0)
                        sched = cl->penalized;
        } while ((cl_prev = cl) != q->active[prio]);
 
-       return (long)(sched - now);
+       return sched - now;
 }
 
-static void cbq_undelay(unsigned long arg)
+static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 {
-       struct Qdisc *sch = (struct Qdisc*)arg;
-       struct cbq_sched_data *q = qdisc_priv(sch);
-       long delay = 0;
+       struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
+                                               delay_timer);
+       struct Qdisc *sch = q->watchdog.qdisc;
+       psched_time_t now;
+       psched_tdiff_t delay = 0;
        unsigned pmask;
 
+       now = psched_get_time();
+
        pmask = q->pmask;
        q->pmask = 0;
 
        while (pmask) {
                int prio = ffz(~pmask);
-               long tmp;
+               psched_tdiff_t tmp;
 
                pmask &= ~(1<<prio);
 
-               tmp = cbq_undelay_prio(q, prio);
+               tmp = cbq_undelay_prio(q, prio, now);
                if (tmp > 0) {
                        q->pmask |= 1<<prio;
                        if (tmp < delay || delay == 0)
@@ -672,12 +674,16 @@ static void cbq_undelay(unsigned long arg)
        }
 
        if (delay) {
-               q->delay_timer.expires = jiffies + delay;
-               add_timer(&q->delay_timer);
+               ktime_t time;
+
+               time = ktime_set(0, 0);
+               time = ktime_add_ns(time, PSCHED_US2NS(now + delay));
+               hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
        }
 
        sch->flags &= ~TCQ_F_THROTTLED;
        netif_schedule(sch->dev);
+       return HRTIMER_NORESTART;
 }
 
 
@@ -732,7 +738,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
        if (cl && q->toplevel >= borrowed->level) {
                if (cl->q->q.qlen > 1) {
                        do {
-                               if (PSCHED_IS_PASTPERFECT(borrowed->undertime)) {
+                               if (borrowed->undertime == PSCHED_PASTPERFECT) {
                                        q->toplevel = borrowed->level;
                                        return;
                                }
@@ -770,7 +776,7 @@ cbq_update(struct cbq_sched_data *q)
                         idle = (now - last) - last_pktlen/rate
                 */
 
-               idle = PSCHED_TDIFF(q->now, cl->last);
+               idle = q->now - cl->last;
                if ((unsigned long)idle > 128*1024*1024) {
                        avgidle = cl->maxidle;
                } else {
@@ -814,13 +820,11 @@ cbq_update(struct cbq_sched_data *q)
                        idle -= L2T(&q->link, len);
                        idle += L2T(cl, len);
 
-                       PSCHED_AUDIT_TDIFF(idle);
-
-                       PSCHED_TADD2(q->now, idle, cl->undertime);
+                       cl->undertime = q->now + idle;
                } else {
                        /* Underlimit */
 
-                       PSCHED_SET_PASTPERFECT(cl->undertime);
+                       cl->undertime = PSCHED_PASTPERFECT;
                        if (avgidle > cl->maxidle)
                                cl->avgidle = cl->maxidle;
                        else
@@ -841,8 +845,7 @@ cbq_under_limit(struct cbq_class *cl)
        if (cl->tparent == NULL)
                return cl;
 
-       if (PSCHED_IS_PASTPERFECT(cl->undertime) ||
-           !PSCHED_TLESS(q->now, cl->undertime)) {
+       if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
                cl->delayed = 0;
                return cl;
        }
@@ -865,8 +868,7 @@ cbq_under_limit(struct cbq_class *cl)
                }
                if (cl->level > q->toplevel)
                        return NULL;
-       } while (!PSCHED_IS_PASTPERFECT(cl->undertime) &&
-                PSCHED_TLESS(q->now, cl->undertime));
+       } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
 
        cl->delayed = 0;
        return cl;
@@ -1001,8 +1003,8 @@ cbq_dequeue(struct Qdisc *sch)
        psched_time_t now;
        psched_tdiff_t incr;
 
-       PSCHED_GET_TIME(now);
-       incr = PSCHED_TDIFF(now, q->now_rt);
+       now = psched_get_time();
+       incr = now - q->now_rt;
 
        if (q->tx_class) {
                psched_tdiff_t incr2;
@@ -1014,12 +1016,12 @@ cbq_dequeue(struct Qdisc *sch)
                   cbq_time = max(real_time, work);
                 */
                incr2 = L2T(&q->link, q->tx_len);
-               PSCHED_TADD(q->now, incr2);
+               q->now += incr2;
                cbq_update(q);
                if ((incr -= incr2) < 0)
                        incr = 0;
        }
-       PSCHED_TADD(q->now, incr);
+       q->now += incr;
        q->now_rt = now;
 
        for (;;) {
@@ -1051,11 +1053,11 @@ cbq_dequeue(struct Qdisc *sch)
                */
 
                if (q->toplevel == TC_CBQ_MAXLEVEL &&
-                   PSCHED_IS_PASTPERFECT(q->link.undertime))
+                   q->link.undertime == PSCHED_PASTPERFECT)
                        break;
 
                q->toplevel = TC_CBQ_MAXLEVEL;
-               PSCHED_SET_PASTPERFECT(q->link.undertime);
+               q->link.undertime = PSCHED_PASTPERFECT;
        }
 
        /* No packets in scheduler or nobody wants to give them to us :-(
@@ -1063,13 +1065,9 @@ cbq_dequeue(struct Qdisc *sch)
 
        if (sch->q.qlen) {
                sch->qstats.overlimits++;
-               if (q->wd_expires) {
-                       long delay = PSCHED_US2JIFFIE(q->wd_expires);
-                       if (delay <= 0)
-                               delay = 1;
-                       mod_timer(&q->wd_timer, jiffies + delay);
-                       sch->flags |= TCQ_F_THROTTLED;
-               }
+               if (q->wd_expires)
+                       qdisc_watchdog_schedule(&q->watchdog,
+                                               now + q->wd_expires);
        }
        return NULL;
 }
@@ -1276,10 +1274,10 @@ cbq_reset(struct Qdisc* sch)
        q->pmask = 0;
        q->tx_class = NULL;
        q->tx_borrowed = NULL;
-       del_timer(&q->wd_timer);
-       del_timer(&q->delay_timer);
+       qdisc_watchdog_cancel(&q->watchdog);
+       hrtimer_cancel(&q->delay_timer);
        q->toplevel = TC_CBQ_MAXLEVEL;
-       PSCHED_GET_TIME(q->now);
+       q->now = psched_get_time();
        q->now_rt = q->now;
 
        for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
@@ -1290,7 +1288,7 @@ cbq_reset(struct Qdisc* sch)
                        qdisc_reset(cl->q);
 
                        cl->next_alive = NULL;
-                       PSCHED_SET_PASTPERFECT(cl->undertime);
+                       cl->undertime = PSCHED_PASTPERFECT;
                        cl->avgidle = cl->maxidle;
                        cl->deficit = cl->quantum;
                        cl->cpriority = cl->priority;
@@ -1379,7 +1377,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
        default:
                return -EINVAL;
        }
-       cl->penalty = (ovl->penalty*HZ)/1000;
+       cl->penalty = ovl->penalty;
        return 0;
 }
 
@@ -1446,14 +1444,11 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
        q->link.minidle = -0x7FFFFFFF;
        q->link.stats_lock = &sch->dev->queue_lock;
 
-       init_timer(&q->wd_timer);
-       q->wd_timer.data = (unsigned long)sch;
-       q->wd_timer.function = cbq_watchdog;
-       init_timer(&q->delay_timer);
-       q->delay_timer.data = (unsigned long)sch;
+       qdisc_watchdog_init(&q->watchdog, sch);
+       hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        q->delay_timer.function = cbq_undelay;
        q->toplevel = TC_CBQ_MAXLEVEL;
-       PSCHED_GET_TIME(q->now);
+       q->now = psched_get_time();
        q->now_rt = q->now;
 
        cbq_link_class(&q->link);
@@ -1467,19 +1462,19 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
 
 static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
 {
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        RTA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate);
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
 static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
 {
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_lssopt opt;
 
        opt.flags = 0;
@@ -1498,13 +1493,13 @@ static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
 static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
 {
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_wrropt opt;
 
        opt.flags = 0;
@@ -1516,30 +1511,30 @@ static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
 static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 {
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_ovl opt;
 
        opt.strategy = cl->ovl_strategy;
        opt.priority2 = cl->priority2+1;
        opt.pad = 0;
-       opt.penalty = (cl->penalty*1000)/HZ;
+       opt.penalty = cl->penalty;
        RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
 static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 {
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_fopt opt;
 
        if (cl->split || cl->defmap) {
@@ -1551,14 +1546,14 @@ static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
 #ifdef CONFIG_NET_CLS_POLICE
 static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_police opt;
 
        if (cl->police) {
@@ -1570,7 +1565,7 @@ static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 #endif
@@ -1592,18 +1587,18 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
 static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
 
        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        if (cbq_dump_attr(skb, &q->link) < 0)
                goto rtattr_failure;
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1621,7 +1616,7 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
               struct sk_buff *skb, struct tcmsg *tcm)
 {
        struct cbq_class *cl = (struct cbq_class*)arg;
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
 
        if (cl->tparent)
@@ -1635,11 +1630,11 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        if (cbq_dump_attr(skb, cl) < 0)
                goto rtattr_failure;
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1654,8 +1649,8 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        cl->xstats.avgidle = cl->avgidle;
        cl->xstats.undertime = 0;
 
-       if (!PSCHED_IS_PASTPERFECT(cl->undertime))
-               cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
+       if (cl->undertime != PSCHED_PASTPERFECT)
+               cl->xstats.undertime = cl->undertime - q->now;
 
        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
 #ifdef CONFIG_NET_ESTIMATOR
@@ -1722,23 +1717,13 @@ static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
        return 0;
 }
 
-static void cbq_destroy_filters(struct cbq_class *cl)
-{
-       struct tcf_proto *tp;
-
-       while ((tp = cl->filter_list) != NULL) {
-               cl->filter_list = tp->next;
-               tcf_destroy(tp);
-       }
-}
-
 static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
 
        BUG_TRAP(!cl->filters);
 
-       cbq_destroy_filters(cl);
+       tcf_destroy_chain(cl->filter_list);
        qdisc_destroy(cl->q);
        qdisc_put_rtab(cl->R_tab);
 #ifdef CONFIG_NET_ESTIMATOR
@@ -1765,7 +1750,7 @@ cbq_destroy(struct Qdisc* sch)
         */
        for (h = 0; h < 16; h++)
                for (cl = q->classes[h]; cl; cl = cl->next)
-                       cbq_destroy_filters(cl);
+                       tcf_destroy_chain(cl->filter_list);
 
        for (h = 0; h < 16; h++) {
                struct cbq_class *next;
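
In the sch_cbq.c changes above, cbq_undelay() now works entirely in psched ticks: it scans the pending-priority bitmask with ffz(~pmask), asks cbq_undelay_prio() for each throttled priority's remaining delay, and re-arms a single hrtimer for the smallest positive delay. A standalone sketch of that min-over-bitmask scan follows; ffz(~x) is modelled with a count-trailing-zeros builtin and the per-priority delays are made up.

/* Sketch of the cbq_undelay() selection loop: pick the smallest positive
 * per-priority delay out of a bitmask of pending priorities.
 * get_prio_delay() and its values are invented for the example. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t psched_tdiff_t;

static psched_tdiff_t get_prio_delay(int prio)
{
	/* stand-in for cbq_undelay_prio(q, prio, now) */
	static const psched_tdiff_t table[] = { 0, 250, 40, 0, 900 };
	return table[prio];
}

int main(void)
{
	unsigned pmask = (1 << 1) | (1 << 2) | (1 << 4);  /* priorities 1, 2, 4 pending */
	unsigned next_pmask = 0;
	psched_tdiff_t delay = 0;

	while (pmask) {
		int prio = __builtin_ctz(pmask);   /* plays the role of ffz(~pmask) */
		psched_tdiff_t tmp;

		pmask &= ~(1u << prio);
		tmp = get_prio_delay(prio);
		if (tmp > 0) {
			next_pmask |= 1u << prio;  /* still throttled: rescan later */
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}
	printf("re-arm timer in %lld ticks, pending mask 0x%x\n",
	       (long long)delay, next_pmask);
	return 0;
}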
index 96324cf..3c6fd18 100644 (file)
@@ -216,17 +216,17 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
                /* FIXME: Safe with non-linear skbs? --RR */
                switch (skb->protocol) {
                        case __constant_htons(ETH_P_IP):
-                               skb->tc_index = ipv4_get_dsfield(skb->nh.iph)
+                               skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
                                        & ~INET_ECN_MASK;
                                break;
                        case __constant_htons(ETH_P_IPV6):
-                               skb->tc_index = ipv6_get_dsfield(skb->nh.ipv6h)
+                               skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
                                        & ~INET_ECN_MASK;
                                break;
                        default:
                                skb->tc_index = 0;
                                break;
-               };
+               }
        }
 
        if (TC_H_MAJ(skb->priority) == sch->handle)
@@ -257,7 +257,7 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
                                if (p->default_index != NO_DEFAULT_INDEX)
                                        skb->tc_index = p->default_index;
                                break;
-               };
+               }
        }
 
        err = p->q->enqueue(skb,p->q);
@@ -292,11 +292,11 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
 
        switch (skb->protocol) {
                case __constant_htons(ETH_P_IP):
-                       ipv4_change_dsfield(skb->nh.iph, p->mask[index],
+                       ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
                                            p->value[index]);
                        break;
                case __constant_htons(ETH_P_IPV6):
-                       ipv6_change_dsfield(skb->nh.ipv6h, p->mask[index],
+                       ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index],
                                            p->value[index]);
                        break;
                default:
@@ -310,7 +310,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
                                       "unsupported protocol %d\n",
                                       ntohs(skb->protocol));
                        break;
-       };
+       }
 
        return skb;
 }
@@ -412,16 +412,10 @@ static void dsmark_reset(struct Qdisc *sch)
 static void dsmark_destroy(struct Qdisc *sch)
 {
        struct dsmark_qdisc_data *p = PRIV(sch);
-       struct tcf_proto *tp;
 
        DPRINTK("dsmark_destroy(sch %p,[qdisc %p])\n", sch, p);
 
-       while (p->filter_list) {
-               tp = p->filter_list;
-               p->filter_list = tp->next;
-               tcf_destroy(tp);
-       }
-
+       tcf_destroy_chain(p->filter_list);
        qdisc_destroy(p->q);
        kfree(p->mask);
 }
index 52eb343..3385ee5 100644 (file)
 
 /* Main transmission queue. */
 
-/* Main qdisc structure lock.
-
-   However, modifications
-   to data, participating in scheduling must be additionally
-   protected with dev->queue_lock spinlock.
-
-   The idea is the following:
-   - enqueue, dequeue are serialized via top level device
-     spinlock dev->queue_lock.
-   - tree walking is protected by read_lock(qdisc_tree_lock)
-     and this lock is used only in process context.
-   - updates to tree are made only under rtnl semaphore,
-     hence this lock may be made without local bh disabling.
-
-   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
+/* Modifications to data participating in scheduling must be protected with
+ * dev->queue_lock spinlock.
+ *
+ * The idea is the following:
+ * - enqueue, dequeue are serialized via top level device
+ *   spinlock dev->queue_lock.
+ * - ingress filtering is serialized via top level device
+ *   spinlock dev->ingress_lock.
+ * - updates to tree and tree walking are only done under the rtnl mutex.
  */
-DEFINE_RWLOCK(qdisc_tree_lock);
 
 void qdisc_lock_tree(struct net_device *dev)
 {
-       write_lock(&qdisc_tree_lock);
        spin_lock_bh(&dev->queue_lock);
+       spin_lock(&dev->ingress_lock);
 }
 
 void qdisc_unlock_tree(struct net_device *dev)
 {
+       spin_unlock(&dev->ingress_lock);
        spin_unlock_bh(&dev->queue_lock);
-       write_unlock(&qdisc_tree_lock);
 }
 
 /*
@@ -442,7 +435,6 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
        sch->dequeue = ops->dequeue;
        sch->dev = dev;
        dev_hold(dev);
-       sch->stats_lock = &dev->queue_lock;
        atomic_set(&sch->refcnt, 1);
 
        return sch;
@@ -458,6 +450,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
        sch = qdisc_alloc(dev, ops);
        if (IS_ERR(sch))
                goto errout;
+       sch->stats_lock = &dev->queue_lock;
        sch->parent = parentid;
 
        if (!ops->init || ops->init(sch, NULL) == 0)
@@ -528,15 +521,11 @@ void dev_activate(struct net_device *dev)
                                printk(KERN_INFO "%s: activation failed\n", dev->name);
                                return;
                        }
-                       write_lock(&qdisc_tree_lock);
                        list_add_tail(&qdisc->list, &dev->qdisc_list);
-                       write_unlock(&qdisc_tree_lock);
                } else {
                        qdisc =  &noqueue_qdisc;
                }
-               write_lock(&qdisc_tree_lock);
                dev->qdisc_sleeping = qdisc;
-               write_unlock(&qdisc_tree_lock);
        }
 
        if (!netif_carrier_ok(dev))
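
The rewritten sch_generic.c comment above replaces the global qdisc_tree_lock with a simpler rule: dev->queue_lock serializes enqueue/dequeue, dev->ingress_lock serializes ingress filtering, and tree updates and walks happen only under the RTNL mutex, with qdisc_lock_tree() always taking queue_lock before ingress_lock. As a loose userspace analogy only (pthread mutexes standing in for the spinlocks; names invented), the fixed acquisition order looks like this:

/* Userspace analogy for the qdisc_lock_tree() ordering: always acquire the
 * "queue" lock before the "ingress" lock and release in reverse, so the
 * two data paths can never deadlock against a tree change.  Not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ingress_lock = PTHREAD_MUTEX_INITIALIZER;

static void tree_lock(void)
{
	pthread_mutex_lock(&queue_lock);    /* outer: like dev->queue_lock */
	pthread_mutex_lock(&ingress_lock);  /* inner: like dev->ingress_lock */
}

static void tree_unlock(void)
{
	pthread_mutex_unlock(&ingress_lock);
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	tree_lock();
	printf("both data paths quiesced; safe to graft or destroy qdiscs\n");
	tree_unlock();
	return 0;
}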
index 407c6fb..9d124c4 100644 (file)
 #include <linux/skbuff.h>
 #include <linux/string.h>
 #include <linux/slab.h>
-#include <linux/timer.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/rtnetlink.h>
 #include <linux/pkt_sched.h>
+#include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
 #include <asm/system.h>
@@ -192,23 +192,9 @@ struct hfsc_sched
        struct list_head droplist;              /* active leaf class list (for
                                                   dropping) */
        struct sk_buff_head requeue;            /* requeued packet */
-       struct timer_list wd_timer;             /* watchdog timer */
+       struct qdisc_watchdog watchdog;         /* watchdog timer */
 };
 
-/*
- * macros
- */
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
-#include <linux/time.h>
-#undef PSCHED_GET_TIME
-#define PSCHED_GET_TIME(stamp)                                         \
-do {                                                                   \
-       struct timeval tv;                                              \
-       do_gettimeofday(&tv);                                           \
-       (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec;         \
-} while (0)
-#endif
-
 #define        HT_INFINITY     0xffffffffffffffffULL   /* infinite time value */
 
 
@@ -394,28 +380,17 @@ cftree_update(struct hfsc_class *cl)
  *     ism: (psched_us/byte) << ISM_SHIFT
  *     dx: psched_us
  *
- * Clock source resolution (CONFIG_NET_SCH_CLK_*)
- *  JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
- *  CPU: resolution is between 0.5us and 1us.
- *  GETTIMEOFDAY: resolution is exactly 1us.
+ * The clock source resolution with ktime is 1.024us.
  *
  * sm and ism are scaled in order to keep effective digits.
  * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
  * digits in decimal using the following table.
  *
- * Note: We can afford the additional accuracy (altq hfsc keeps at most
- * 3 effective digits) thanks to the fact that linux clock is bounded
- * much more tightly.
- *
  *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
  *  ------------+-------------------------------------------------------
- *  bytes/0.5us   6.25e-3    62.5e-3    625e-3     6250e-e    62500e-3
- *  bytes/us      12.5e-3    125e-3     1250e-3    12500e-3   125000e-3
- *  bytes/1.27us  15.875e-3  158.75e-3  1587.5e-3  15875e-3   158750e-3
+ *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
  *
- *  0.5us/byte    160        16         1.6        0.16       0.016
- *  us/byte       80         8          0.8        0.08       0.008
- *  1.27us/byte   63         6.3        0.63       0.063      0.0063
+ *  1.024us/byte  78.125     7.8125     0.78125    0.078125   0.0078125
  */
 #define        SM_SHIFT        20
 #define        ISM_SHIFT       18
@@ -460,8 +435,8 @@ m2sm(u32 m)
        u64 sm;
 
        sm = ((u64)m << SM_SHIFT);
-       sm += PSCHED_JIFFIE2US(HZ) - 1;
-       do_div(sm, PSCHED_JIFFIE2US(HZ));
+       sm += PSCHED_TICKS_PER_SEC - 1;
+       do_div(sm, PSCHED_TICKS_PER_SEC);
        return sm;
 }
 
@@ -474,7 +449,7 @@ m2ism(u32 m)
        if (m == 0)
                ism = HT_INFINITY;
        else {
-               ism = ((u64)PSCHED_JIFFIE2US(HZ) << ISM_SHIFT);
+               ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
                ism += m - 1;
                do_div(ism, m);
        }
@@ -487,7 +462,7 @@ d2dx(u32 d)
 {
        u64 dx;
 
-       dx = ((u64)d * PSCHED_JIFFIE2US(HZ));
+       dx = ((u64)d * PSCHED_TICKS_PER_SEC);
        dx += USEC_PER_SEC - 1;
        do_div(dx, USEC_PER_SEC);
        return dx;
@@ -499,7 +474,7 @@ sm2m(u64 sm)
 {
        u64 m;
 
-       m = (sm * PSCHED_JIFFIE2US(HZ)) >> SM_SHIFT;
+       m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
        return (u32)m;
 }
 
@@ -510,7 +485,7 @@ dx2d(u64 dx)
        u64 d;
 
        d = dx * USEC_PER_SEC;
-       do_div(d, PSCHED_JIFFIE2US(HZ));
+       do_div(d, PSCHED_TICKS_PER_SEC);
        return (u32)d;
 }
 
@@ -654,9 +629,7 @@ rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
 static void
 init_ed(struct hfsc_class *cl, unsigned int next_len)
 {
-       u64 cur_time;
-
-       PSCHED_GET_TIME(cur_time);
+       u64 cur_time = psched_get_time();
 
        /* update the deadline curve */
        rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
@@ -779,7 +752,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
                        if (cl->cl_flags & HFSC_USC) {
                                /* class has upper limit curve */
                                if (cur_time == 0)
-                                       PSCHED_GET_TIME(cur_time);
+                                       cur_time = psched_get_time();
 
                                /* update the ulimit curve */
                                rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
@@ -1063,7 +1036,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                        if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
                                return -EINVAL;
                }
-               PSCHED_GET_TIME(cur_time);
+               cur_time = psched_get_time();
 
                sch_tree_lock(sch);
                if (rsc != NULL)
@@ -1148,23 +1121,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        return 0;
 }
 
-static void
-hfsc_destroy_filters(struct tcf_proto **fl)
-{
-       struct tcf_proto *tp;
-
-       while ((tp = *fl) != NULL) {
-               *fl = tp->next;
-               tcf_destroy(tp);
-       }
-}
-
 static void
 hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
 
-       hfsc_destroy_filters(&cl->filter_list);
+       tcf_destroy_chain(cl->filter_list);
        qdisc_destroy(cl->qdisc);
 #ifdef CONFIG_NET_ESTIMATOR
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
@@ -1389,7 +1351,7 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                struct tcmsg *tcm)
 {
        struct hfsc_class *cl = (struct hfsc_class *)arg;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta = (struct rtattr *)b;
 
        tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
@@ -1400,11 +1362,11 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        if (hfsc_dump_curves(skb, cl) < 0)
                goto rtattr_failure;
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
  rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1459,21 +1421,11 @@ hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 }
 
 static void
-hfsc_watchdog(unsigned long arg)
-{
-       struct Qdisc *sch = (struct Qdisc *)arg;
-
-       sch->flags &= ~TCQ_F_THROTTLED;
-       netif_schedule(sch->dev);
-}
-
-static void
-hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
+hfsc_schedule_watchdog(struct Qdisc *sch)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        u64 next_time = 0;
-       long delay;
 
        if ((cl = eltree_get_minel(q)) != NULL)
                next_time = cl->cl_e;
@@ -1482,11 +1434,7 @@ hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
                        next_time = q->root.cl_cfmin;
        }
        WARN_ON(next_time == 0);
-       delay = next_time - cur_time;
-       delay = PSCHED_US2JIFFIE(delay);
-
-       sch->flags |= TCQ_F_THROTTLED;
-       mod_timer(&q->wd_timer, jiffies + delay);
+       qdisc_watchdog_schedule(&q->watchdog, next_time);
 }
 
 static int
@@ -1523,9 +1471,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
 
        list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);
 
-       init_timer(&q->wd_timer);
-       q->wd_timer.function = hfsc_watchdog;
-       q->wd_timer.data = (unsigned long)sch;
+       qdisc_watchdog_init(&q->watchdog, sch);
 
        return 0;
 }
@@ -1595,8 +1541,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
        __skb_queue_purge(&q->requeue);
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
-       del_timer(&q->wd_timer);
-       sch->flags &= ~TCQ_F_THROTTLED;
+       qdisc_watchdog_cancel(&q->watchdog);
        sch->q.qlen = 0;
 }
 
@@ -1612,14 +1557,14 @@ hfsc_destroy_qdisc(struct Qdisc *sch)
                        hfsc_destroy_class(sch, cl);
        }
        __skb_queue_purge(&q->requeue);
-       del_timer(&q->wd_timer);
+       qdisc_watchdog_cancel(&q->watchdog);
 }
 
 static int
 hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tc_hfsc_qopt qopt;
 
        qopt.defcls = q->defcls;
@@ -1627,7 +1572,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
        return skb->len;
 
  rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1681,7 +1626,7 @@ hfsc_dequeue(struct Qdisc *sch)
        if ((skb = __skb_dequeue(&q->requeue)))
                goto out;
 
-       PSCHED_GET_TIME(cur_time);
+       cur_time = psched_get_time();
 
        /*
         * if there are eligible classes, use real-time criteria.
@@ -1698,7 +1643,7 @@ hfsc_dequeue(struct Qdisc *sch)
                cl = vttree_get_minvt(&q->root, cur_time);
                if (cl == NULL) {
                        sch->qstats.overlimits++;
-                       hfsc_schedule_watchdog(sch, cur_time);
+                       hfsc_schedule_watchdog(sch);
                        return NULL;
                }
        }
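
The updated sch_hfsc.c comment above collapses the per-clock-source resolution tables into a single row for the ~1.024 us ktime-derived tick, and m2sm()/m2ism()/d2dx() now scale by PSCHED_TICKS_PER_SEC instead of PSCHED_JIFFIE2US(HZ). The quick standalone check below reproduces that "bytes/1.024us" and "1.024us/byte" row, assuming PSCHED_TICKS_PER_SEC is roughly 10^9 ns >> 10 (about 976562 ticks per second); the kernel's exact constant may differ slightly.

/* Reproduces the rate table row from the comment above under the assumed
 * tick length of 1.024 us.  Purely a sanity check, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC  1000000000ULL
#define TICKS_PER_SEC (NSEC_PER_SEC >> 10)    /* ~976562, one tick = 1.024 us */

int main(void)
{
	const double rates_bps[] = { 1e5, 1e6, 1e7, 1e8, 1e9 };

	for (int i = 0; i < 5; i++) {
		double bytes_per_sec  = rates_bps[i] / 8.0;
		double bytes_per_tick = bytes_per_sec / (double)TICKS_PER_SEC;

		printf("%11.0f bps: %10.6f bytes/tick, %10.6f ticks/byte\n",
		       rates_bps[i], bytes_per_tick, 1.0 / bytes_per_tick);
	}
	return 0;
}

At 1 Mbps this prints about 0.128 bytes per tick and 7.8125 ticks per byte, matching the 128e-3 / 7.8125 column in the comment.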
index 3c3294d..99bcec8 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/skbuff.h>
 #include <linux/list.h>
 #include <linux/compiler.h>
+#include <net/netlink.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <linux/rbtree.h>
@@ -128,7 +129,7 @@ struct htb_class {
        } un;
        struct rb_node node[TC_HTB_NUMPRIO];    /* node for self or feed tree */
        struct rb_node pq_node; /* node for event queue */
-       unsigned long pq_key;   /* the same type as jiffies global */
+       psched_time_t pq_key;
 
        int prio_activity;      /* for which prios are we active */
        enum htb_cmode cmode;   /* current mode of the class */
@@ -179,10 +180,7 @@ struct htb_sched {
        struct rb_root wait_pq[TC_HTB_MAXDEPTH];
 
        /* time of nearest event per level (row) */
-       unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
-
-       /* cached value of jiffies in dequeue */
-       unsigned long jiffies;
+       psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
 
        /* whether we hit non-work conserving class during this dequeue; we use */
        int nwc_hit;            /* this to disable mindelay complaint in dequeue */
@@ -195,7 +193,7 @@ struct htb_sched {
 
        int rate2quantum;       /* quant = rate / rate2quantum */
        psched_time_t now;      /* cached dequeue time */
-       struct timer_list timer;        /* send delay timer */
+       struct qdisc_watchdog watchdog;
 #ifdef HTB_RATECM
        struct timer_list rttim;        /* rate computer timer */
        int recmp_bucket;       /* which hash bucket to recompute next */
@@ -342,19 +340,19 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
 {
        struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
 
-       cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
-       if (cl->pq_key == q->jiffies)
+       cl->pq_key = q->now + delay;
+       if (cl->pq_key == q->now)
                cl->pq_key++;
 
        /* update the nearest event cache */
-       if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
+       if (q->near_ev_cache[cl->level] > cl->pq_key)
                q->near_ev_cache[cl->level] = cl->pq_key;
 
        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
-               if (time_after_eq(cl->pq_key, c->pq_key))
+               if (cl->pq_key >= c->pq_key)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
@@ -679,14 +677,6 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
        return NET_XMIT_SUCCESS;
 }
 
-static void htb_timer(unsigned long arg)
-{
-       struct Qdisc *sch = (struct Qdisc *)arg;
-       sch->flags &= ~TCQ_F_THROTTLED;
-       wmb();
-       netif_schedule(sch->dev);
-}
-
 #ifdef HTB_RATECM
 #define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
 static void htb_rate_timer(unsigned long arg)
@@ -739,7 +729,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
        cl->T = toks
 
        while (cl) {
-               diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
+               diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
                if (cl->level >= level) {
                        if (cl->level == level)
                                cl->xstats.lends++;
@@ -778,11 +768,11 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 /**
  * htb_do_events - make mode changes to classes at the level
  *
- * Scans event queue for pending events and applies them. Returns jiffies to
+ * Scans event queue for pending events and applies them. Returns time of
  * next pending event (0 for no event in pq).
- * Note: Aplied are events whose have cl->pq_key <= jiffies.
  */
-static long htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level)
 {
        int i;
 
@@ -795,18 +785,18 @@ static long htb_do_events(struct htb_sched *q, int level)
                        return 0;
 
                cl = rb_entry(p, struct htb_class, pq_node);
-               if (time_after(cl->pq_key, q->jiffies)) {
-                       return cl->pq_key - q->jiffies;
-               }
+               if (cl->pq_key > q->now)
+                       return cl->pq_key;
+
                htb_safe_rb_erase(p, q->wait_pq + level);
-               diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
+               diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
                htb_change_class_mode(q, cl, &diff);
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree(q, cl, diff);
        }
        if (net_ratelimit())
                printk(KERN_WARNING "htb: too many events !\n");
-       return HZ / 10;
+       return q->now + PSCHED_TICKS_PER_SEC / 10;
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -958,30 +948,12 @@ next:
        return skb;
 }
 
-static void htb_delay_by(struct Qdisc *sch, long delay)
-{
-       struct htb_sched *q = qdisc_priv(sch);
-       if (delay <= 0)
-               delay = 1;
-       if (unlikely(delay > 5 * HZ)) {
-               if (net_ratelimit())
-                       printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
-               delay = 5 * HZ;
-       }
-       /* why don't use jiffies here ? because expires can be in past */
-       mod_timer(&q->timer, q->jiffies + delay);
-       sch->flags |= TCQ_F_THROTTLED;
-       sch->qstats.overlimits++;
-}
-
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
        struct sk_buff *skb = NULL;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
-       long min_delay;
-
-       q->jiffies = jiffies;
+       psched_time_t next_event;
 
        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        skb = __skb_dequeue(&q->direct_queue);
@@ -993,23 +965,25 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 
        if (!sch->q.qlen)
                goto fin;
-       PSCHED_GET_TIME(q->now);
+       q->now = psched_get_time();
 
-       min_delay = LONG_MAX;
+       next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
        q->nwc_hit = 0;
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
-               long delay;
-               if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
-                       delay = htb_do_events(q, level);
-                       q->near_ev_cache[level] =
-                           q->jiffies + (delay ? delay : HZ);
+               psched_time_t event;
+
+               if (q->now >= q->near_ev_cache[level]) {
+                       event = htb_do_events(q, level);
+                       q->near_ev_cache[level] = event ? event :
+                                                         PSCHED_TICKS_PER_SEC;
                } else
-                       delay = q->near_ev_cache[level] - q->jiffies;
+                       event = q->near_ev_cache[level];
+
+               if (event && next_event > event)
+                       next_event = event;
 
-               if (delay && min_delay > delay)
-                       min_delay = delay;
                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz(m);
@@ -1022,7 +996,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
                        }
                }
        }
-       htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
+       sch->qstats.overlimits++;
+       qdisc_watchdog_schedule(&q->watchdog, next_event);
 fin:
        return skb;
 }
@@ -1075,8 +1050,7 @@ static void htb_reset(struct Qdisc *sch)
 
                }
        }
-       sch->flags &= ~TCQ_F_THROTTLED;
-       del_timer(&q->timer);
+       qdisc_watchdog_cancel(&q->watchdog);
        __skb_queue_purge(&q->direct_queue);
        sch->q.qlen = 0;
        memset(q->row, 0, sizeof(q->row));
@@ -1113,14 +1087,12 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);
 
-       init_timer(&q->timer);
+       qdisc_watchdog_init(&q->watchdog, sch);
        skb_queue_head_init(&q->direct_queue);
 
        q->direct_qlen = sch->dev->tx_queue_len;
        if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                q->direct_qlen = 2;
-       q->timer.function = htb_timer;
-       q->timer.data = (unsigned long)sch;
 
 #ifdef HTB_RATECM
        init_timer(&q->rttim);
@@ -1139,7 +1111,7 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct htb_sched *q = qdisc_priv(sch);
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
        struct tc_htb_glob gopt;
        spin_lock_bh(&sch->dev->queue_lock);
@@ -1152,12 +1124,12 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
        rta = (struct rtattr *)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
        spin_unlock_bh(&sch->dev->queue_lock);
        return skb->len;
 rtattr_failure:
        spin_unlock_bh(&sch->dev->queue_lock);
-       skb_trim(skb, skb->tail - skb->data);
+       nlmsg_trim(skb, skb_tail_pointer(skb));
        return -1;
 }
 
@@ -1165,7 +1137,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
 {
        struct htb_class *cl = (struct htb_class *)arg;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
        struct tc_htb_opt opt;
 
@@ -1188,12 +1160,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        opt.prio = cl->un.leaf.prio;
        opt.level = cl->level;
        RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
        spin_unlock_bh(&sch->dev->queue_lock);
        return skb->len;
 rtattr_failure:
        spin_unlock_bh(&sch->dev->queue_lock);
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1264,16 +1236,6 @@ static unsigned long htb_get(struct Qdisc *sch, u32 classid)
        return (unsigned long)cl;
 }
 
-static void htb_destroy_filters(struct tcf_proto **fl)
-{
-       struct tcf_proto *tp;
-
-       while ((tp = *fl) != NULL) {
-               *fl = tp->next;
-               tcf_destroy(tp);
-       }
-}
-
 static inline int htb_parent_last_child(struct htb_class *cl)
 {
        if (!cl->parent)
@@ -1302,7 +1264,7 @@ static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q)
        parent->un.leaf.prio = parent->prio;
        parent->tokens = parent->buffer;
        parent->ctokens = parent->cbuffer;
-       PSCHED_GET_TIME(parent->t_c);
+       parent->t_c = psched_get_time();
        parent->cmode = HTB_CAN_SEND;
 }
 
@@ -1317,7 +1279,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
        qdisc_put_rtab(cl->rate);
        qdisc_put_rtab(cl->ceil);
 
-       htb_destroy_filters(&cl->filter_list);
+       tcf_destroy_chain(cl->filter_list);
 
        while (!list_empty(&cl->children))
                htb_destroy_class(sch, list_entry(cl->children.next,
@@ -1341,7 +1303,7 @@ static void htb_destroy(struct Qdisc *sch)
 {
        struct htb_sched *q = qdisc_priv(sch);
 
-       del_timer_sync(&q->timer);
+       qdisc_watchdog_cancel(&q->watchdog);
 #ifdef HTB_RATECM
        del_timer_sync(&q->rttim);
 #endif
@@ -1349,7 +1311,7 @@ static void htb_destroy(struct Qdisc *sch)
           and surprisingly it worked in 2.4. But it must precede it
           because filter need its target class alive to be able to call
           unbind_filter on it (without Oops). */
-       htb_destroy_filters(&q->filter_list);
+       tcf_destroy_chain(q->filter_list);
 
        while (!list_empty(&q->root))
                htb_destroy_class(sch, list_entry(q->root.next,
@@ -1498,8 +1460,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                /* set class to be in HTB_CAN_SEND state */
                cl->tokens = hopt->buffer;
                cl->ctokens = hopt->cbuffer;
-               cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60);        /* 1min */
-               PSCHED_GET_TIME(cl->t_c);
+               cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;        /* 1min */
+               cl->t_c = psched_get_time();
                cl->cmode = HTB_CAN_SEND;
 
                /* attach to the hash list and parent's family */
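
With the sch_htb.c changes above, htb_do_events() returns an absolute psched time instead of a jiffies delta, and htb_dequeue() keeps the earliest such event, capped at now + 5 seconds, before handing it to qdisc_watchdog_schedule(). The following standalone sketch shows just that "earliest pending event" computation; the per-level event times and the tick length are invented for illustration.

/* Sketch of the next_event selection in the reworked htb_dequeue():
 * start from a 5 second cap, take the minimum of the per-level event
 * times (0 meaning "no event"), and use that as the absolute watchdog
 * expiry.  Values below are made up. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t psched_time_t;

#define TICKS_PER_SEC (1000000000ULL >> 10)   /* assumed ~1.024 us ticks */
#define MAXDEPTH 8

int main(void)
{
	psched_time_t now = 1000000;
	psched_time_t level_event[MAXDEPTH] = {
		0, now + 5000, 0, now + 120, 0, 0, now + 90000, 0
	};
	psched_time_t next_event = now + 5 * TICKS_PER_SEC;   /* 5 second cap */

	for (int level = 0; level < MAXDEPTH; level++) {
		psched_time_t event = level_event[level];

		if (event && next_event > event)
			next_event = event;
	}
	printf("schedule watchdog for tick %llu (%llu ticks from now)\n",
	       (unsigned long long)next_event,
	       (unsigned long long)(next_event - now));
	return 0;
}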
index cfe070e..f8b9f1c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter.h>
 #include <linux/smp.h>
+#include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <asm/byteorder.h>
 #include <asm/uaccess.h>
@@ -169,7 +170,7 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
                        skb->tc_index = TC_H_MIN(res.classid);
                        result = TC_ACT_OK;
                        break;
-       };
+       }
 /* backward compat */
 #else
 #ifdef CONFIG_NET_CLS_POLICE
@@ -186,7 +187,7 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
                sch->bstats.bytes += skb->len;
                result = NF_ACCEPT;
                break;
-       };
+       }
 
 #else
        D2PRINTK("Overriding result to ACCEPT\n");
@@ -247,16 +248,11 @@ ing_hook(unsigned int hook, struct sk_buff **pskb,
                skb->dev ? (*pskb)->dev->name : "(no dev)",
                skb->len);
 
-/*
-revisit later: Use a private since lock dev->queue_lock is also
-used on the egress (might slow things for an iota)
-*/
-
        if (dev->qdisc_ingress) {
-               spin_lock(&dev->queue_lock);
+               spin_lock(&dev->ingress_lock);
                if ((q = dev->qdisc_ingress) != NULL)
                        fwres = q->enqueue(skb, q);
-               spin_unlock(&dev->queue_lock);
+               spin_unlock(&dev->ingress_lock);
        }
 
        return fwres;
@@ -345,14 +341,9 @@ static void ingress_reset(struct Qdisc *sch)
 static void ingress_destroy(struct Qdisc *sch)
 {
        struct ingress_qdisc_data *p = PRIV(sch);
-       struct tcf_proto *tp;
 
        DPRINTK("ingress_destroy(sch %p,[qdisc %p])\n", sch, p);
-       while (p->filter_list) {
-               tp = p->filter_list;
-               p->filter_list = tp->next;
-               tcf_destroy(tp);
-       }
+       tcf_destroy_chain(p->filter_list);
 #if 0
 /* for future use */
        qdisc_destroy(p->q);
@@ -362,16 +353,16 @@ static void ingress_destroy(struct Qdisc *sch)
 
 static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
 
        rta = (struct rtattr *) b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index 1ccbfb5..5d9d8bc 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 
+#include <net/netlink.h>
 #include <net/pkt_sched.h>
 
 #define VERSION "1.2"
 
 struct netem_sched_data {
        struct Qdisc    *qdisc;
-       struct timer_list timer;
+       struct qdisc_watchdog watchdog;
+
+       psched_tdiff_t latency;
+       psched_tdiff_t jitter;
 
-       u32 latency;
        u32 loss;
        u32 limit;
        u32 counter;
        u32 gap;
-       u32 jitter;
        u32 duplicate;
        u32 reorder;
        u32 corrupt;
 
        struct crndstate {
-               unsigned long last;
-               unsigned long rho;
+               u32 last;
+               u32 rho;
        } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
 
        struct disttable {
@@ -95,12 +97,12 @@ static void init_crandom(struct crndstate *state, unsigned long rho)
  * Next number depends on last value.
  * rho is scaled to avoid floating point.
  */
-static unsigned long get_crandom(struct crndstate *state)
+static u32 get_crandom(struct crndstate *state)
 {
        u64 value, rho;
        unsigned long answer;
 
-       if (state->rho == 0)    /* no correllation */
+       if (state->rho == 0)    /* no correlation */
                return net_random();
 
        value = net_random();
@@ -114,11 +116,13 @@ static unsigned long get_crandom(struct crndstate *state)
  * std deviation sigma.  Uses table lookup to approximate the desired
  * distribution, and a uniformly-distributed pseudo-random source.
  */
-static long tabledist(unsigned long mu, long sigma,
-                     struct crndstate *state, const struct disttable *dist)
+static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
+                               struct crndstate *state,
+                               const struct disttable *dist)
 {
-       long t, x;
-       unsigned long rnd;
+       psched_tdiff_t x;
+       long t;
+       u32 rnd;
 
        if (sigma == 0)
                return mu;
@@ -213,8 +217,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                delay = tabledist(q->latency, q->jitter,
                                  &q->delay_cor, q->delay_dist);
 
-               PSCHED_GET_TIME(now);
-               PSCHED_TADD2(now, delay, cb->time_to_send);
+               now = psched_get_time();
+               cb->time_to_send = now + delay;
                ++q->counter;
                ret = q->qdisc->enqueue(skb, q->qdisc);
        } else {
@@ -222,7 +226,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 * Do re-ordering by putting one out of N packets at the front
                 * of the queue.
                 */
-               PSCHED_GET_TIME(cb->time_to_send);
+               cb->time_to_send = psched_get_time();
                q->counter = 0;
                ret = q->qdisc->ops->requeue(skb, q->qdisc);
        }
@@ -269,55 +273,43 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
        struct netem_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
 
+       smp_mb();
+       if (sch->flags & TCQ_F_THROTTLED)
+               return NULL;
+
        skb = q->qdisc->dequeue(q->qdisc);
        if (skb) {
                const struct netem_skb_cb *cb
                        = (const struct netem_skb_cb *)skb->cb;
-               psched_time_t now;
+               psched_time_t now = psched_get_time();
 
                /* if more time remaining? */
-               PSCHED_GET_TIME(now);
-
-               if (PSCHED_TLESS(cb->time_to_send, now)) {
+               if (cb->time_to_send <= now) {
                        pr_debug("netem_dequeue: return skb=%p\n", skb);
                        sch->q.qlen--;
-                       sch->flags &= ~TCQ_F_THROTTLED;
                        return skb;
-               } else {
-                       psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);
-
-                       if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
-                               qdisc_tree_decrease_qlen(q->qdisc, 1);
-                               sch->qstats.drops++;
-                               printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
-                                      q->qdisc->ops->id);
-                       }
+               }
 
-                       mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
-                       sch->flags |= TCQ_F_THROTTLED;
+               if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
+                       qdisc_tree_decrease_qlen(q->qdisc, 1);
+                       sch->qstats.drops++;
+                       printk(KERN_ERR "netem: %s could not requeue\n",
+                              q->qdisc->ops->id);
                }
+
+               qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
        }
 
        return NULL;
 }
 
-static void netem_watchdog(unsigned long arg)
-{
-       struct Qdisc *sch = (struct Qdisc *)arg;
-
-       pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
-       sch->flags &= ~TCQ_F_THROTTLED;
-       netif_schedule(sch->dev);
-}
-
 static void netem_reset(struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset(q->qdisc);
        sch->q.qlen = 0;
-       sch->flags &= ~TCQ_F_THROTTLED;
-       del_timer_sync(&q->timer);
+       qdisc_watchdog_cancel(&q->watchdog);
 }
 
 /* Pass size change message down to embedded FIFO */
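
    The netem conversion above is the template for the rest of the series: each qdisc's private timer_list plus hand-rolled TCQ_F_THROTTLED handling is replaced by a shared qdisc_watchdog. A condensed sketch of the resulting shape, with statistics and qlen bookkeeping omitted (the qdisc_watchdog_* and psched_* calls are the ones used above; everything else, including example_time_to_send(), is illustrative):

        struct example_sched_data {
                struct Qdisc            *qdisc;
                struct qdisc_watchdog   watchdog;
        };

        static int example_init(struct Qdisc *sch, struct rtattr *opt)
        {
                struct example_sched_data *q = qdisc_priv(sch);

                qdisc_watchdog_init(&q->watchdog, sch);         /* bind watchdog to this qdisc */
                return 0;
        }

        static struct sk_buff *example_dequeue(struct Qdisc *sch)
        {
                struct example_sched_data *q = qdisc_priv(sch);
                struct sk_buff *skb = q->qdisc->dequeue(q->qdisc);
                psched_time_t when;

                if (skb == NULL)
                        return NULL;

                when = example_time_to_send(skb);               /* hypothetical helper */
                if (when <= psched_get_time())
                        return skb;                             /* eligible right now */

                /* Not yet due: put it back and let the watchdog unthrottle the
                 * qdisc and reschedule the device when `when` arrives.
                 * (Requeue error handling omitted in this sketch.) */
                q->qdisc->ops->requeue(skb, q->qdisc);
                qdisc_watchdog_schedule(&q->watchdog, when);
                return NULL;
        }

        static void example_reset(struct Qdisc *sch)
        {
                struct example_sched_data *q = qdisc_priv(sch);

                qdisc_watchdog_cancel(&q->watchdog);            /* watchdog helpers manage the throttled state */
        }
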
@@ -438,10 +430,11 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
        q->loss = qopt->loss;
        q->duplicate = qopt->duplicate;
 
-       /* for compatiablity with earlier versions.
-        * if gap is set, need to assume 100% probablity
+       /* for compatibility with earlier versions.
+        * if gap is set, need to assume 100% probability
         */
-       q->reorder = ~0;
+       if (q->gap)
+               q->reorder = ~0;
 
        /* Handle nested options after initial queue options.
         * Should have put all options in nested format but too late now.
@@ -487,22 +480,28 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
  */
 struct fifo_sched_data {
        u32 limit;
+       psched_time_t oldest;
 };
 
 static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
        struct fifo_sched_data *q = qdisc_priv(sch);
        struct sk_buff_head *list = &sch->q;
-       const struct netem_skb_cb *ncb
-               = (const struct netem_skb_cb *)nskb->cb;
+       psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
        struct sk_buff *skb;
 
        if (likely(skb_queue_len(list) < q->limit)) {
+               /* Optimize for add at tail */
+               if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
+                       q->oldest = tnext;
+                       return qdisc_enqueue_tail(nskb, sch);
+               }
+
                skb_queue_reverse_walk(list, skb) {
                        const struct netem_skb_cb *cb
                                = (const struct netem_skb_cb *)skb->cb;
 
-                       if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
+                       if (tnext >= cb->time_to_send)
                                break;
                }
 
@@ -515,7 +514,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
                return NET_XMIT_SUCCESS;
        }
 
-       return qdisc_drop(nskb, sch);
+       return qdisc_reshape_fail(nskb, sch);
 }
 
 static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
@@ -531,6 +530,7 @@ static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
        } else
                q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
 
+       q->oldest = PSCHED_PASTPERFECT;
        return 0;
 }
 
@@ -567,9 +567,7 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
        if (!opt)
                return -EINVAL;
 
-       init_timer(&q->timer);
-       q->timer.function = netem_watchdog;
-       q->timer.data = (unsigned long) sch;
+       qdisc_watchdog_init(&q->watchdog, sch);
 
        q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
                                     TC_H_MAKE(sch->handle, 1));
@@ -590,7 +588,7 @@ static void netem_destroy(struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
 
-       del_timer_sync(&q->timer);
+       qdisc_watchdog_cancel(&q->watchdog);
        qdisc_destroy(q->qdisc);
        kfree(q->delay_dist);
 }
@@ -598,7 +596,7 @@ static void netem_destroy(struct Qdisc *sch)
 static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        const struct netem_sched_data *q = qdisc_priv(sch);
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta = (struct rtattr *) b;
        struct tc_netem_qopt qopt;
        struct tc_netem_corr cor;
@@ -626,12 +624,12 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        corrupt.correlation = q->corrupt_cor.rho;
        RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
 
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
 
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index de889f2..269a6e1 100644 (file)
@@ -32,6 +32,7 @@
 #include <net/ip.h>
 #include <net/route.h>
 #include <linux/skbuff.h>
+#include <net/netlink.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 
@@ -61,7 +62,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                        *qerr = NET_XMIT_SUCCESS;
                case TC_ACT_SHOT:
                        return NULL;
-               };
+               }
 
                if (!q->filter_list ) {
 #else
@@ -188,13 +189,8 @@ prio_destroy(struct Qdisc* sch)
 {
        int prio;
        struct prio_sched_data *q = qdisc_priv(sch);
-       struct tcf_proto *tp;
-
-       while ((tp = q->filter_list) != NULL) {
-               q->filter_list = tp->next;
-               tcf_destroy(tp);
-       }
 
+       tcf_destroy_chain(q->filter_list);
        for (prio=0; prio<q->bands; prio++)
                qdisc_destroy(q->queues[prio]);
 }
@@ -271,7 +267,7 @@ static int prio_init(struct Qdisc *sch, struct rtattr *opt)
 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tc_prio_qopt opt;
 
        opt.bands = q->bands;
@@ -280,7 +276,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index 66f3205..96dfdf7 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <net/ip.h>
+#include <net/netlink.h>
 #include <linux/ipv6.h>
 #include <net/route.h>
 #include <linux/skbuff.h>
@@ -137,7 +138,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
        switch (skb->protocol) {
        case __constant_htons(ETH_P_IP):
        {
-               struct iphdr *iph = skb->nh.iph;
+               const struct iphdr *iph = ip_hdr(skb);
                h = iph->daddr;
                h2 = iph->saddr^iph->protocol;
                if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
@@ -152,7 +153,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
        }
        case __constant_htons(ETH_P_IPV6):
        {
-               struct ipv6hdr *iph = skb->nh.ipv6h;
+               struct ipv6hdr *iph = ipv6_hdr(skb);
                h = iph->daddr.s6_addr32[3];
                h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
                if (iph->nexthdr == IPPROTO_TCP ||
@@ -461,7 +462,7 @@ static void sfq_destroy(struct Qdisc *sch)
 static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct tc_sfq_qopt opt;
 
        opt.quantum = q->quantum;
@@ -476,7 +477,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index 85da8da..5386295 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/etherdevice.h>
 #include <linux/notifier.h>
 #include <net/ip.h>
+#include <net/netlink.h>
 #include <net/route.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
@@ -127,8 +128,8 @@ struct tbf_sched_data
        long    tokens;                 /* Current number of B tokens */
        long    ptokens;                /* Current number of P tokens */
        psched_time_t   t_c;            /* Time check-point */
-       struct timer_list wd_timer;     /* Watchdog timer */
        struct Qdisc    *qdisc;         /* Inner qdisc, default - bfifo queue */
+       struct qdisc_watchdog watchdog; /* Watchdog timer */
 };
 
 #define L2T(q,L)   ((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log])
@@ -185,14 +186,6 @@ static unsigned int tbf_drop(struct Qdisc* sch)
        return len;
 }
 
-static void tbf_watchdog(unsigned long arg)
-{
-       struct Qdisc *sch = (struct Qdisc*)arg;
-
-       sch->flags &= ~TCQ_F_THROTTLED;
-       netif_schedule(sch->dev);
-}
-
 static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
@@ -202,13 +195,12 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 
        if (skb) {
                psched_time_t now;
-               long toks, delay;
+               long toks;
                long ptoks = 0;
                unsigned int len = skb->len;
 
-               PSCHED_GET_TIME(now);
-
-               toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer);
+               now = psched_get_time();
+               toks = psched_tdiff_bounded(now, q->t_c, q->buffer);
 
                if (q->P_tab) {
                        ptoks = toks + q->ptokens;
@@ -230,12 +222,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
                        return skb;
                }
 
-               delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));
-
-               if (delay == 0)
-                       delay = 1;
-
-               mod_timer(&q->wd_timer, jiffies+delay);
+               qdisc_watchdog_schedule(&q->watchdog,
+                                       now + max_t(long, -toks, -ptoks));
 
                /* Maybe we have a shorter packet in the queue,
                   which can be sent now. It sounds cool,
@@ -254,7 +242,6 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
                        sch->qstats.drops++;
                }
 
-               sch->flags |= TCQ_F_THROTTLED;
                sch->qstats.overlimits++;
        }
        return NULL;
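
    The arithmetic in tbf_dequeue() boils down to a classic token bucket: tokens accumulate with elapsed time (bounded by the bucket depth), each packet costs its transmission time, and a deficit tells the watchdog exactly when to fire. A stripped-down sketch of that decision, not the exact tbf code:

        /* tokens, elapsed and cost are all in the same time units; depth is the
         * bucket size ("buffer" in tbf).  Returns 1 if the packet may go now,
         * otherwise leaves *tokens untouched and reports the wait via *wait. */
        static int bucket_can_send(long *tokens, long depth,
                                   long elapsed, long cost, long *wait)
        {
                long toks = *tokens + elapsed;          /* refill with elapsed time */

                if (toks > depth)
                        toks = depth;                   /* cap at the bucket depth */
                toks -= cost;                           /* charge this packet */

                if (toks < 0) {
                        *wait = -toks;                  /* schedule watchdog at now + *wait */
                        return 0;
                }
                *tokens = toks;
                return 1;
        }
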
@@ -266,11 +253,10 @@ static void tbf_reset(struct Qdisc* sch)
 
        qdisc_reset(q->qdisc);
        sch->q.qlen = 0;
-       PSCHED_GET_TIME(q->t_c);
+       q->t_c = psched_get_time();
        q->tokens = q->buffer;
        q->ptokens = q->mtu;
-       sch->flags &= ~TCQ_F_THROTTLED;
-       del_timer(&q->wd_timer);
+       qdisc_watchdog_cancel(&q->watchdog);
 }
 
 static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
@@ -377,11 +363,8 @@ static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
        if (opt == NULL)
                return -EINVAL;
 
-       PSCHED_GET_TIME(q->t_c);
-       init_timer(&q->wd_timer);
-       q->wd_timer.function = tbf_watchdog;
-       q->wd_timer.data = (unsigned long)sch;
-
+       q->t_c = psched_get_time();
+       qdisc_watchdog_init(&q->watchdog, sch);
        q->qdisc = &noop_qdisc;
 
        return tbf_change(sch, opt);
@@ -391,7 +374,7 @@ static void tbf_destroy(struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
 
-       del_timer(&q->wd_timer);
+       qdisc_watchdog_cancel(&q->watchdog);
 
        if (q->P_tab)
                qdisc_put_rtab(q->P_tab);
@@ -404,7 +387,7 @@ static void tbf_destroy(struct Qdisc *sch)
 static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
-       unsigned char    *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;
        struct tc_tbf_qopt opt;
 
@@ -420,12 +403,12 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
        opt.mtu = q->mtu;
        opt.buffer = q->buffer;
        RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
-       rta->rta_len = skb->tail - b;
+       rta->rta_len = skb_tail_pointer(skb) - b;
 
        return skb->len;
 
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
index 587123c..d24914d 100644 (file)
@@ -323,7 +323,7 @@ restart:
                        nores = 1;
                        break;
                }
-               __skb_pull(skb, skb->nh.raw - skb->data);
+               __skb_pull(skb, skb_network_offset(skb));
        } while ((q = NEXT_SLAVE(q)) != start);
 
        if (nores && skb_res == NULL) {
index 78d2ddb..db73ef9 100644 (file)
@@ -143,7 +143,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        /* Initialize the maximum number of new data packets that can be sent
         * in a burst.
         */
-       asoc->max_burst = sctp_max_burst;
+       asoc->max_burst = sp->max_burst;
 
        /* initialize association timers */
        asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
@@ -714,8 +714,16 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
        /* Record the transition on the transport.  */
        switch (command) {
        case SCTP_TRANSPORT_UP:
+               /* If we are moving from UNCONFIRMED state due
+                * to heartbeat success, report the SCTP_ADDR_CONFIRMED
+                * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
+                */
+               if (SCTP_UNCONFIRMED == transport->state &&
+                   SCTP_HEARTBEAT_SUCCESS == error)
+                       spc_state = SCTP_ADDR_CONFIRMED;
+               else
+                       spc_state = SCTP_ADDR_AVAILABLE;
                transport->state = SCTP_ACTIVE;
-               spc_state = SCTP_ADDR_AVAILABLE;
                break;
 
        case SCTP_TRANSPORT_DOWN:
@@ -725,7 +733,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 
        default:
                return;
-       };
+       }
 
        /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
         * user.
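
    On the socket-API side, the SCTP_ADDR_CONFIRMED state added above surfaces in SCTP_PEER_ADDR_CHANGE notifications. A userspace sketch of telling it apart from SCTP_ADDR_AVAILABLE, assuming lksctp-style headers that already define SCTP_ADDR_CONFIRMED:

        #include <stdio.h>
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <netinet/sctp.h>

        static void handle_paddr_change(const union sctp_notification *sn)
        {
                const struct sctp_paddr_change *spc = &sn->sn_paddr_change;

                if (sn->sn_header.sn_type != SCTP_PEER_ADDR_CHANGE)
                        return;

                switch (spc->spc_state) {
                case SCTP_ADDR_CONFIRMED:       /* heartbeat confirmed an UNCONFIRMED peer address */
                        printf("peer address confirmed\n");
                        break;
                case SCTP_ADDR_AVAILABLE:       /* address (re)became reachable */
                        printf("peer address available\n");
                        break;
                default:
                        break;
                }
        }
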
index 5f5ab28..e8c0f74 100644 (file)
@@ -93,8 +93,9 @@ const char *sctp_cname(const sctp_subtype_t cid)
                return "FWD_TSN";
 
        default:
-               return "unknown chunk";
-       };
+               break;
+       }
+
        return "unknown chunk";
 }
 
index 71db668..885109f 100644 (file)
@@ -79,14 +79,10 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 /* Calculate the SCTP checksum of an SCTP packet.  */
 static inline int sctp_rcv_checksum(struct sk_buff *skb)
 {
-       struct sctphdr *sh;
-       __u32 cmp, val;
        struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-       sh = (struct sctphdr *) skb->h.raw;
-       cmp = ntohl(sh->checksum);
-
-       val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
+       struct sctphdr *sh = sctp_hdr(skb);
+       __u32 cmp = ntohl(sh->checksum);
+       __u32 val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
 
        for (; list; list = list->next)
                val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
@@ -138,14 +134,13 @@ int sctp_rcv(struct sk_buff *skb)
        if (skb_linearize(skb))
                goto discard_it;
 
-       sh = (struct sctphdr *) skb->h.raw;
+       sh = sctp_hdr(skb);
 
        /* Pull up the IP and SCTP headers. */
-       __skb_pull(skb, skb->h.raw - skb->data);
+       __skb_pull(skb, skb_transport_offset(skb));
        if (skb->len < sizeof(struct sctphdr))
                goto discard_it;
-       if ((skb->ip_summed != CHECKSUM_UNNECESSARY) &&
-           (sctp_rcv_checksum(skb) < 0))
+       if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0)
                goto discard_it;
 
        skb_pull(skb, sizeof(struct sctphdr));
@@ -154,7 +149,7 @@ int sctp_rcv(struct sk_buff *skb)
        if (skb->len < sizeof(struct sctp_chunkhdr))
                goto discard_it;
 
-       family = ipver2af(skb->nh.iph->version);
+       family = ipver2af(ip_hdr(skb)->version);
        af = sctp_get_af_specific(family);
        if (unlikely(!af))
                goto discard_it;
@@ -510,30 +505,30 @@ void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 void sctp_v4_err(struct sk_buff *skb, __u32 info)
 {
        struct iphdr *iph = (struct iphdr *)skb->data;
-       struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl <<2));
-       int type = skb->h.icmph->type;
-       int code = skb->h.icmph->code;
+       const int ihlen = iph->ihl * 4;
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
        struct sock *sk;
        struct sctp_association *asoc = NULL;
        struct sctp_transport *transport;
        struct inet_sock *inet;
-       char *saveip, *savesctp;
+       sk_buff_data_t saveip, savesctp;
        int err;
 
-       if (skb->len < ((iph->ihl << 2) + 8)) {
+       if (skb->len < ihlen + 8) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
        }
 
        /* Fix up skb to look at the embedded net header. */
-       saveip = skb->nh.raw;
-       savesctp  = skb->h.raw;
-       skb->nh.iph = iph;
-       skb->h.raw = (char *)sh;
-       sk = sctp_err_lookup(AF_INET, skb, sh, &asoc, &transport);
-       /* Put back, the original pointers. */
-       skb->nh.raw = saveip;
-       skb->h.raw = savesctp;
+       saveip = skb->network_header;
+       savesctp = skb->transport_header;
+       skb_reset_network_header(skb);
+       skb_set_transport_header(skb, ihlen);
+       sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
+       /* Put back, the original values. */
+       skb->network_header = saveip;
+       skb->transport_header = savesctp;
        if (!sk) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
@@ -616,7 +611,7 @@ int sctp_rcv_ootb(struct sk_buff *skb)
                        break;
 
                ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
-               if (ch_end > skb->tail)
+               if (ch_end > skb_tail_pointer(skb))
                        break;
 
                /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
@@ -648,7 +643,7 @@ int sctp_rcv_ootb(struct sk_buff *skb)
                }
 
                ch = (sctp_chunkhdr_t *) ch_end;
-       } while (ch_end < skb->tail);
+       } while (ch_end < skb_tail_pointer(skb));
 
        return 0;
 
@@ -905,7 +900,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
        struct sctp_association *asoc;
        union sctp_addr addr;
        union sctp_addr *paddr = &addr;
-       struct sctphdr *sh = (struct sctphdr *) skb->h.raw;
+       struct sctphdr *sh = sctp_hdr(skb);
        sctp_chunkhdr_t *ch;
        union sctp_params params;
        sctp_init_chunk_t *init;
index c30629e..88aa224 100644 (file)
@@ -159,16 +159,16 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
         * the skb->tail.
         */
        if (unlikely(skb_is_nonlinear(chunk->skb))) {
-               if (chunk->chunk_end > chunk->skb->tail)
-                       chunk->chunk_end = chunk->skb->tail;
+               if (chunk->chunk_end > skb_tail_pointer(chunk->skb))
+                       chunk->chunk_end = skb_tail_pointer(chunk->skb);
        }
        skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
        chunk->subh.v = NULL; /* Subheader is no longer valid.  */
 
-       if (chunk->chunk_end < chunk->skb->tail) {
+       if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
                /* This is not a singleton */
                chunk->singleton = 0;
-       } else if (chunk->chunk_end > chunk->skb->tail) {
+       } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
                /* RFC 2960, Section 6.10  Bundling
                 *
                 * Partial chunks MUST NOT be placed in an SCTP packet.
index 0b9c49b..ca527a2 100644 (file)
@@ -122,26 +122,24 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                             int type, int code, int offset, __be32 info)
 {
        struct inet6_dev *idev;
-       struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
-       struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
        struct sock *sk;
        struct sctp_association *asoc;
        struct sctp_transport *transport;
        struct ipv6_pinfo *np;
-       char *saveip, *savesctp;
+       sk_buff_data_t saveip, savesctp;
        int err;
 
        idev = in6_dev_get(skb->dev);
 
        /* Fix up skb to look at the embedded net header. */
-       saveip = skb->nh.raw;
-       savesctp  = skb->h.raw;
-       skb->nh.ipv6h = iph;
-       skb->h.raw = (char *)sh;
-       sk = sctp_err_lookup(AF_INET6, skb, sh, &asoc, &transport);
+       saveip   = skb->network_header;
+       savesctp = skb->transport_header;
+       skb_reset_network_header(skb);
+       skb_set_transport_header(skb, offset);
+       sk = sctp_err_lookup(AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
        /* Put back, the original pointers. */
-       skb->nh.raw = saveip;
-       skb->h.raw = savesctp;
+       skb->network_header   = saveip;
+       skb->transport_header = savesctp;
        if (!sk) {
                ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
                goto out;
@@ -391,13 +389,13 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
        addr->v6.sin6_flowinfo = 0; /* FIXME */
        addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif;
 
-       sh = (struct sctphdr *) skb->h.raw;
+       sh = sctp_hdr(skb);
        if (is_saddr) {
                *port  = sh->source;
-               from = &skb->nh.ipv6h->saddr;
+               from = &ipv6_hdr(skb)->saddr;
        } else {
                *port = sh->dest;
-               from = &skb->nh.ipv6h->daddr;
+               from = &ipv6_hdr(skb)->daddr;
        }
        ipv6_addr_copy(&addr->v6.sin6_addr, from);
 }
@@ -606,7 +604,7 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
        default:
                retval = SCTP_SCOPE_GLOBAL;
                break;
-       };
+       }
 
        return retval;
 }
@@ -699,7 +697,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb)
 /* Was this packet marked by Explicit Congestion Notification? */
 static int sctp_v6_is_ce(const struct sk_buff *skb)
 {
-       return *((__u32 *)(skb->nh.ipv6h)) & htonl(1<<20);
+       return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20);
 }
 
 /* Dump the v6 addr to the seq file. */
@@ -766,19 +764,19 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
        if (msgname) {
                sctp_inet6_msgname(msgname, addr_len);
                sin6 = (struct sockaddr_in6 *)msgname;
-               sh = (struct sctphdr *)skb->h.raw;
+               sh = sctp_hdr(skb);
                sin6->sin6_port = sh->source;
 
                /* Map ipv4 address into v4-mapped-on-v6 address. */
                if (sctp_sk(skb->sk)->v4mapped &&
-                   skb->nh.iph->version == 4) {
+                   ip_hdr(skb)->version == 4) {
                        sctp_v4_map_v6((union sctp_addr *)sin6);
-                       sin6->sin6_addr.s6_addr32[3] = skb->nh.iph->saddr;
+                       sin6->sin6_addr.s6_addr32[3] = ip_hdr(skb)->saddr;
                        return;
                }
 
                /* Otherwise, just copy the v6 address. */
-               ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
+               ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
                if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
                        struct sctp_ulpevent *ev = sctp_skb2event(skb);
                        sin6->sin6_scope_id = ev->iif;
index f875fc3..d85543d 100644 (file)
@@ -176,7 +176,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
        case SCTP_XMIT_OK:
        case SCTP_XMIT_NAGLE_DELAY:
                break;
-       };
+       }
 
        return retval;
 }
index 41abfd1..992f361 100644 (file)
@@ -338,7 +338,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
                                SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
                        q->empty = 0;
                        break;
-               };
+               }
        } else {
                list_add_tail(&chunk->list, &q->control_chunk_list);
                SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
@@ -630,7 +630,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
                        /* Retrieve a new chunk to bundle. */
                        lchunk = sctp_list_dequeue(lqueue);
                        break;
-               };
+               }
 
                /* If we are here due to a retransmit timeout or a fast
                 * retransmit and if there are any chunks left in the retransmit
@@ -779,7 +779,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                default:
                        /* We built a chunk with an illegal type! */
                        BUG();
-               };
+               }
        }
 
        /* Is it OK to send data chunks?  */
@@ -1397,7 +1397,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                                SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
                                dbg_prt_state = 0;
                                dbg_ack_tsn = tsn;
-                       };
+                       }
 
                        dbg_last_ack_tsn = tsn;
 #endif /* SCTP_DEBUG */
@@ -1452,7 +1452,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                                SCTP_DEBUG_PRINTK("KEPT: %08x",tsn);
                                dbg_prt_state = 1;
                                dbg_kept_tsn = tsn;
-                       };
+                       }
 
                        dbg_last_kept_tsn = tsn;
 #endif /* SCTP_DEBUG */
@@ -1476,7 +1476,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                } else {
                        SCTP_DEBUG_PRINTK("\n");
                }
-       };
+       }
 #endif /* SCTP_DEBUG */
        if (transport) {
                if (bytes_acked) {
index e17a823..c361deb 100644 (file)
@@ -235,13 +235,13 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
        port = &addr->v4.sin_port;
        addr->v4.sin_family = AF_INET;
 
-       sh = (struct sctphdr *) skb->h.raw;
+       sh = sctp_hdr(skb);
        if (is_saddr) {
                *port  = sh->source;
-               from = &skb->nh.iph->saddr;
+               from = &ip_hdr(skb)->saddr;
        } else {
                *port = sh->dest;
-               from = &skb->nh.iph->daddr;
+               from = &ip_hdr(skb)->daddr;
        }
        memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr));
 }
@@ -530,7 +530,7 @@ static int sctp_v4_skb_iif(const struct sk_buff *skb)
 /* Was this packet marked by Explicit Congestion Notification? */
 static int sctp_v4_is_ce(const struct sk_buff *skb)
 {
-       return INET_ECN_is_ce(skb->nh.iph->tos);
+       return INET_ECN_is_ce(ip_hdr(skb)->tos);
 }
 
 /* Create and initialize a new sk for the socket returned by accept(). */
@@ -731,15 +731,13 @@ static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
 /* Initialize and copy out a msgname from an inbound skb. */
 static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
 {
-       struct sctphdr *sh;
-       struct sockaddr_in *sin;
-
        if (msgname) {
+               struct sctphdr *sh = sctp_hdr(skb);
+               struct sockaddr_in *sin = (struct sockaddr_in *)msgname;
+
                sctp_inet_msgname(msgname, len);
-               sin = (struct sockaddr_in *)msgname;
-               sh = (struct sctphdr *)skb->h.raw;
                sin->sin_port = sh->source;
-               sin->sin_addr.s_addr = skb->nh.iph->saddr;
+               sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
        }
 }
 
@@ -1044,7 +1042,7 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_cookie_preserve_enable     = 1;
 
        /* Max.Burst                - 4 */
-       sctp_max_burst                  = SCTP_MAX_BURST;
+       sctp_max_burst                  = SCTP_DEFAULT_MAX_BURST;
 
        /* Association.Max.Retrans  - 10 attempts
         * Path.Max.Retrans         - 5  attempts (per destination address)
index f7fb29d..be783a3 100644 (file)
@@ -86,7 +86,7 @@ int sctp_chunk_iif(const struct sctp_chunk *chunk)
        struct sctp_af *af;
        int iif = 0;
 
-       af = sctp_get_af_specific(ipver2af(chunk->skb->nh.iph->version));
+       af = sctp_get_af_specific(ipver2af(ip_hdr(chunk->skb)->version));
        if (af)
                iif = af->skb_iif(chunk->skb);
 
@@ -1143,7 +1143,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 
        /* Adjust the chunk length field.  */
        chunk->chunk_hdr->length = htons(chunklen + padlen + len);
-       chunk->chunk_end = chunk->skb->tail;
+       chunk->chunk_end = skb_tail_pointer(chunk->skb);
 
        return target;
 }
@@ -1168,7 +1168,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
        /* Adjust the chunk length field.  */
        chunk->chunk_hdr->length =
                htons(ntohs(chunk->chunk_hdr->length) + len);
-       chunk->chunk_end = chunk->skb->tail;
+       chunk->chunk_end = skb_tail_pointer(chunk->skb);
 
 out:
        return err;
@@ -1233,7 +1233,7 @@ struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
        asoc->temp = 1;
        skb = chunk->skb;
        /* Create an entry for the source address of the packet.  */
-       af = sctp_get_af_specific(ipver2af(skb->nh.iph->version));
+       af = sctp_get_af_specific(ipver2af(ip_hdr(skb)->version));
        if (unlikely(!af))
                goto fail;
        af->from_skb(&asoc->c.peer_addr, skb, 1);
@@ -2077,7 +2077,7 @@ static int sctp_process_param(struct sctp_association *asoc,
 
                        default: /* Just ignore anything else.  */
                                break;
-                       };
+                       }
                }
                break;
 
@@ -2118,7 +2118,7 @@ static int sctp_process_param(struct sctp_association *asoc,
                SCTP_DEBUG_PRINTK("Ignoring param: %d for association %p.\n",
                                  ntohs(param.p->type), asoc);
                break;
-       };
+       }
 
        return retval;
 }
index 1355674..b37a7ad 100644 (file)
@@ -464,7 +464,7 @@ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
        struct sctp_ulpevent *event;
 
        event = sctp_ulpevent_make_assoc_change(asoc,0, SCTP_CANT_STR_ASSOC,
-                                               (__u16)error, 0, 0,
+                                               (__u16)error, 0, 0, NULL,
                                                GFP_ATOMIC);
 
        if (event)
@@ -492,8 +492,13 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
        /* Cancel any partial delivery in progress. */
        sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
 
-       event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
-                                               (__u16)error, 0, 0,
+       if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
+               event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
+                                               (__u16)error, 0, 0, chunk,
+                                               GFP_ATOMIC);
+       else
+               event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
+                                               (__u16)error, 0, 0, NULL,
                                                GFP_ATOMIC);
        if (event)
                sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
@@ -1004,7 +1009,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
                       status, state, event_type, subtype.chunk);
                BUG();
                break;
-       };
+       }
 
 bail:
        return error;
@@ -1484,7 +1489,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                        printk(KERN_WARNING "Impossible command: %u, %p\n",
                               cmd->verb, cmd->obj.ptr);
                        break;
-               };
+               }
+
                if (error)
                        break;
        }
index e9097cf..9e28a5d 100644 (file)
@@ -186,7 +186,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
         * notification is passed to the upper layer.
         */
        ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
-                                            0, 0, 0, GFP_ATOMIC);
+                                            0, 0, 0, NULL, GFP_ATOMIC);
        if (ev)
                sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
                                SCTP_ULPEVENT(ev));
@@ -629,7 +629,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
                case -SCTP_IERROR_BAD_SIG:
                default:
                        return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
-               };
+               }
        }
 
 
@@ -661,7 +661,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0,
                                             new_asoc->c.sinit_num_ostreams,
                                             new_asoc->c.sinit_max_instreams,
-                                            GFP_ATOMIC);
+                                            NULL, GFP_ATOMIC);
        if (!ev)
                goto nomem_ev;
 
@@ -790,7 +790,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
        ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP,
                                             0, asoc->c.sinit_num_ostreams,
                                             asoc->c.sinit_max_instreams,
-                                            GFP_ATOMIC);
+                                            NULL, GFP_ATOMIC);
 
        if (!ev)
                goto nomem;
@@ -1195,7 +1195,7 @@ static void sctp_tietags_populate(struct sctp_association *new_asoc,
                new_asoc->c.my_ttag   = asoc->c.my_vtag;
                new_asoc->c.peer_ttag = asoc->c.peer_vtag;
                break;
-       };
+       }
 
        /* Other parameters for the endpoint SHOULD be copied from the
         * existing parameters of the association (e.g. number of
@@ -1625,7 +1625,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
        ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
                                             new_asoc->c.sinit_num_ostreams,
                                             new_asoc->c.sinit_max_instreams,
-                                            GFP_ATOMIC);
+                                            NULL, GFP_ATOMIC);
        if (!ev)
                goto nomem_ev;
 
@@ -1691,7 +1691,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
        ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0,
                                             new_asoc->c.sinit_num_ostreams,
                                             new_asoc->c.sinit_max_instreams,
-                                            GFP_ATOMIC);
+                                            NULL, GFP_ATOMIC);
        if (!ev)
                goto nomem_ev;
 
@@ -1786,7 +1786,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
                                             SCTP_COMM_UP, 0,
                                             asoc->c.sinit_num_ostreams,
                                             asoc->c.sinit_max_instreams,
-                                            GFP_ATOMIC);
+                                             NULL, GFP_ATOMIC);
                if (!ev)
                        goto nomem;
 
@@ -1904,7 +1904,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
                case -SCTP_IERROR_BAD_SIG:
                default:
                        return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
-               };
+               }
        }
 
        /* Compare the tie_tag in cookie with the verification tag of
@@ -1936,7 +1936,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
        default: /* Discard packet for all others. */
                retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands);
                break;
-       };
+       }
 
        /* Delete the temporary new association. */
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
@@ -3035,7 +3035,7 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
         * notification is passed to the upper layer.
         */
        ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
-                                            0, 0, 0, GFP_ATOMIC);
+                                            0, 0, 0, NULL, GFP_ATOMIC);
        if (!ev)
                goto nomem;
 
@@ -3115,7 +3115,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
                        break;
 
                ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
-               if (ch_end > skb->tail)
+               if (ch_end > skb_tail_pointer(skb))
                        break;
 
                if (SCTP_CID_SHUTDOWN_ACK == ch->type)
@@ -3130,7 +3130,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
                        return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
                ch = (sctp_chunkhdr_t *) ch_end;
-       } while (ch_end < skb->tail);
+       } while (ch_end < skb_tail_pointer(skb));
 
        if (ootb_shut_ack)
                sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
@@ -4816,7 +4816,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
        default:
                BUG();
                break;
-       };
+       }
 
        if (!reply)
                goto nomem;
@@ -5286,7 +5286,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
                chunk->ecn_ce_done = 1;
 
                af = sctp_get_af_specific(
-                       ipver2af(chunk->skb->nh.iph->version));
+                       ipver2af(ip_hdr(chunk->skb)->version));
 
                if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
                        /* Do real work as sideffect. */
index 5e54b17..523071c 100644 (file)
@@ -101,7 +101,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
        default:
                /* Yikes!  We got an illegal event type.  */
                return &bug;
-       };
+       }
 }
 
 #define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func}
index a1d026f..11938fb 100644 (file)
@@ -941,7 +941,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
        default:
                err = -EINVAL;
                break;
-       };
+       }
 
 out:
        kfree(kaddrs);
@@ -2039,6 +2039,10 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
  *                     SPP_HB_DEMAND - Request a user initiated heartbeat
  *                     to be made immediately.
  *
+ *                     SPP_HB_TIME_IS_ZERO - Specifies that the time for
+ *                     heartbeat delay is to be set to the value of 0
+ *                     milliseconds.
+ *
  *                     SPP_PMTUD_ENABLE - This field will enable PMTU
  *                     discovery upon the specified address. Note that
  *                     if the address feild is empty then all addresses
@@ -2081,13 +2085,30 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                        return error;
        }
 
-       if (params->spp_hbinterval) {
-               if (trans) {
-                       trans->hbinterval = msecs_to_jiffies(params->spp_hbinterval);
-               } else if (asoc) {
-                       asoc->hbinterval = msecs_to_jiffies(params->spp_hbinterval);
-               } else {
-                       sp->hbinterval = params->spp_hbinterval;
+       /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
+        * this field is ignored.  Note also that a value of zero indicates
+        * the current setting should be left unchanged.
+        */
+       if (params->spp_flags & SPP_HB_ENABLE) {
+
+               /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO flag
+                * is set.  This lets a value of 0 be used when this
+                * flag is set.
+                */
+               if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
+                       params->spp_hbinterval = 0;
+
+               if (params->spp_hbinterval ||
+                   (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
+                       if (trans) {
+                               trans->hbinterval =
+                                   msecs_to_jiffies(params->spp_hbinterval);
+                       } else if (asoc) {
+                               asoc->hbinterval =
+                                   msecs_to_jiffies(params->spp_hbinterval);
+                       } else {
+                               sp->hbinterval = params->spp_hbinterval;
+                       }
                }
        }
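
    A userspace sketch of driving the reworked spp_flags handling above: enable heartbeats and force the extra heartbeat delay to zero with SPP_HB_TIME_IS_ZERO. Assumes lksctp-style headers recent enough to define the flag; the wrapper name is illustrative:

        #include <string.h>
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <netinet/sctp.h>

        static int set_zero_hb_delay(int sd, sctp_assoc_t assoc_id)
        {
                struct sctp_paddrparams params;

                memset(&params, 0, sizeof(params));
                params.spp_assoc_id = assoc_id;         /* 0 = socket-level defaults */
                params.spp_flags = SPP_HB_ENABLE | SPP_HB_TIME_IS_ZERO;
                /* spp_hbinterval is ignored when SPP_HB_TIME_IS_ZERO is set */

                return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
                                  &params, sizeof(params));
        }
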
 
@@ -2104,7 +2125,12 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                }
        }
 
-       if (params->spp_pathmtu) {
+       /* When Path MTU discovery is disabled the value specified here will
+        * be the "fixed" path mtu (i.e. the value of the spp_flags field must
+        * include the flag SPP_PMTUD_DISABLE for this field to have any
+        * effect).
+        */
+       if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
                if (trans) {
                        trans->pathmtu = params->spp_pathmtu;
                        sctp_assoc_sync_pmtu(asoc);
@@ -2135,7 +2161,11 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                }
        }
 
-       if (params->spp_sackdelay) {
+       /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
+        * value of this field is ignored.  Note also that a value of zero
+        * indicates the current setting should be left unchanged.
+        */
+       if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
                if (trans) {
                        trans->sackdelay =
                                msecs_to_jiffies(params->spp_sackdelay);
@@ -2163,7 +2193,11 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                }
        }
 
-       if (params->spp_pathmaxrxt) {
+       /* Note that unless the spp_flag is set to SPP_PMTUD_ENABLE the value
+        * of this field is ignored.  Note also that a value of zero
+        * indicates the current setting should be left unchanged.
+        */
+       if ((params->spp_flags & SPP_PMTUD_ENABLE) && params->spp_pathmaxrxt) {
                if (trans) {
                        trans->pathmaxrxt = params->spp_pathmaxrxt;
                } else if (asoc) {
@@ -2255,7 +2289,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
        return 0;
 }
 
-/* 7.1.24. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME)
+/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME)
  *
  *   This options will get or set the delayed ack timer.  The time is set
  *   in milliseconds.  If the assoc_id is 0, then this sets or gets the
@@ -2792,6 +2826,102 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
        return 0;
 }
 
+/*
+ * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
+ *
+ * This option will at a minimum specify if the implementation is doing
+ * fragmented interleave.  Fragmented interleave, for a one to many
+ * socket, is when subsequent calls to receive a message may return
+ * parts of messages from different associations.  Some implementations
+ * may allow you to turn this value on or off.  If so, when turned off,
+ * no fragment interleave will occur (which will cause a head of line
+ * blocking amongst multiple associations sharing the same one to many
+ * socket).  When this option is turned on, then each receive call may
+ * come from a different association (thus the user must receive data
+ * with the extended calls (e.g. sctp_recvmsg) to keep track of which
+ * association each receive belongs to).
+ *
+ * This option takes a boolean value.  A non-zero value indicates that
+ * fragmented interleave is on.  A value of zero indicates that
+ * fragmented interleave is off.
+ *
+ * Note that it is important that an implementation that allows this
+ * option to be turned on, have it off by default.  Otherwise an unaware
+ * application using the one to many model may become confused and act
+ * incorrectly.
+ */
+static int sctp_setsockopt_fragment_interleave(struct sock *sk,
+                                              char __user *optval,
+                                              int optlen)
+{
+       int val;
+
+       if (optlen != sizeof(int))
+               return -EINVAL;
+       if (get_user(val, (int __user *)optval))
+               return -EFAULT;
+
+       sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
+
+       return 0;
+}
+
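
    A userspace sketch of turning the new option on for a one-to-many socket (it defaults to off, as the comment above requires); assumes userspace headers that already define SCTP_FRAGMENT_INTERLEAVE:

        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <netinet/sctp.h>

        static int enable_frag_interleave(int sd)
        {
                int on = 1;             /* boolean: non-zero enables interleave */

                return setsockopt(sd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
                                  &on, sizeof(on));
        }
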
+/*
+ * 7.1.25.  Set or Get the sctp partial delivery point
+ *       (SCTP_PARTIAL_DELIVERY_POINT)
+ * This option will set or get the SCTP partial delivery point.  This
+ * point is the size of a message where the partial delivery API will be
+ * invoked to help free up rwnd space for the peer.  Setting this to a
+ * lower value will cause partial deliveries to happen more often.  The
+ * call's argument is an integer that sets or gets the partial delivery
+ * point.
+ */
+static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
+                                                 char __user *optval,
+                                                 int optlen)
+{
+       u32 val;
+
+       if (optlen != sizeof(u32))
+               return -EINVAL;
+       if (get_user(val, (int __user *)optval))
+               return -EFAULT;
+
+       sctp_sk(sk)->pd_point = val;
+
+       return 0; /* is this the right error code? */
+}
+
+/*
+ * 7.1.28.  Set or Get the maximum burst (SCTP_MAX_BURST)
+ *
+ * This option will allow a user to change the maximum burst of packets
+ * that can be emitted by this association.  Note that the default value
+ * is 4, and some implementations may restrict this setting so that it
+ * can only be lowered.
+ *
+ * NOTE: This text doesn't seem right.  Do this on a socket basis with
+ * future associations inheriting the socket value.
+ */
+static int sctp_setsockopt_maxburst(struct sock *sk,
+                                   char __user *optval,
+                                   int optlen)
+{
+       int val;
+
+       if (optlen != sizeof(int))
+               return -EINVAL;
+       if (get_user(val, (int __user *)optval))
+               return -EFAULT;
+
+       if (val < 0)
+               return -EINVAL;
+
+       sctp_sk(sk)->max_burst = val;
+
+       return 0;
+}
+
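
    And a matching sketch for the other two options added here: cap the burst at 2 packets and request partial delivery once a message exceeds 32 KB of receive window. Again assumes userspace headers that expose the new names; the values are arbitrary examples:

        #include <stdint.h>
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <netinet/sctp.h>

        static int tune_burst_and_pd(int sd)
        {
                int burst = 2;                          /* SCTP_MAX_BURST takes an int */
                uint32_t pd_point = 32 * 1024;          /* SCTP_PARTIAL_DELIVERY_POINT takes a u32 */

                if (setsockopt(sd, IPPROTO_SCTP, SCTP_MAX_BURST,
                               &burst, sizeof(burst)) < 0)
                        return -1;

                return setsockopt(sd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
                                  &pd_point, sizeof(pd_point));
        }
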
 /* API 6.2 setsockopt(), getsockopt()
  *
  * Applications use setsockopt() and getsockopt() to set or retrieve
@@ -2871,6 +3001,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
        case SCTP_DELAYED_ACK_TIME:
                retval = sctp_setsockopt_delayed_ack_time(sk, optval, optlen);
                break;
+       case SCTP_PARTIAL_DELIVERY_POINT:
+               retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
+               break;
 
        case SCTP_INITMSG:
                retval = sctp_setsockopt_initmsg(sk, optval, optlen);
@@ -2906,11 +3039,16 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
        case SCTP_CONTEXT:
                retval = sctp_setsockopt_context(sk, optval, optlen);
                break;
-
+       case SCTP_FRAGMENT_INTERLEAVE:
+               retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
+               break;
+       case SCTP_MAX_BURST:
+               retval = sctp_setsockopt_maxburst(sk, optval, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
-       };
+       }
 
        sctp_release_sock(sk);
 
@@ -3066,6 +3204,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
        sp->default_timetolive = 0;
 
        sp->default_rcv_context = 0;
+       sp->max_burst = sctp_max_burst;
 
        /* Initialize default setup parameters. These parameters
         * can be modified with the SCTP_INITMSG socket option or
@@ -3134,8 +3273,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
        sp->pf = sctp_get_pf_specific(sk->sk_family);
 
        /* Control variables for partial data delivery. */
-       sp->pd_mode           = 0;
+       atomic_set(&sp->pd_mode, 0);
        skb_queue_head_init(&sp->pd_lobby);
+       sp->frag_interleave = 0;
 
        /* Create a per socket endpoint structure.  Even if we
         * change the data structure relationships, this may still
@@ -3642,7 +3782,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
        return 0;
 }
 
-/* 7.1.24. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME)
+/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME)
  *
  *   This options will get or set the delayed ack timer.  The time is set
  *   in milliseconds.  If the assoc_id is 0, then this sets or gets the
@@ -4536,6 +4676,77 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
        return 0;
 }
 
+/*
+ * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
+ * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
+ */
+static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
+                                              char __user *optval, int __user *optlen)
+{
+       int val;
+
+       if (len < sizeof(int))
+               return -EINVAL;
+
+       len = sizeof(int);
+
+       val = sctp_sk(sk)->frag_interleave;
+       if (put_user(len, optlen))
+               return -EFAULT;
+       if (copy_to_user(optval, &val, len))
+               return -EFAULT;
+
+       return 0;
+}
+
+/*
+ * 7.1.25.  Set or Get the sctp partial delivery point
+ * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
+ */
+static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
+                                                 char __user *optval,
+                                                 int __user *optlen)
+{
+        u32 val;
+
+       if (len < sizeof(u32))
+               return -EINVAL;
+
+       len = sizeof(u32);
+
+       val = sctp_sk(sk)->pd_point;
+       if (put_user(len, optlen))
+               return -EFAULT;
+       if (copy_to_user(optval, &val, len))
+               return -EFAULT;
+
+       return -ENOTSUPP;
+}
+
+/*
+ * 7.1.28.  Set or Get the maximum burst (SCTP_MAX_BURST)
+ * (chapter and verse is quoted at sctp_setsockopt_maxburst())
+ */
+static int sctp_getsockopt_maxburst(struct sock *sk, int len,
+                                   char __user *optval,
+                                   int __user *optlen)
+{
+        int val;
+
+       if (len < sizeof(int))
+               return -EINVAL;
+
+       len = sizeof(int);
+
+       val = sctp_sk(sk)->max_burst;
+       if (put_user(len, optlen))
+               return -EFAULT;
+       if (copy_to_user(optval, &val, len))
+               return -EFAULT;
+
+       return -ENOTSUPP;
+}
+
 SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
                                char __user *optval, int __user *optlen)
 {
@@ -4648,10 +4859,21 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
        case SCTP_CONTEXT:
                retval = sctp_getsockopt_context(sk, len, optval, optlen);
                break;
+       case SCTP_FRAGMENT_INTERLEAVE:
+               retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
+                                                            optlen);
+               break;
+       case SCTP_PARTIAL_DELIVERY_POINT:
+               retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
+                                                               optlen);
+               break;
+       case SCTP_MAX_BURST:
+               retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
-       };
+       }
 
        sctp_release_sock(sk);
        return retval;
@@ -4976,7 +5198,8 @@ int sctp_inet_listen(struct socket *sock, int backlog)
                break;
        default:
                break;
-       };
+       }
+
        if (err)
                goto cleanup;
 
@@ -5239,7 +5462,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
 
                default:
                        return -EINVAL;
-               };
+               }
        }
        return 0;
 }
@@ -5742,9 +5965,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
         */
        skb_queue_head_init(&newsp->pd_lobby);
-       sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode;
+       atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
 
-       if (sctp_sk(oldsk)->pd_mode) {
+       if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
                struct sk_buff_head *queue;
 
                /* Decide which queue to move pd_lobby skbs to. */
@@ -5770,7 +5993,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
                 * delivery to finish.
                 */
                if (assoc->ulpq.pd_mode)
-                       sctp_clear_pd(oldsk);
+                       sctp_clear_pd(oldsk, NULL);
 
        }
 
index 4d8c2ab..961df27 100644 (file)
@@ -507,7 +507,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
                        transport->cwnd = max(transport->cwnd/2,
                                                 4*transport->asoc->pathmtu);
                break;
-       };
+       }
 
        transport->partial_bytes_acked = 0;
        SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: "
index 2e11bc8..661ea2d 100644 (file)
@@ -131,19 +131,54 @@ static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
 struct sctp_ulpevent  *sctp_ulpevent_make_assoc_change(
        const struct sctp_association *asoc,
        __u16 flags, __u16 state, __u16 error, __u16 outbound,
-       __u16 inbound, gfp_t gfp)
+       __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp)
 {
        struct sctp_ulpevent *event;
        struct sctp_assoc_change *sac;
        struct sk_buff *skb;
 
-       event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
+       /* If the lower layer passed in the chunk, it will be
+        * an ABORT, so we need to include it in the sac_info.
+        */
+       if (chunk) {
+               /* sctp_inq_pop() has already pulled off the chunk
+                * header.  We need to put it back temporarily.
+                */
+               skb_push(chunk->skb, sizeof(sctp_chunkhdr_t));
+
+               /* Copy the chunk data to a new skb and reserve enough
+                * head room to use as notification.
+                */
+               skb = skb_copy_expand(chunk->skb,
+                                     sizeof(struct sctp_assoc_change), 0, gfp);
+
+               if (!skb)
+                       goto fail;
+
+               /* put back the chunk header now that we have a copy */
+               skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
+
+               /* Embed the event fields inside the cloned skb.  */
+               event = sctp_skb2event(skb);
+               sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
+
+               /* Include the notification structure */
+               sac = (struct sctp_assoc_change *)
+                       skb_push(skb, sizeof(struct sctp_assoc_change));
+
+               /* Trim the buffer to the right length.  */
+               skb_trim(skb, sizeof(struct sctp_assoc_change) +
+                        ntohs(chunk->chunk_hdr->length));
+       } else {
+               event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
                                  MSG_NOTIFICATION, gfp);
-       if (!event)
-               goto fail;
-       skb = sctp_event2skb(event);
-       sac = (struct sctp_assoc_change *)
-               skb_put(skb, sizeof(struct sctp_assoc_change));
+               if (!event)
+                       goto fail;
+
+               skb = sctp_event2skb(event);
+               sac = (struct sctp_assoc_change *) skb_put(skb,
+                                       sizeof(struct sctp_assoc_change));
+       }
 
        /* Socket Extensions for SCTP
         * 5.3.1.1 SCTP_ASSOC_CHANGE
index bfb197e..34eb977 100644 (file)
@@ -138,26 +138,59 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 /* Clear the partial delivery mode for this socket.   Note: This
  * assumes that no association is currently in partial delivery mode.
  */
-int sctp_clear_pd(struct sock *sk)
+int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 {
        struct sctp_sock *sp = sctp_sk(sk);
 
-       sp->pd_mode = 0;
-       if (!skb_queue_empty(&sp->pd_lobby)) {
-               struct list_head *list;
-               sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
-               list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
-               INIT_LIST_HEAD(list);
-               return 1;
+       if (atomic_dec_and_test(&sp->pd_mode)) {
+               /* This means there are no other associations in PD, so
+                * we can go ahead and clear out the lobby in one shot
+                */
+               if (!skb_queue_empty(&sp->pd_lobby)) {
+                       struct list_head *list;
+                       sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
+                       list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
+                       INIT_LIST_HEAD(list);
+                       return 1;
+               }
+       } else {
+               /* There are other associations in PD, so we only need to
+                * pull stuff out of the lobby that belongs to the
+                * association that is exiting PD (all of its notifications
+                * are posted here).
+                */
+               if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
+                       struct sk_buff *skb, *tmp;
+                       struct sctp_ulpevent *event;
+
+                       sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
+                               event = sctp_skb2event(skb);
+                               if (event->asoc == asoc) {
+                                       __skb_unlink(skb, &sp->pd_lobby);
+                                       __skb_queue_tail(&sk->sk_receive_queue,
+                                                        skb);
+                               }
+                       }
+               }
        }
+
        return 0;
 }
 
+/* Set the pd_mode on the socket and ulpq */
+static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
+{
+       struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
+
+       atomic_inc(&sp->pd_mode);
+       ulpq->pd_mode = 1;
+}
+
 /* Clear the pd_mode and restart any pending messages waiting for delivery. */
 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 {
        ulpq->pd_mode = 0;
-       return sctp_clear_pd(ulpq->asoc->base.sk);
+       return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 }
 
 /* If the SKB of 'event' is on a list, it is the first such member
@@ -187,18 +220,35 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
         * the association the cause of the partial delivery.
         */
 
-       if (!sctp_sk(sk)->pd_mode) {
+       if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
-       } else if (ulpq->pd_mode) {
-               if (event->msg_flags & MSG_NOTIFICATION)
-                       queue = &sctp_sk(sk)->pd_lobby;
-               else {
-                       clear_pd = event->msg_flags & MSG_EOR;
-                       queue = &sk->sk_receive_queue;
+       } else {
+               if (ulpq->pd_mode) {
+                       /* If the association is in partial delivery, we
+                        * need to finish delivering the partially processed
+                        * packet before passing any other data.  This is
+                        * because we don't truly support stream interleaving.
+                        */
+                       if ((event->msg_flags & MSG_NOTIFICATION) ||
+                           (SCTP_DATA_NOT_FRAG ==
+                                   (event->msg_flags & SCTP_DATA_FRAG_MASK)))
+                               queue = &sctp_sk(sk)->pd_lobby;
+                       else {
+                               clear_pd = event->msg_flags & MSG_EOR;
+                               queue = &sk->sk_receive_queue;
+                       }
+               } else {
+                       /*
+                        * If fragment interleave is enabled, we
+                        * can queue this to the receive queue instead
+                        * of the lobby.
+                        */
+                       if (sctp_sk(sk)->frag_interleave)
+                               queue = &sk->sk_receive_queue;
+                       else
+                               queue = &sctp_sk(sk)->pd_lobby;
                }
-       } else
-               queue = &sctp_sk(sk)->pd_lobby;
-
+       }
 
        /* If we are harvesting multiple skbs they will be
         * collected on a list.
@@ -341,7 +391,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
                        break;
                pos->next = pnext;
                pos = pnext;
-       };
+       }
 
        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
@@ -360,6 +410,11 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;
+       struct sk_buff *pd_first = NULL;
+       struct sk_buff *pd_last = NULL;
+       size_t pd_len = 0;
+       struct sctp_association *asoc;
+       u32 pd_point;
 
        /* Initialized to 0 just to avoid compiler warning message.  Will
         * never be used with this value. It is referenced only after it
@@ -375,6 +430,10 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
         * we expect to find the remaining middle fragments and the last
         * fragment in order. If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
+        *
+        * There is a potential to do partial delivery if the user sets the
+        * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
+        * to see if we can do PD.
         */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
@@ -382,14 +441,32 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 
                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
+                       /* If this "FIRST_FRAG" is the first
+                        * element in the queue, then count it towards
+                        * possible PD.
+                        */
+                       if (pos == ulpq->reasm.next) {
+                           pd_first = pos;
+                           pd_last = pos;
+                           pd_len = pos->len;
+                       } else {
+                           pd_first = NULL;
+                           pd_last = NULL;
+                           pd_len = 0;
+                       }
+
                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;
 
                case SCTP_DATA_MIDDLE_FRAG:
-                       if ((first_frag) && (ctsn == next_tsn))
+                       if ((first_frag) && (ctsn == next_tsn)) {
                                next_tsn++;
-                       else
+                               if (pd_first) {
+                                   pd_last = pos;
+                                   pd_len += pos->len;
+                               }
+                       } else
                                first_frag = NULL;
                        break;
 
@@ -399,8 +476,29 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
                        else
                                first_frag = NULL;
                        break;
-               };
+               }
+       }
 
+       asoc = ulpq->asoc;
+       if (pd_first) {
+               /* Make sure we can enter partial delivery.
+                * We can trigger partial delivery only if fragment
+                * interleave is set, or the socket is not already
+                * in partial delivery.
+                */
+               if (!sctp_sk(asoc->base.sk)->frag_interleave &&
+                   atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
+                       goto done;
+
+               cevent = sctp_skb2event(pd_first);
+               pd_point = sctp_sk(asoc->base.sk)->pd_point;
+               if (pd_point && pd_point <= pd_len) {
+                       retval = sctp_make_reassembled_event(&ulpq->reasm,
+                                                            pd_first,
+                                                            pd_last);
+                       if (retval)
+                               sctp_ulpq_set_pd(ulpq);
+               }
        }
 done:
        return retval;
@@ -458,7 +556,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
                        goto done;
                default:
                        return NULL;
-               };
+               }
        }
 
        /* We have the reassembled event. There is no need to look
@@ -550,7 +648,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
                        break;
                default:
                        return NULL;
-               };
+               }
        }
 
        /* We have the reassembled event. There is no need to look
@@ -819,19 +917,29 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 {
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
+       struct sctp_sock *sp;
 
        asoc = ulpq->asoc;
+       sp = sctp_sk(asoc->base.sk);
 
-       /* Are we already in partial delivery mode?  */
-       if (!sctp_sk(asoc->base.sk)->pd_mode) {
+       /* If the association is already in Partial Delivery mode,
+        * we have nothing to do.
+        */
+       if (ulpq->pd_mode)
+               return;
 
+       /* If the user enabled the fragment interleave socket option,
+        * multiple associations can enter partial delivery.
+        * Otherwise, we can only enter partial delivery if the
+        * socket is not in partial delivery mode.
+        */
+       if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
                /* Is partial delivery possible?  */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP.   */
                if (event) {
                        sctp_ulpq_tail_event(ulpq, event);
-                       sctp_sk(asoc->base.sk)->pd_mode = 1;
-                       ulpq->pd_mode = 1;
+                       sctp_ulpq_set_pd(ulpq);
                        return;
                }
        }
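
For illustration only (not part of this patch): when an association enters partial delivery as above, user space sees reads that complete without MSG_EOR until the final piece of the message arrives. A sketch of the consumer side, assuming an already-connected SCTP socket descriptor fd and ignoring buffer-overflow handling:

#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Accumulate one whole SCTP message into buf, even if the kernel hands
 * it over in partial-delivery pieces.  Returns the message length, 0 on
 * shutdown, or -1 on error (sketch only). */
static ssize_t read_whole_msg(int fd, char *buf, size_t buflen)
{
	size_t off = 0;

	for (;;) {
		struct iovec iov = { buf + off, buflen - off };
		struct msghdr msg;
		ssize_t n;

		memset(&msg, 0, sizeof(msg));
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;

		n = recvmsg(fd, &msg, 0);
		if (n <= 0)
			return n;
		off += n;

		/* MSG_EOR marks the end of the SCTP message. */
		if (msg.msg_flags & MSG_EOR)
			return off;
	}
}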
index ea8f81a..1ad62c0 100644 (file)
@@ -585,6 +585,37 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
        return result;
 }
 
+/*
+ * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
+ */
+void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+       struct sk_buff *skb)
+{
+       ktime_t kt = skb->tstamp;
+
+       if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
+               struct timeval tv;
+               /* Race occurred between timestamp enabling and packet
+                  receiving.  Fill in the current time for now. */
+               if (kt.tv64 == 0)
+                       kt = ktime_get_real();
+               skb->tstamp = kt;
+               tv = ktime_to_timeval(kt);
+               put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv);
+       } else {
+               struct timespec ts;
+               /* Race occurred between timestamp enabling and packet
+                  receiving.  Fill in the current time for now. */
+               if (kt.tv64 == 0)
+                       kt = ktime_get_real();
+               skb->tstamp = kt;
+               ts = ktime_to_timespec(kt);
+               put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts), &ts);
+       }
+}
+
+EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
+
 static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                                 struct msghdr *msg, size_t size, int flags)
 {
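
For illustration only (not part of this patch): __sock_recv_timestamp() above is what ultimately feeds SCM_TIMESTAMP / SCM_TIMESTAMPNS control messages to recvmsg() callers. A sketch of the user-space consumer, assuming a bound datagram socket fd and abbreviated error handling:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/uio.h>

static void print_rx_timestamp(int fd)
{
	char data[2048];
	char ctrl[CMSG_SPACE(sizeof(struct timeval))];
	struct iovec iov = { data, sizeof(data) };
	struct msghdr msg;
	struct cmsghdr *cmsg;
	int on = 1;

	/* Ask the kernel to attach a receive timestamp to each packet. */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = ctrl;
	msg.msg_controllen = sizeof(ctrl);

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	/* Walk the control messages looking for the timestamp. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			printf("received at %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		}
	}
}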
@@ -1292,7 +1323,7 @@ asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
        int err, fput_needed;
 
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
-       if(sock) {
+       if (sock) {
                err = move_addr_to_kernel(umyaddr, addrlen, address);
                if (err >= 0) {
                        err = security_socket_bind(sock,
index f02f24a..543b085 100644 (file)
@@ -1237,20 +1237,12 @@ static int content_open(struct inode *inode, struct file *file)
 
        return res;
 }
-static int content_release(struct inode *inode, struct file *file)
-{
-       struct seq_file *m = (struct seq_file *)file->private_data;
-       struct handle *han = m->private;
-       kfree(han);
-       m->private = NULL;
-       return seq_release(inode, file);
-}
 
 static const struct file_operations content_file_operations = {
        .open           = content_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = content_release,
+       .release        = seq_release_private,
 };
 
 static ssize_t read_flush(struct file *file, char __user *buf,
index 6d7221f..396cdbe 100644 (file)
@@ -1046,6 +1046,8 @@ call_status(struct rpc_task *task)
                rpc_delay(task, 3*HZ);
        case -ETIMEDOUT:
                task->tk_action = call_timeout;
+               if (task->tk_client->cl_discrtry)
+                       xprt_disconnect(task->tk_xprt);
                break;
        case -ECONNREFUSED:
        case -ENOTCONN:
@@ -1169,6 +1171,8 @@ call_decode(struct rpc_task *task)
 out_retry:
        req->rq_received = req->rq_private_buf.len = 0;
        task->tk_status = 0;
+       if (task->tk_client->cl_discrtry)
+               xprt_disconnect(task->tk_xprt);
 }
 
 /*
index 634885b..1d377d1 100644 (file)
@@ -154,7 +154,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
        desc.offset = sizeof(struct udphdr);
        desc.count = skb->len - desc.offset;
 
-       if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+       if (skb_csum_unnecessary(skb))
                goto no_checksum;
 
        desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
index 2772fee..22f61ae 100644 (file)
@@ -798,16 +798,12 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
                dprintk("svc: recvfrom returned error %d\n", -err);
        }
        rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
-       if (skb->tstamp.off_sec == 0) {
-               struct timeval tv;
-
-               tv.tv_sec = xtime.tv_sec;
-               tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
-               skb_set_timestamp(skb, &tv);
+       if (skb->tstamp.tv64 == 0) {
+               skb->tstamp = ktime_get_real();
                /* Don't enable netstamp, sunrpc doesn't
                   need that much accuracy */
        }
-       skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
+       svsk->sk_sk->sk_stamp = skb->tstamp;
        set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */
 
        /*
index ee6ffa0..456a145 100644 (file)
@@ -735,16 +735,6 @@ void xprt_transmit(struct rpc_task *task)
                        xprt_reset_majortimeo(req);
                        /* Turn off autodisconnect */
                        del_singleshot_timer_sync(&xprt->timer);
-               } else {
-                       /* If all request bytes have been sent,
-                        * then we must be retransmitting this one */
-                       if (!req->rq_bytes_sent) {
-                               if (task->tk_client->cl_discrtry) {
-                                       xprt_disconnect(xprt);
-                                       task->tk_status = -ENOTCONN;
-                                       return;
-                               }
-                       }
                }
        } else if (!req->rq_bytes_sent)
                return;
index 14789a8..c71337a 100644 (file)
@@ -89,7 +89,7 @@ struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
 int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
                        void *tlv_data, int tlv_data_size)
 {
-       struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
+       struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(buf);
        int new_tlv_space = TLV_SPACE(tlv_data_size);
 
        if (skb_tailroom(buf) < new_tlv_space) {
index 9be4839..67bb29b 100644 (file)
@@ -73,7 +73,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
 
        clone = skb_clone(buf, GFP_ATOMIC);
        if (clone) {
-               clone->nh.raw = clone->data;
+               skb_reset_network_header(clone);
                dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
                clone->dev = dev;
                dev->hard_header(clone, dev, ETH_P_TIPC,
@@ -99,8 +99,8 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
 
        if (likely(eb_ptr->bearer)) {
               if (likely(!dev->promiscuity) ||
-                  !memcmp(buf->mac.raw,dev->dev_addr,ETH_ALEN) ||
-                  !memcmp(buf->mac.raw,dev->broadcast,ETH_ALEN)) {
+                  !memcmp(skb_mac_header(buf), dev->dev_addr, ETH_ALEN) ||
+                  !memcmp(skb_mac_header(buf), dev->broadcast, ETH_ALEN)) {
                        size = msg_size((struct tipc_msg *)buf->data);
                        skb_trim(buf, size);
                        if (likely(buf->len == size)) {
@@ -140,7 +140,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
                return -EDQUOT;
        if (!eb_ptr->dev) {
                eb_ptr->dev = dev;
-               eb_ptr->tipc_packet_type.type = __constant_htons(ETH_P_TIPC);
+               eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
                eb_ptr->tipc_packet_type.dev = dev;
                eb_ptr->tipc_packet_type.func = recv_msg;
                eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
index 71c2f2f..2124f32 100644 (file)
@@ -1001,7 +1001,7 @@ static int link_bundle_buf(struct link *l_ptr,
                return 0;
 
        skb_put(bundler, pad + size);
-       memcpy(bundler->data + to_pos, buf->data, size);
+       skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
        msg_set_size(bundler_msg, to_pos + size);
        msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
        dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
@@ -1109,8 +1109,8 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
                        if (bundler) {
                                msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
                                         TIPC_OK, INT_H_SIZE, l_ptr->addr);
-                               memcpy(bundler->data, (unchar *)&bundler_hdr,
-                                      INT_H_SIZE);
+                               skb_copy_to_linear_data(bundler, &bundler_hdr,
+                                                       INT_H_SIZE);
                                skb_trim(bundler, INT_H_SIZE);
                                link_bundle_buf(l_ptr, bundler, buf);
                                buf = bundler;
@@ -1383,9 +1383,9 @@ again:
        if (!buf)
                return -ENOMEM;
        buf->next = NULL;
-       memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+       skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
        hsz = msg_hdr_sz(hdr);
-       memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz);
+       skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
        msg_dbg(buf_msg(buf), ">BUILD>");
 
        /* Chop up message: */
@@ -1416,8 +1416,8 @@ error:
                                return -EFAULT;
                        }
                } else
-                       memcpy(buf->data + fragm_crs, sect_crs, sz);
-
+                       skb_copy_to_linear_data_offset(buf, fragm_crs,
+                                                      sect_crs, sz);
                sect_crs += sz;
                sect_rest -= sz;
                fragm_crs += sz;
@@ -1442,7 +1442,7 @@ error:
 
                        buf->next = NULL;
                        prev->next = buf;
-                       memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+                       skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
                        fragm_crs = INT_H_SIZE;
                        fragm_rest = fragm_sz;
                        msg_dbg(buf_msg(buf),"  >BUILD>");
@@ -2130,7 +2130,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
                buf = l_ptr->proto_msg_queue;
                if (!buf)
                        return;
-               memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
+               skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
                return;
        }
        msg_set_timestamp(msg, jiffies_to_msecs(jiffies));
@@ -2143,7 +2143,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
        if (!buf)
                return;
 
-       memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
+       skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
        msg_set_size(buf_msg(buf), msg_size);
 
        if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
@@ -2319,8 +2319,8 @@ void tipc_link_tunnel(struct link *l_ptr,
                     "unable to send tunnel msg\n");
                return;
        }
-       memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
-       memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
+       skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
+       skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
        dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
        msg_dbg(buf_msg(buf), ">SEND>");
        tipc_link_send_buf(tunnel, buf);
@@ -2361,7 +2361,7 @@ void tipc_link_changeover(struct link *l_ptr)
 
                buf = buf_acquire(INT_H_SIZE);
                if (buf) {
-                       memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
+                       skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
                        msg_set_size(&tunnel_hdr, INT_H_SIZE);
                        dbg("%c->%c:", l_ptr->b_ptr->net_plane,
                            tunnel->b_ptr->net_plane);
@@ -2426,8 +2426,9 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
                             "unable to send duplicate msg\n");
                        return;
                }
-               memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
-               memcpy(outbuf->data + INT_H_SIZE, iter->data, length);
+               skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
+               skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
+                                              length);
                dbg("%c->%c:", l_ptr->b_ptr->net_plane,
                    tunnel->b_ptr->net_plane);
                msg_dbg(buf_msg(outbuf), ">SEND>");
@@ -2457,7 +2458,7 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
 
        eb = buf_acquire(size);
        if (eb)
-               memcpy(eb->data, (unchar *)msg, size);
+               skb_copy_to_linear_data(eb, msg, size);
        return eb;
 }
 
@@ -2569,7 +2570,7 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
                if (obuf == NULL) {
                        warn("Link unable to unbundle message(s)\n");
                        break;
-               };
+               }
                pos += align(msg_size(buf_msg(obuf)));
                msg_dbg(buf_msg(obuf), "     /");
                tipc_net_route_msg(obuf);
@@ -2631,9 +2632,9 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
                        goto exit;
                }
                msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
-               memcpy(fragm->data, (unchar *)&fragm_hdr, INT_H_SIZE);
-               memcpy(fragm->data + INT_H_SIZE, crs, fragm_sz);
-
+               skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
+               skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
+                                              fragm_sz);
                /*  Send queued messages first, if any: */
 
                l_ptr->stats.sent_fragments++;
@@ -2733,8 +2734,8 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
                if (pbuf != NULL) {
                        pbuf->next = *pending;
                        *pending = pbuf;
-                       memcpy(pbuf->data, (unchar *)imsg, msg_data_sz(fragm));
-
+                       skb_copy_to_linear_data(pbuf, imsg,
+                                               msg_data_sz(fragm));
                        /*  Prepare buffer for subsequent fragments. */
 
                        set_long_msg_seqno(pbuf, long_msg_seq_no);
@@ -2750,7 +2751,8 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
                u32 fsz = get_fragm_size(pbuf);
                u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
                u32 exp_frags = get_expected_frags(pbuf) - 1;
-               memcpy(pbuf->data + crs, msg_data(fragm), dsz);
+               skb_copy_to_linear_data_offset(pbuf, crs,
+                                              msg_data(fragm), dsz);
                buf_discard(fbuf);
 
                /* Is message complete? */
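
For reference (an illustrative sketch, not part of this patch): the skb_copy_to_linear_data*() helpers that replace the open-coded memcpy() calls throughout this file are, roughly, thin wrappers over memcpy() into the skb's linear data area, along these lines:

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	/* Copy into the start of the linear data area. */
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						   const int offset,
						   const void *from,
						   const unsigned int len)
{
	/* Copy at an offset into the linear data area. */
	memcpy(skb->data + offset, from, len);
}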
index 62d5490..35d5ba1 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * net/tipc/msg.h: Include file for TIPC message header routines
  *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2000-2007, Ericsson AB
+ * Copyright (c) 2005-2007, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -71,8 +71,11 @@ static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val)
 static inline void msg_set_bits(struct tipc_msg *m, u32 w,
                                u32 pos, u32 mask, u32 val)
 {
-       u32 word = msg_word(m,w) & ~(mask << pos);
-       msg_set_word(m, w, (word |= (val << pos)));
+       val = (val & mask) << pos;
+       val = htonl(val);
+       mask = htonl(mask << pos);
+       m->hdr[w] &= ~mask;
+       m->hdr[w] |= val;
 }
 
 /*
@@ -786,15 +789,16 @@ static inline int msg_build(struct tipc_msg *hdr,
        *buf = buf_acquire(sz);
        if (!(*buf))
                return -ENOMEM;
-       memcpy((*buf)->data, (unchar *)hdr, hsz);
+       skb_copy_to_linear_data(*buf, hdr, hsz);
        for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
                if (likely(usrmem))
                        res = !copy_from_user((*buf)->data + pos,
                                              msg_sect[cnt].iov_base,
                                              msg_sect[cnt].iov_len);
                else
-                       memcpy((*buf)->data + pos, msg_sect[cnt].iov_base,
-                              msg_sect[cnt].iov_len);
+                       skb_copy_to_linear_data_offset(*buf, pos,
+                                                      msg_sect[cnt].iov_base,
+                                                      msg_sect[cnt].iov_len);
                pos += msg_sect[cnt].iov_len;
        }
        if (likely(res))
index b8e1edc..4cdafa2 100644 (file)
@@ -57,7 +57,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
 
        if (rep_buf) {
                skb_push(rep_buf, hdr_space);
-               rep_nlh = (struct nlmsghdr *)rep_buf->data;
+               rep_nlh = nlmsg_hdr(rep_buf);
                memcpy(rep_nlh, req_nlh, hdr_space);
                rep_nlh->nlmsg_len = rep_buf->len;
                genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid);
index 5f8217d..bcd5da0 100644 (file)
@@ -464,7 +464,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
        msg_set_size(rmsg, data_sz + hdr_sz);
        msg_set_nametype(rmsg, msg_nametype(msg));
        msg_set_nameinst(rmsg, msg_nameinst(msg));
-       memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz);
+       skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz);
 
        /* send self-abort message when rejecting on a connected port */
        if (msg_connected(msg)) {
@@ -1419,7 +1419,7 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
                return -ENOMEM;
 
        skb_push(buf, hsz);
-       memcpy(buf->data, (unchar *)msg, hsz);
+       skb_copy_to_linear_data(buf, msg, hsz);
        destnode = msg_destnode(msg);
        p_ptr->publ.congested = 1;
        if (!tipc_port_congested(p_ptr)) {
@@ -1555,7 +1555,7 @@ int tipc_forward_buf2name(u32 ref,
        if (skb_cow(buf, LONG_H_SIZE))
                return -ENOMEM;
        skb_push(buf, LONG_H_SIZE);
-       memcpy(buf->data, (unchar *)msg, LONG_H_SIZE);
+       skb_copy_to_linear_data(buf, msg, LONG_H_SIZE);
        msg_dbg(buf_msg(buf),"PREP:");
        if (likely(destport || destnode)) {
                p_ptr->sent++;
@@ -1679,7 +1679,7 @@ int tipc_forward_buf2port(u32 ref,
                return -ENOMEM;
 
        skb_push(buf, DIR_MSG_H_SIZE);
-       memcpy(buf->data, (unchar *)msg, DIR_MSG_H_SIZE);
+       skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE);
        msg_dbg(msg, "buf2port: ");
        p_ptr->sent++;
        if (dest->node == tipc_own_addr)
index b71739f..45832fb 100644 (file)
@@ -1020,7 +1020,7 @@ restart:
 
        if (!err) {
                buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
-               sz = buf->tail - buf_crs;
+               sz = skb_tail_pointer(buf) - buf_crs;
 
                needed = (buf_len - sz_copied);
                sz_to_copy = (sz <= needed) ? sz : needed;
index 6069716..aec8cf1 100644 (file)
@@ -1319,7 +1319,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
                unix_attach_fds(siocb->scm, skb);
        unix_get_secdata(siocb->scm, skb);
 
-       skb->h.raw = skb->data;
+       skb_reset_transport_header(skb);
        err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
        if (err)
                goto out_free;
index 5d2d93d..7a19e0e 100644 (file)
@@ -277,8 +277,8 @@ int wanrouter_encapsulate(struct sk_buff *skb, struct net_device *dev,
                skb_push(skb, 7);
                skb->data[0] = 0;
                skb->data[1] = NLPID_SNAP;
-               memcpy(&skb->data[2], wanrouter_oui_ether,
-                      sizeof(wanrouter_oui_ether));
+               skb_copy_to_linear_data_offset(skb, 2, wanrouter_oui_ether,
+                                              sizeof(wanrouter_oui_ether));
                *((unsigned short*)&skb->data[5]) = htons(type);
                break;
 
@@ -339,7 +339,7 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev)
        skb->protocol = ethertype;
        skb->pkt_type = PACKET_HOST;    /*      Physically point to point */
        skb_pull(skb, cnt);
-       skb->mac.raw  = skb->data;
+       skb_reset_mac_header(skb);
        return ethertype;
 }
 
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
new file mode 100644 (file)
index 0000000..a228d56
--- /dev/null
@@ -0,0 +1,16 @@
+config CFG80211
+       tristate "Improved wireless configuration API"
+
+config WIRELESS_EXT
+       bool "Wireless extensions"
+       default n
+       ---help---
+         This option enables the legacy wireless extensions
+         (wireless network interface configuration via ioctls.)
+
+         Wireless extensions will be replaced by cfg80211 and
+         will be required only by legacy drivers that implement
+         wireless extension handlers.
+
+         Say N (if you can) unless you know you need wireless
+         extensions for external modules.
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
new file mode 100644 (file)
index 0000000..3a96ae6
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_WIRELESS_EXT) += wext.o
+obj-$(CONFIG_CFG80211) += cfg80211.o
+
+cfg80211-y += core.o sysfs.o
diff --git a/net/wireless/core.c b/net/wireless/core.c
new file mode 100644 (file)
index 0000000..7eabd55
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * This is the linux wireless configuration interface.
+ *
+ * Copyright 2006, 2007                Johannes Berg <johannes@sipsolutions.net>
+ */
+
+#include <linux/if.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/nl80211.h>
+#include <linux/debugfs.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+#include <net/genetlink.h>
+#include <net/cfg80211.h>
+#include <net/wireless.h>
+#include "core.h"
+#include "sysfs.h"
+
+/* name for sysfs, %d is appended */
+#define PHY_NAME "phy"
+
+MODULE_AUTHOR("Johannes Berg");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("wireless configuration support");
+
+/* RCU might be appropriate here since we usually
+ * only read the list, and that can happen quite
+ * often because we need to do it for each command */
+LIST_HEAD(cfg80211_drv_list);
+DEFINE_MUTEX(cfg80211_drv_mutex);
+static int wiphy_counter;
+
+/* for debugfs */
+static struct dentry *ieee80211_debugfs_dir;
+
+/* exported functions */
+
+struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv)
+{
+       struct cfg80211_registered_device *drv;
+       int alloc_size;
+
+       alloc_size = sizeof(*drv) + sizeof_priv;
+
+       drv = kzalloc(alloc_size, GFP_KERNEL);
+       if (!drv)
+               return NULL;
+
+       drv->ops = ops;
+
+       mutex_lock(&cfg80211_drv_mutex);
+
+       drv->idx = wiphy_counter;
+
+       /* now increase counter for the next device unless
+        * it has wrapped previously */
+       if (wiphy_counter >= 0)
+               wiphy_counter++;
+
+       mutex_unlock(&cfg80211_drv_mutex);
+
+       if (unlikely(drv->idx < 0)) {
+               /* ugh, wrapped! */
+               kfree(drv);
+               return NULL;
+       }
+
+       /* give it a proper name */
+       snprintf(drv->wiphy.dev.bus_id, BUS_ID_SIZE,
+                PHY_NAME "%d", drv->idx);
+
+       mutex_init(&drv->mtx);
+       mutex_init(&drv->devlist_mtx);
+       INIT_LIST_HEAD(&drv->netdev_list);
+
+       device_initialize(&drv->wiphy.dev);
+       drv->wiphy.dev.class = &ieee80211_class;
+       drv->wiphy.dev.platform_data = drv;
+
+       return &drv->wiphy;
+}
+EXPORT_SYMBOL(wiphy_new);
+
+int wiphy_register(struct wiphy *wiphy)
+{
+       struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
+       int res;
+
+       mutex_lock(&cfg80211_drv_mutex);
+
+       res = device_add(&drv->wiphy.dev);
+       if (res)
+               goto out_unlock;
+
+       list_add(&drv->list, &cfg80211_drv_list);
+
+       /* add to debugfs */
+       drv->wiphy.debugfsdir =
+               debugfs_create_dir(wiphy_name(&drv->wiphy),
+                                  ieee80211_debugfs_dir);
+
+       res = 0;
+out_unlock:
+       mutex_unlock(&cfg80211_drv_mutex);
+       return res;
+}
+EXPORT_SYMBOL(wiphy_register);
+
+void wiphy_unregister(struct wiphy *wiphy)
+{
+       struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
+
+       /* protect the device list */
+       mutex_lock(&cfg80211_drv_mutex);
+
+       BUG_ON(!list_empty(&drv->netdev_list));
+
+       /*
+        * Try to grab drv->mtx. If a command is still in progress,
+        * hopefully the driver will refuse it since it's tearing
+        * down the device already. We wait for this command to complete
+        * before unlinking the item from the list.
+        * Note: as codified by the BUG_ON above we cannot get here if
+        * a virtual interface is still associated. Hence, we can only
+        * get to lock contention here if userspace issues a command
+        * that identified the hardware by wiphy index.
+        */
+       mutex_lock(&drv->mtx);
+       /* unlock again before freeing */
+       mutex_unlock(&drv->mtx);
+
+       list_del(&drv->list);
+       device_del(&drv->wiphy.dev);
+       debugfs_remove(drv->wiphy.debugfsdir);
+
+       mutex_unlock(&cfg80211_drv_mutex);
+}
+EXPORT_SYMBOL(wiphy_unregister);
+
+void cfg80211_dev_free(struct cfg80211_registered_device *drv)
+{
+       mutex_destroy(&drv->mtx);
+       mutex_destroy(&drv->devlist_mtx);
+       kfree(drv);
+}
+
+void wiphy_free(struct wiphy *wiphy)
+{
+       put_device(&wiphy->dev);
+}
+EXPORT_SYMBOL(wiphy_free);
+
+static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
+                                        unsigned long state,
+                                        void *ndev)
+{
+       struct net_device *dev = ndev;
+       struct cfg80211_registered_device *rdev;
+
+       if (!dev->ieee80211_ptr)
+               return 0;
+
+       rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
+
+       switch (state) {
+       case NETDEV_REGISTER:
+               mutex_lock(&rdev->devlist_mtx);
+               list_add(&dev->ieee80211_ptr->list, &rdev->netdev_list);
+               if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
+                                     "phy80211")) {
+                       printk(KERN_ERR "wireless: failed to add phy80211 "
+                               "symlink to netdev!\n");
+               }
+               dev->ieee80211_ptr->netdev = dev;
+               mutex_unlock(&rdev->devlist_mtx);
+               break;
+       case NETDEV_UNREGISTER:
+               mutex_lock(&rdev->devlist_mtx);
+               if (!list_empty(&dev->ieee80211_ptr->list)) {
+                       sysfs_remove_link(&dev->dev.kobj, "phy80211");
+                       list_del_init(&dev->ieee80211_ptr->list);
+               }
+               mutex_unlock(&rdev->devlist_mtx);
+               break;
+       }
+
+       return 0;
+}
+
+static struct notifier_block cfg80211_netdev_notifier = {
+       .notifier_call = cfg80211_netdev_notifier_call,
+};
+
+static int cfg80211_init(void)
+{
+       int err = wiphy_sysfs_init();
+       if (err)
+               goto out_fail_sysfs;
+
+       err = register_netdevice_notifier(&cfg80211_netdev_notifier);
+       if (err)
+               goto out_fail_notifier;
+
+       ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL);
+
+       return 0;
+
+out_fail_notifier:
+       wiphy_sysfs_exit();
+out_fail_sysfs:
+       return err;
+}
+module_init(cfg80211_init);
+
+static void cfg80211_exit(void)
+{
+       debugfs_remove(ieee80211_debugfs_dir);
+       unregister_netdevice_notifier(&cfg80211_netdev_notifier);
+       wiphy_sysfs_exit();
+}
+module_exit(cfg80211_exit);
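
For illustration only (not part of this patch): a skeletal sketch of how a hypothetical driver would use the registration API exported above. Names prefixed mydrv_ are invented for this example and the ops table is left empty:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <net/cfg80211.h>

/* Hypothetical per-device private data carried in the wiphy allocation. */
struct mydrv_priv {
	int dummy;
};

static struct cfg80211_ops mydrv_cfg_ops = {
	/* no callbacks filled in for this sketch */
};

static struct wiphy *mydrv_wiphy;

static int __init mydrv_init(void)
{
	int err;

	/* Allocate a wiphy with room for our private data. */
	mydrv_wiphy = wiphy_new(&mydrv_cfg_ops, sizeof(struct mydrv_priv));
	if (!mydrv_wiphy)
		return -ENOMEM;

	err = wiphy_register(mydrv_wiphy);
	if (err) {
		wiphy_free(mydrv_wiphy);
		return err;
	}
	return 0;
}

static void __exit mydrv_exit(void)
{
	wiphy_unregister(mydrv_wiphy);
	wiphy_free(mydrv_wiphy);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");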
diff --git a/net/wireless/core.h b/net/wireless/core.h
new file mode 100644 (file)
index 0000000..158db1e
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Wireless configuration interface internals.
+ *
+ * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net>
+ */
+#ifndef __NET_WIRELESS_CORE_H
+#define __NET_WIRELESS_CORE_H
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <net/genetlink.h>
+#include <net/wireless.h>
+#include <net/cfg80211.h>
+
+struct cfg80211_registered_device {
+       struct cfg80211_ops *ops;
+       struct list_head list;
+       /* we hold this mutex during any call so that
+        * we cannot do multiple calls at once, and also
+        * to prevent the deregister call from proceeding while
+        * any call is in progress */
+       struct mutex mtx;
+
+       /* wiphy index, internal only */
+       int idx;
+
+       /* associated netdev list */
+       struct mutex devlist_mtx;
+       struct list_head netdev_list;
+
+       /* must be last because of the way we do wiphy_priv(),
+        * and it should at least be aligned to NETDEV_ALIGN */
+       struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
+};
+
+static inline
+struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy)
+{
+       BUG_ON(!wiphy);
+       return container_of(wiphy, struct cfg80211_registered_device, wiphy);
+}
+
+extern struct mutex cfg80211_drv_mutex;
+extern struct list_head cfg80211_drv_list;
+
+/* free object */
+extern void cfg80211_dev_free(struct cfg80211_registered_device *drv);
+
+#endif /* __NET_WIRELESS_CORE_H */
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
new file mode 100644 (file)
index 0000000..3ebae14
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * This file provides /sys/class/ieee80211/<wiphy name>/
+ * and some default attributes.
+ *
+ * Copyright 2005-2006 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2006      Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is GPLv2 as found in COPYING.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/nl80211.h>
+#include <linux/rtnetlink.h>
+#include <net/cfg80211.h>
+#include "sysfs.h"
+#include "core.h"
+
+static inline struct cfg80211_registered_device *dev_to_rdev(
+       struct device *dev)
+{
+       return container_of(dev, struct cfg80211_registered_device, wiphy.dev);
+}
+
+static ssize_t _show_index(struct device *dev, struct device_attribute *attr,
+                          char *buf)
+{
+       return sprintf(buf, "%d\n", dev_to_rdev(dev)->idx);
+}
+
+static ssize_t _show_permaddr(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       char *addr = dev_to_rdev(dev)->wiphy.perm_addr;
+
+       return sprintf(buf, "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+                      addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+}
+
+static struct device_attribute ieee80211_dev_attrs[] = {
+       __ATTR(index, S_IRUGO, _show_index, NULL),
+       __ATTR(macaddress, S_IRUGO, _show_permaddr, NULL),
+       {}
+};
+
+static void wiphy_dev_release(struct device *dev)
+{
+       struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
+
+       cfg80211_dev_free(rdev);
+}
+
+static int wiphy_uevent(struct device *dev, char **envp,
+                       int num_envp, char *buf, int size)
+{
+       /* TODO, we probably need stuff here */
+       return 0;
+}
+
+struct class ieee80211_class = {
+       .name = "ieee80211",
+       .owner = THIS_MODULE,
+       .dev_release = wiphy_dev_release,
+       .dev_attrs = ieee80211_dev_attrs,
+#ifdef CONFIG_HOTPLUG
+       .dev_uevent = wiphy_uevent,
+#endif
+};
+
+int wiphy_sysfs_init(void)
+{
+       return class_register(&ieee80211_class);
+}
+
+void wiphy_sysfs_exit(void)
+{
+       class_unregister(&ieee80211_class);
+}
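
For illustration only (not part of this patch): the two attributes above surface as /sys/class/ieee80211/<phyname>/index and .../macaddress. A trivial user-space sketch that reads them; "phy0" is a hypothetical name produced by the PHY_NAME "%d" scheme in core.c:

#include <stdio.h>

static void dump_attr(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return;
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	dump_attr("/sys/class/ieee80211/phy0/index");
	dump_attr("/sys/class/ieee80211/phy0/macaddress");
	return 0;
}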
diff --git a/net/wireless/sysfs.h b/net/wireless/sysfs.h
new file mode 100644 (file)
index 0000000..65acbeb
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef __WIRELESS_SYSFS_H
+#define __WIRELESS_SYSFS_H
+
+extern int wiphy_sysfs_init(void);
+extern void wiphy_sysfs_exit(void);
+
+extern struct class ieee80211_class;
+
+#endif /* __WIRELESS_SYSFS_H */
similarity index 56%
rename from net/core/wireless.c
rename to net/wireless/wext.c
index b07fe27..d6aaf65 100644 (file)
 #include <linux/wireless.h>            /* Pretty obvious */
 #include <net/iw_handler.h>            /* New driver API */
 #include <net/netlink.h>
+#include <net/wext.h>
 
 #include <asm/uaccess.h>               /* copy_to_user() */
 
-/**************************** CONSTANTS ****************************/
-
-/* Debugging stuff */
-#undef WE_IOCTL_DEBUG          /* Debug IOCTL API */
-#undef WE_RTNETLINK_DEBUG      /* Debug RtNetlink API */
-#undef WE_EVENT_DEBUG          /* Debug Event dispatcher */
-#undef WE_SPY_DEBUG            /* Debug enhanced spy support */
-
-/* Options */
-//CONFIG_NET_WIRELESS_RTNETLINK        /* Wireless requests over RtNetlink */
-#define WE_EVENT_RTNETLINK     /* Propagate events using RtNetlink */
-#define WE_SET_EVENT           /* Generate an event on some set commands */
-
 /************************* GLOBAL VARIABLES *************************/
 /*
  * You should not use global variables, because of re-entrancy.
@@ -349,8 +337,7 @@ static const struct iw_ioctl_description standard_ioctl[] = {
                .max_tokens     = sizeof(struct iw_pmksa),
        },
 };
-static const unsigned standard_ioctl_num = (sizeof(standard_ioctl) /
-                                           sizeof(struct iw_ioctl_description));
+static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
 
 /*
  * Meta-data about all the additional standard Wireless Extension events
@@ -400,8 +387,7 @@ static const struct iw_ioctl_description standard_event[] = {
                .max_tokens     = sizeof(struct iw_pmkid_cand),
        },
 };
-static const unsigned standard_event_num = (sizeof(standard_event) /
-                                           sizeof(struct iw_ioctl_description));
+static const unsigned standard_event_num = ARRAY_SIZE(standard_event);
 
 /* Size (in bytes) of the various private data types */
 static const char iw_priv_type_size[] = {
@@ -454,26 +440,24 @@ static const int event_type_pk_size[] = {
 /* ---------------------------------------------------------------- */
 /*
  * Return the driver handler associated with a specific Wireless Extension.
- * Called from various place, so make sure it remains efficient.
  */
-static inline iw_handler get_handler(struct net_device *dev,
-                                    unsigned int cmd)
+static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
 {
        /* Don't "optimise" the following variable, it will crash */
        unsigned int    index;          /* *MUST* be unsigned */
 
        /* Check if we have some wireless handlers defined */
-       if(dev->wireless_handlers == NULL)
+       if (dev->wireless_handlers == NULL)
                return NULL;
 
        /* Try as a standard command */
        index = cmd - SIOCIWFIRST;
-       if(index < dev->wireless_handlers->num_standard)
+       if (index < dev->wireless_handlers->num_standard)
                return dev->wireless_handlers->standard[index];
 
        /* Try as a private command */
        index = cmd - SIOCIWFIRSTPRIV;
-       if(index < dev->wireless_handlers->num_private)
+       if (index < dev->wireless_handlers->num_private)
                return dev->wireless_handlers->private[index];
 
        /* Not found */
@@ -484,15 +468,15 @@ static inline iw_handler get_handler(struct net_device *dev,
 /*
  * Get statistics out of the driver
  */
-static inline struct iw_statistics *get_wireless_stats(struct net_device *dev)
+static struct iw_statistics *get_wireless_stats(struct net_device *dev)
 {
        /* New location */
-       if((dev->wireless_handlers != NULL) &&
+       if ((dev->wireless_handlers != NULL) &&
           (dev->wireless_handlers->get_wireless_stats != NULL))
                return dev->wireless_handlers->get_wireless_stats(dev);
 
        /* Not found */
-       return (struct iw_statistics *) NULL;
+       return NULL;
 }
 
 /* ---------------------------------------------------------------- */
@@ -514,14 +498,14 @@ static inline struct iw_statistics *get_wireless_stats(struct net_device *dev)
  * netif_running(dev) test. I'm open on that one...
  * Hopefully, the driver will remember to do a commit in "open()" ;-)
  */
-static inline int call_commit_handler(struct net_device *      dev)
+static int call_commit_handler(struct net_device *dev)
 {
-       if((netif_running(dev)) &&
-          (dev->wireless_handlers->standard[0] != NULL)) {
+       if ((netif_running(dev)) &&
+          (dev->wireless_handlers->standard[0] != NULL))
                /* Call the commit handler on the driver */
                return dev->wireless_handlers->standard[0](dev, NULL,
                                                           NULL, NULL);
-       else
+       else
                return 0;               /* Command completed successfully */
 }
 
@@ -570,14 +554,13 @@ static int iw_handler_get_iwstats(struct net_device *             dev,
        struct iw_statistics *stats;
 
        stats = get_wireless_stats(dev);
-       if (stats != (struct iw_statistics *) NULL) {
-
+       if (stats) {
                /* Copy statistics to extra */
                memcpy(extra, stats, sizeof(struct iw_statistics));
                wrqu->data.length = sizeof(struct iw_statistics);
 
                /* Check if we need to clear the updated flag */
-               if(wrqu->data.flags != 0)
+               if (wrqu->data.flags != 0)
                        stats->qual.updated &= ~IW_QUAL_ALL_UPDATED;
                return 0;
        } else
@@ -596,12 +579,12 @@ static int iw_handler_get_private(struct net_device *             dev,
                                  char *                        extra)
 {
        /* Check if the driver has something to export */
-       if((dev->wireless_handlers->num_private_args == 0) ||
+       if ((dev->wireless_handlers->num_private_args == 0) ||
           (dev->wireless_handlers->private_args == NULL))
                return -EOPNOTSUPP;
 
        /* Check if there is enough buffer up there */
-       if(wrqu->data.length < dev->wireless_handlers->num_private_args) {
+       if (wrqu->data.length < dev->wireless_handlers->num_private_args) {
                /* User space can't know in advance how large the buffer
                 * needs to be. Give it a hint, so that we can support
                 * any size buffer we want somewhat efficiently... */
@@ -636,8 +619,8 @@ static int iw_handler_get_private(struct net_device *               dev,
 /*
  * Print one entry (line) of /proc/net/wireless
  */
-static __inline__ void wireless_seq_printf_stats(struct seq_file *seq,
-                                                struct net_device *dev)
+static void wireless_seq_printf_stats(struct seq_file *seq,
+                                     struct net_device *dev)
 {
        /* Get stats from the driver */
        struct iw_statistics *stats = get_wireless_stats(dev);
@@ -680,7 +663,7 @@ static int wireless_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations wireless_seq_ops = {
+static const struct seq_operations wireless_seq_ops = {
        .start = dev_seq_start,
        .next  = dev_seq_next,
        .stop  = dev_seq_stop,
@@ -700,7 +683,7 @@ static const struct file_operations wireless_seq_fops = {
        .release = seq_release,
 };
 
-int __init wireless_proc_init(void)
+int __init wext_proc_init(void)
 {
        /* Create /proc/net/wireless entry */
        if (!proc_net_fops_create("wireless", S_IRUGO, &wireless_seq_fops))
@@ -735,32 +718,24 @@ static int ioctl_standard_call(struct net_device *        dev,
        int                                     ret = -EINVAL;
 
        /* Get the description of the IOCTL */
-       if((cmd - SIOCIWFIRST) >= standard_ioctl_num)
+       if ((cmd - SIOCIWFIRST) >= standard_ioctl_num)
                return -EOPNOTSUPP;
        descr = &(standard_ioctl[cmd - SIOCIWFIRST]);
 
-#ifdef WE_IOCTL_DEBUG
-       printk(KERN_DEBUG "%s (WE) : Found standard handler for 0x%04X\n",
-              ifr->ifr_name, cmd);
-       printk(KERN_DEBUG "%s (WE) : Header type : %d, Token type : %d, size : %d, token : %d\n", dev->name, descr->header_type, descr->token_type, descr->token_size, descr->max_tokens);
-#endif /* WE_IOCTL_DEBUG */
-
        /* Prepare the call */
        info.cmd = cmd;
        info.flags = 0;
 
        /* Check if we have a pointer to user space data or not */
-       if(descr->header_type != IW_HEADER_TYPE_POINT) {
+       if (descr->header_type != IW_HEADER_TYPE_POINT) {
 
                /* No extra arguments. Trivial to handle */
                ret = handler(dev, &info, &(iwr->u), NULL);
 
-#ifdef WE_SET_EVENT
                /* Generate an event to notify listeners of the change */
-               if((descr->flags & IW_DESCR_FLAG_EVENT) &&
+               if ((descr->flags & IW_DESCR_FLAG_EVENT) &&
                   ((ret == 0) || (ret == -EIWCOMMIT)))
                        wireless_send_event(dev, cmd, &(iwr->u), NULL);
-#endif /* WE_SET_EVENT */
        } else {
                char *  extra;
                int     extra_size;
@@ -800,19 +775,19 @@ static int ioctl_standard_call(struct net_device *        dev,
                iwr->u.data.length -= essid_compat;
 
                /* Check what user space is giving us */
-               if(IW_IS_SET(cmd)) {
+               if (IW_IS_SET(cmd)) {
                        /* Check NULL pointer */
-                       if((iwr->u.data.pointer == NULL) &&
+                       if ((iwr->u.data.pointer == NULL) &&
                           (iwr->u.data.length != 0))
                                return -EFAULT;
                        /* Check if number of token fits within bounds */
-                       if(iwr->u.data.length > descr->max_tokens)
+                       if (iwr->u.data.length > descr->max_tokens)
                                return -E2BIG;
-                       if(iwr->u.data.length < descr->min_tokens)
+                       if (iwr->u.data.length < descr->min_tokens)
                                return -EINVAL;
                } else {
                        /* Check NULL pointer */
-                       if(iwr->u.data.pointer == NULL)
+                       if (iwr->u.data.pointer == NULL)
                                return -EFAULT;
                        /* Save user space buffer size for checking */
                        user_length = iwr->u.data.length;
@@ -822,7 +797,7 @@ static int ioctl_standard_call(struct net_device *  dev,
                         * implied by the test at the end. */
 
                        /* Support for very large requests */
-                       if((descr->flags & IW_DESCR_FLAG_NOMAX) &&
+                       if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
                           (user_length > descr->max_tokens)) {
                                /* Allow userspace to GET more than max so
                                 * we can support any size GET requests.
@@ -835,20 +810,14 @@ static int ioctl_standard_call(struct net_device *        dev,
                        }
                }
 
-#ifdef WE_IOCTL_DEBUG
-               printk(KERN_DEBUG "%s (WE) : Malloc %d bytes\n",
-                      dev->name, extra_size);
-#endif /* WE_IOCTL_DEBUG */
-
                /* Create the kernel buffer */
                /*    kzalloc ensures NULL-termination for essid_compat */
                extra = kzalloc(extra_size, GFP_KERNEL);
-               if (extra == NULL) {
+               if (extra == NULL)
                        return -ENOMEM;
-               }
 
                /* If it is a SET, get all the extra data in here */
-               if(IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
+               if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
                        err = copy_from_user(extra, iwr->u.data.pointer,
                                             iwr->u.data.length *
                                             descr->token_size);
@@ -856,11 +825,6 @@ static int ioctl_standard_call(struct net_device * dev,
                                kfree(extra);
                                return -EFAULT;
                        }
-#ifdef WE_IOCTL_DEBUG
-                       printk(KERN_DEBUG "%s (WE) : Got %d bytes\n",
-                              dev->name,
-                              iwr->u.data.length * descr->token_size);
-#endif /* WE_IOCTL_DEBUG */
                }
 
                /* Call the handler */
@@ -871,7 +835,7 @@ static int ioctl_standard_call(struct net_device *  dev,
                /* If we have something to return to the user */
                if (!ret && IW_IS_GET(cmd)) {
                        /* Check if there is enough buffer up there */
-                       if(user_length < iwr->u.data.length) {
+                       if (user_length < iwr->u.data.length) {
                                kfree(extra);
                                return -E2BIG;
                        }
@@ -881,18 +845,12 @@ static int ioctl_standard_call(struct net_device *        dev,
                                           descr->token_size);
                        if (err)
                                ret =  -EFAULT;
-#ifdef WE_IOCTL_DEBUG
-                       printk(KERN_DEBUG "%s (WE) : Wrote %d bytes\n",
-                              dev->name,
-                              iwr->u.data.length * descr->token_size);
-#endif /* WE_IOCTL_DEBUG */
                }
 
-#ifdef WE_SET_EVENT
                /* Generate an event to notify listeners of the change */
-               if((descr->flags & IW_DESCR_FLAG_EVENT) &&
+               if ((descr->flags & IW_DESCR_FLAG_EVENT) &&
                   ((ret == 0) || (ret == -EIWCOMMIT))) {
-                       if(descr->flags & IW_DESCR_FLAG_RESTRICT)
+                       if (descr->flags & IW_DESCR_FLAG_RESTRICT)
                                /* If the event is restricted, don't
                                 * export the payload */
                                wireless_send_event(dev, cmd, &(iwr->u), NULL);
@@ -900,14 +858,13 @@ static int ioctl_standard_call(struct net_device *        dev,
                                wireless_send_event(dev, cmd, &(iwr->u),
                                                    extra);
                }
-#endif /* WE_SET_EVENT */
 
                /* Cleanup - I told you it wasn't that long ;-) */
                kfree(extra);
        }
 
        /* Call commit handler if needed and defined */
-       if(ret == -EIWCOMMIT)
+       if (ret == -EIWCOMMIT)
                ret = call_commit_handler(dev);
 
        /* Here, we will generate the appropriate event if needed */
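For IW_HEADER_TYPE_POINT commands the wrapper above fixes the user-space contract: the caller hands in a buffer pointer plus its size, the kernel works on its own kzalloc()ed copy, and a GET whose result does not fit comes back as -E2BIG. A minimal user-space sketch of one such GET (SIOCGIWESSID is only an example; error handling trimmed):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/wireless.h>

/* Sketch: read the ESSID of "ifname" through the standard ioctl path. */
static int get_essid(int sock, const char *ifname, char *buf, unsigned short buflen)
{
	struct iwreq iwr;

	memset(&iwr, 0, sizeof(iwr));
	strncpy(iwr.ifr_name, ifname, IFNAMSIZ - 1);
	iwr.u.essid.pointer = buf;	/* user buffer the kernel copies into */
	iwr.u.essid.length  = buflen;	/* saved as user_length by the wrapper */
	iwr.u.essid.flags   = 0;

	return ioctl(sock, SIOCGIWESSID, &iwr);	/* errno == E2BIG if buf is too small */
}

A datagram socket from socket(AF_INET, SOCK_DGRAM, 0) is the conventional transport, and the buffer should hold at least IW_ESSID_MAX_SIZE + 1 bytes.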
@@ -931,10 +888,8 @@ static int ioctl_standard_call(struct net_device * dev,
  * an iw_handler but process it in your ioctl handler (i.e. use the
  * old driver API).
  */
-static inline int ioctl_private_call(struct net_device *       dev,
-                                    struct ifreq *             ifr,
-                                    unsigned int               cmd,
-                                    iw_handler         handler)
+static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr,
+                             unsigned int cmd, iw_handler handler)
 {
        struct iwreq *                  iwr = (struct iwreq *) ifr;
        const struct iw_priv_args *     descr = NULL;
@@ -944,28 +899,18 @@ static inline int ioctl_private_call(struct net_device *  dev,
        int                             ret = -EINVAL;
 
        /* Get the description of the IOCTL */
-       for(i = 0; i < dev->wireless_handlers->num_private_args; i++)
-               if(cmd == dev->wireless_handlers->private_args[i].cmd) {
+       for (i = 0; i < dev->wireless_handlers->num_private_args; i++)
+               if (cmd == dev->wireless_handlers->private_args[i].cmd) {
                        descr = &(dev->wireless_handlers->private_args[i]);
                        break;
                }
 
-#ifdef WE_IOCTL_DEBUG
-       printk(KERN_DEBUG "%s (WE) : Found private handler for 0x%04X\n",
-              ifr->ifr_name, cmd);
-       if(descr) {
-               printk(KERN_DEBUG "%s (WE) : Name %s, set %X, get %X\n",
-                      dev->name, descr->name,
-                      descr->set_args, descr->get_args);
-       }
-#endif /* WE_IOCTL_DEBUG */
-
        /* Compute the size of the set/get arguments */
-       if(descr != NULL) {
-               if(IW_IS_SET(cmd)) {
+       if (descr != NULL) {
+               if (IW_IS_SET(cmd)) {
                        int     offset = 0;     /* For sub-ioctls */
                        /* Check for sub-ioctl handler */
-                       if(descr->name[0] == '\0')
+                       if (descr->name[0] == '\0')
                                /* Reserve one int for sub-ioctl index */
                                offset = sizeof(__u32);
 
@@ -973,7 +918,7 @@ static inline int ioctl_private_call(struct net_device *    dev,
                        extra_size = get_priv_size(descr->set_args);
 
                        /* Does it fit in iwr ? */
-                       if((descr->set_args & IW_PRIV_SIZE_FIXED) &&
+                       if ((descr->set_args & IW_PRIV_SIZE_FIXED) &&
                           ((extra_size + offset) <= IFNAMSIZ))
                                extra_size = 0;
                } else {
@@ -981,7 +926,7 @@ static inline int ioctl_private_call(struct net_device *    dev,
                        extra_size = get_priv_size(descr->get_args);
 
                        /* Does it fit in iwr ? */
-                       if((descr->get_args & IW_PRIV_SIZE_FIXED) &&
+                       if ((descr->get_args & IW_PRIV_SIZE_FIXED) &&
                           (extra_size <= IFNAMSIZ))
                                extra_size = 0;
                }
@@ -992,7 +937,7 @@ static inline int ioctl_private_call(struct net_device *    dev,
        info.flags = 0;
 
        /* Check if we have a pointer to user space data or not. */
-       if(extra_size == 0) {
+       if (extra_size == 0) {
                /* No extra arguments. Trivial to handle */
                ret = handler(dev, &info, &(iwr->u), (char *) &(iwr->u));
        } else {
@@ -1000,46 +945,33 @@ static inline int ioctl_private_call(struct net_device * dev,
                int     err;
 
                /* Check what user space is giving us */
-               if(IW_IS_SET(cmd)) {
+               if (IW_IS_SET(cmd)) {
                        /* Check NULL pointer */
-                       if((iwr->u.data.pointer == NULL) &&
+                       if ((iwr->u.data.pointer == NULL) &&
                           (iwr->u.data.length != 0))
                                return -EFAULT;
 
                        /* Does it fit within bounds ? */
-                       if(iwr->u.data.length > (descr->set_args &
+                       if (iwr->u.data.length > (descr->set_args &
                                                 IW_PRIV_SIZE_MASK))
                                return -E2BIG;
-               } else {
-                       /* Check NULL pointer */
-                       if(iwr->u.data.pointer == NULL)
-                               return -EFAULT;
-               }
-
-#ifdef WE_IOCTL_DEBUG
-               printk(KERN_DEBUG "%s (WE) : Malloc %d bytes\n",
-                      dev->name, extra_size);
-#endif /* WE_IOCTL_DEBUG */
+               } else if (iwr->u.data.pointer == NULL)
+                       return -EFAULT;
 
                /* Always allocate for max space. Easier, and won't last
                 * long... */
                extra = kmalloc(extra_size, GFP_KERNEL);
-               if (extra == NULL) {
+               if (extra == NULL)
                        return -ENOMEM;
-               }
 
                /* If it is a SET, get all the extra data in here */
-               if(IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
+               if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
                        err = copy_from_user(extra, iwr->u.data.pointer,
                                             extra_size);
                        if (err) {
                                kfree(extra);
                                return -EFAULT;
                        }
-#ifdef WE_IOCTL_DEBUG
-                       printk(KERN_DEBUG "%s (WE) : Got %d elem\n",
-                              dev->name, iwr->u.data.length);
-#endif /* WE_IOCTL_DEBUG */
                }
 
                /* Call the handler */
@@ -1059,10 +991,6 @@ static inline int ioctl_private_call(struct net_device *  dev,
                                           extra_size);
                        if (err)
                                ret =  -EFAULT;
-#ifdef WE_IOCTL_DEBUG
-                       printk(KERN_DEBUG "%s (WE) : Wrote %d elem\n",
-                              dev->name, iwr->u.data.length);
-#endif /* WE_IOCTL_DEBUG */
                }
 
                /* Cleanup - I told you it wasn't that long ;-) */
@@ -1071,7 +999,7 @@ static inline int ioctl_private_call(struct net_device *   dev,
 
 
        /* Call commit handler if needed and defined */
-       if(ret == -EIWCOMMIT)
+       if (ret == -EIWCOMMIT)
                ret = call_commit_handler(dev);
 
        return ret;
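The set_args/get_args words decoded above come from the driver's private-ioctl descriptor table. A sketch of what such a table can look like on the driver side (command numbers, names and sizes are made up; the IW_PRIV_* macros are the standard encoding from <linux/wireless.h>):

#include <linux/wireless.h>
#include <net/iw_handler.h>

static const struct iw_priv_args my_private_args[] = {
	{
		.cmd		= SIOCIWFIRSTPRIV + 0,	/* even number: a SET */
		.set_args	= IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
		.get_args	= 0,
		.name		= "set_debug",
	},
	{
		.cmd		= SIOCIWFIRSTPRIV + 1,	/* odd number: a GET */
		.set_args	= 0,
		.get_args	= IW_PRIV_TYPE_CHAR | 128,
		.name		= "get_fwver",
	},
};
/* Referenced from the driver's iw_handler_def via .private_args / .num_private_args. */

With IW_PRIV_SIZE_FIXED and a single int the SET argument fits inside struct iwreq itself, so extra_size collapses to 0 above; the 128-byte GET goes through the user pointer instead.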
@@ -1079,11 +1007,10 @@ static inline int ioctl_private_call(struct net_device *        dev,
 
 /* ---------------------------------------------------------------- */
 /*
- * Main IOCTl dispatcher. Called from the main networking code
- * (dev_ioctl() in net/core/dev.c).
+ * Main IOCTL dispatcher.
  * Check the type of IOCTL and call the appropriate wrapper...
  */
-int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd)
+static int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd)
 {
        struct net_device *dev;
        iw_handler      handler;
@@ -1098,789 +1025,54 @@ int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd)
        /* A bunch of special cases, then the generic case...
         * Note that 'cmd' is already filtered in dev_ioctl() with
         * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */
-       switch(cmd)
-       {
-               case SIOCGIWSTATS:
-                       /* Get Wireless Stats */
-                       return ioctl_standard_call(dev,
-                                                  ifr,
-                                                  cmd,
-                                                  &iw_handler_get_iwstats);
-
-               case SIOCGIWPRIV:
-                       /* Check if we have some wireless handlers defined */
-                       if(dev->wireless_handlers != NULL) {
-                               /* We export to user space the definition of
-                                * the private handler ourselves */
-                               return ioctl_standard_call(dev,
-                                                          ifr,
-                                                          cmd,
-                                                          &iw_handler_get_private);
-                       }
-                       // ## Fall-through for old API ##
-               default:
-                       /* Generic IOCTL */
-                       /* Basic check */
-                       if (!netif_device_present(dev))
-                               return -ENODEV;
-                       /* New driver API : try to find the handler */
-                       handler = get_handler(dev, cmd);
-                       if(handler != NULL) {
-                               /* Standard and private are not the same */
-                               if(cmd < SIOCIWFIRSTPRIV)
-                                       return ioctl_standard_call(dev,
-                                                                  ifr,
-                                                                  cmd,
-                                                                  handler);
-                               else
-                                       return ioctl_private_call(dev,
-                                                                 ifr,
-                                                                 cmd,
-                                                                 handler);
-                       }
-                       /* Old driver API : call driver ioctl handler */
-                       if (dev->do_ioctl) {
-                               return dev->do_ioctl(dev, ifr, cmd);
-                       }
-                       return -EOPNOTSUPP;
-       }
-       /* Not reached */
-       return -EINVAL;
-}
-
-/********************** RTNETLINK REQUEST API **********************/
-/*
- * The alternate user space API to configure all those Wireless Extensions
- * is through RtNetlink.
- * This API support only the new driver API (iw_handler).
- *
- * This RtNetlink API use the same query/reply model as the ioctl API.
- * Maximum effort has been done to fit in the RtNetlink model, and
- * we support both RtNetlink Set and RtNelink Get operations.
- * On the other hand, we don't offer Dump operations because of the
- * following reasons :
- *     o Large number of parameters, most optional
- *     o Large size of some parameters (> 100 bytes)
- *     o Each parameters need to be extracted from hardware
- *     o Scan requests can take seconds and disable network activity.
- * Because of this high cost/overhead, we want to return only the
- * parameters the user application is really interested in.
- * We could offer partial Dump using the IW_DESCR_FLAG_DUMP flag.
- *
- * The API uses the standard RtNetlink socket. When the RtNetlink code
- * find a IFLA_WIRELESS field in a RtNetlink SET_LINK request,
- * it calls here.
- */
-
-#ifdef CONFIG_NET_WIRELESS_RTNETLINK
-/* ---------------------------------------------------------------- */
-/*
- * Wrapper to call a standard Wireless Extension GET handler.
- * We do various checks and call the handler with the proper args.
- */
-static int rtnetlink_standard_get(struct net_device *  dev,
-                                 struct iw_event *     request,
-                                 int                   request_len,
-                                 iw_handler            handler,
-                                 char **               p_buf,
-                                 int *                 p_len)
-{
-       const struct iw_ioctl_description *     descr = NULL;
-       unsigned int                            cmd;
-       union iwreq_data *                      wrqu;
-       int                                     hdr_len;
-       struct iw_request_info                  info;
-       char *                                  buffer = NULL;
-       int                                     buffer_size = 0;
-       int                                     ret = -EINVAL;
-
-       /* Get the description of the Request */
-       cmd = request->cmd;
-       if((cmd - SIOCIWFIRST) >= standard_ioctl_num)
-               return -EOPNOTSUPP;
-       descr = &(standard_ioctl[cmd - SIOCIWFIRST]);
-
-#ifdef WE_RTNETLINK_DEBUG
-       printk(KERN_DEBUG "%s (WE.r) : Found standard handler for 0x%04X\n",
-              dev->name, cmd);
-       printk(KERN_DEBUG "%s (WE.r) : Header type : %d, Token type : %d, size : %d, token : %d\n", dev->name, descr->header_type, descr->token_type, descr->token_size, descr->max_tokens);
-#endif /* WE_RTNETLINK_DEBUG */
-
-       /* Check if wrqu is complete */
-       hdr_len = event_type_size[descr->header_type];
-       if(request_len < hdr_len) {
-#ifdef WE_RTNETLINK_DEBUG
-               printk(KERN_DEBUG
-                      "%s (WE.r) : Wireless request too short (%d)\n",
-                      dev->name, request_len);
-#endif /* WE_RTNETLINK_DEBUG */
-               return -EINVAL;
-       }
-
-       /* Prepare the call */
-       info.cmd = cmd;
-       info.flags = 0;
-
-       /* Check if we have extra data in the reply or not */
-       if(descr->header_type != IW_HEADER_TYPE_POINT) {
-
-               /* Create the kernel buffer that we will return.
-                * It's at an offset to match the TYPE_POINT case... */
-               buffer_size = request_len + IW_EV_POINT_OFF;
-               buffer = kmalloc(buffer_size, GFP_KERNEL);
-               if (buffer == NULL) {
-                       return -ENOMEM;
-               }
-               /* Copy event data */
-               memcpy(buffer + IW_EV_POINT_OFF, request, request_len);
-               /* Use our own copy of wrqu */
-               wrqu = (union iwreq_data *) (buffer + IW_EV_POINT_OFF
-                                            + IW_EV_LCP_PK_LEN);
-
-               /* No extra arguments. Trivial to handle */
-               ret = handler(dev, &info, wrqu, NULL);
-
-       } else {
-               union iwreq_data        wrqu_point;
-               char *                  extra = NULL;
-               int                     extra_size = 0;
+       if (cmd == SIOCGIWSTATS)
+               return ioctl_standard_call(dev, ifr, cmd,
+                                          &iw_handler_get_iwstats);
 
-               /* Get a temp copy of wrqu (skip pointer) */
-               memcpy(((char *) &wrqu_point) + IW_EV_POINT_OFF,
-                      ((char *) request) + IW_EV_LCP_PK_LEN,
-                      IW_EV_POINT_LEN - IW_EV_LCP_PK_LEN);
-
-               /* Calculate space needed by arguments. Always allocate
-                * for max space. Easier, and won't last long... */
-               extra_size = descr->max_tokens * descr->token_size;
-               /* Support for very large requests */
-               if((descr->flags & IW_DESCR_FLAG_NOMAX) &&
-                  (wrqu_point.data.length > descr->max_tokens))
-                       extra_size = (wrqu_point.data.length
-                                     * descr->token_size);
-               buffer_size = extra_size + IW_EV_POINT_PK_LEN + IW_EV_POINT_OFF;
-#ifdef WE_RTNETLINK_DEBUG
-               printk(KERN_DEBUG "%s (WE.r) : Malloc %d bytes (%d bytes)\n",
-                      dev->name, extra_size, buffer_size);
-#endif /* WE_RTNETLINK_DEBUG */
-
-               /* Create the kernel buffer that we will return */
-               buffer = kmalloc(buffer_size, GFP_KERNEL);
-               if (buffer == NULL) {
-                       return -ENOMEM;
-               }
-
-               /* Put wrqu in the right place (just before extra).
-                * Leave space for IWE header and dummy pointer...
-                * Note that IW_EV_LCP_PK_LEN==4 bytes, so it's still aligned.
-                */
-               memcpy(buffer + IW_EV_LCP_PK_LEN + IW_EV_POINT_OFF,
-                      ((char *) &wrqu_point) + IW_EV_POINT_OFF,
-                      IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
-               wrqu = (union iwreq_data *) (buffer + IW_EV_LCP_PK_LEN);
-
-               /* Extra comes logically after that. Offset +12 bytes. */
-               extra = buffer + IW_EV_POINT_OFF + IW_EV_POINT_PK_LEN;
-
-               /* Call the handler */
-               ret = handler(dev, &info, wrqu, extra);
-
-               /* Calculate real returned length */
-               extra_size = (wrqu->data.length * descr->token_size);
-               /* Re-adjust reply size */
-               request->len = extra_size + IW_EV_POINT_PK_LEN;
-
-               /* Put the iwe header where it should, i.e. scrap the
-                * dummy pointer. */
-               memcpy(buffer + IW_EV_POINT_OFF, request, IW_EV_LCP_PK_LEN);
-
-#ifdef WE_RTNETLINK_DEBUG
-               printk(KERN_DEBUG "%s (WE.r) : Reply 0x%04X, hdr_len %d, tokens %d, extra_size %d, buffer_size %d\n", dev->name, cmd, hdr_len, wrqu->data.length, extra_size, buffer_size);
-#endif /* WE_RTNETLINK_DEBUG */
-
-               /* Check if there is enough buffer up there */
-               if(wrqu_point.data.length < wrqu->data.length)
-                       ret = -E2BIG;
-       }
-
-       /* Return the buffer to the caller */
-       if (!ret) {
-               *p_buf = buffer;
-               *p_len = request->len;
-       } else {
-               /* Cleanup */
-               if(buffer)
-                       kfree(buffer);
-       }
-
-       return ret;
-}
-
-/* ---------------------------------------------------------------- */
-/*
- * Wrapper to call a standard Wireless Extension SET handler.
- * We do various checks and call the handler with the proper args.
- */
-static inline int rtnetlink_standard_set(struct net_device *   dev,
-                                        struct iw_event *      request,
-                                        int                    request_len,
-                                        iw_handler             handler)
-{
-       const struct iw_ioctl_description *     descr = NULL;
-       unsigned int                            cmd;
-       union iwreq_data *                      wrqu;
-       union iwreq_data                        wrqu_point;
-       int                                     hdr_len;
-       char *                                  extra = NULL;
-       int                                     extra_size = 0;
-       struct iw_request_info                  info;
-       int                                     ret = -EINVAL;
-
-       /* Get the description of the Request */
-       cmd = request->cmd;
-       if((cmd - SIOCIWFIRST) >= standard_ioctl_num)
-               return -EOPNOTSUPP;
-       descr = &(standard_ioctl[cmd - SIOCIWFIRST]);
-
-#ifdef WE_RTNETLINK_DEBUG
-       printk(KERN_DEBUG "%s (WE.r) : Found standard SET handler for 0x%04X\n",
-              dev->name, cmd);
-       printk(KERN_DEBUG "%s (WE.r) : Header type : %d, Token type : %d, size : %d, token : %d\n", dev->name, descr->header_type, descr->token_type, descr->token_size, descr->max_tokens);
-#endif /* WE_RTNETLINK_DEBUG */
-
-       /* Extract fixed header from request. This is properly aligned. */
-       wrqu = (union iwreq_data *) (((char *) request) + IW_EV_LCP_PK_LEN);
-
-       /* Check if wrqu is complete */
-       hdr_len = event_type_pk_size[descr->header_type];
-       if(request_len < hdr_len) {
-#ifdef WE_RTNETLINK_DEBUG
-               printk(KERN_DEBUG
-                      "%s (WE.r) : Wireless request too short (%d)\n",
-                      dev->name, request_len);
-#endif /* WE_RTNETLINK_DEBUG */
-               return -EINVAL;
-       }
-
-       /* Prepare the call */
-       info.cmd = cmd;
-       info.flags = 0;
-
-       /* Check if we have extra data in the request or not */
-       if(descr->header_type != IW_HEADER_TYPE_POINT) {
-
-               /* No extra arguments. Trivial to handle */
-               ret = handler(dev, &info, wrqu, NULL);
-
-       } else {
-               int     extra_len;
-
-               /* Put wrqu in the right place (skip pointer) */
-               memcpy(((char *) &wrqu_point) + IW_EV_POINT_OFF,
-                      wrqu, IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
-               /* Don't forget about the event code... */
-               wrqu = &wrqu_point;
-
-               /* Check if number of token fits within bounds */
-               if(wrqu_point.data.length > descr->max_tokens)
-                       return -E2BIG;
-               if(wrqu_point.data.length < descr->min_tokens)
-                       return -EINVAL;
-
-               /* Real length of payload */
-               extra_len = wrqu_point.data.length * descr->token_size;
-
-               /* Check if request is self consistent */
-               if((request_len - hdr_len) < extra_len) {
-#ifdef WE_RTNETLINK_DEBUG
-                       printk(KERN_DEBUG "%s (WE.r) : Wireless request data too short (%d)\n",
-                              dev->name, extra_size);
-#endif /* WE_RTNETLINK_DEBUG */
-                       return -EINVAL;
-               }
-
-#ifdef WE_RTNETLINK_DEBUG
-               printk(KERN_DEBUG "%s (WE.r) : Malloc %d bytes\n",
-                      dev->name, extra_size);
-#endif /* WE_RTNETLINK_DEBUG */
-
-               /* Always allocate for max space. Easier, and won't last
-                * long... */
-               extra_size = descr->max_tokens * descr->token_size;
-               extra = kmalloc(extra_size, GFP_KERNEL);
-               if (extra == NULL)
-                       return -ENOMEM;
-
-               /* Copy extra in aligned buffer */
-               memcpy(extra, ((char *) request) + hdr_len, extra_len);
-
-               /* Call the handler */
-               ret = handler(dev, &info, &wrqu_point, extra);
-       }
-
-#ifdef WE_SET_EVENT
-       /* Generate an event to notify listeners of the change */
-       if((descr->flags & IW_DESCR_FLAG_EVENT) &&
-          ((ret == 0) || (ret == -EIWCOMMIT))) {
-               if(descr->flags & IW_DESCR_FLAG_RESTRICT)
-                       /* If the event is restricted, don't
-                        * export the payload */
-                       wireless_send_event(dev, cmd, wrqu, NULL);
-               else
-                       wireless_send_event(dev, cmd, wrqu, extra);
-       }
-#endif /* WE_SET_EVENT */
-
-       /* Cleanup - I told you it wasn't that long ;-) */
-       if(extra)
-               kfree(extra);
-
-       /* Call commit handler if needed and defined */
-       if(ret == -EIWCOMMIT)
-               ret = call_commit_handler(dev);
-
-       return ret;
-}
-
-/* ---------------------------------------------------------------- */
-/*
- * Wrapper to call a private Wireless Extension GET handler.
- * Same as above...
- * It's not as nice and slimline as the standard wrapper. The cause
- * is struct iw_priv_args, which was not really designed for the
- * job we are going here.
- *
- * IMPORTANT : This function prevent to set and get data on the same
- * IOCTL and enforce the SET/GET convention. Not doing it would be
- * far too hairy...
- * If you need to set and get data at the same time, please don't use
- * a iw_handler but process it in your ioctl handler (i.e. use the
- * old driver API).
- */
-static inline int rtnetlink_private_get(struct net_device *    dev,
-                                       struct iw_event *       request,
-                                       int                     request_len,
-                                       iw_handler              handler,
-                                       char **                 p_buf,
-                                       int *                   p_len)
-{
-       const struct iw_priv_args *     descr = NULL;
-       unsigned int                    cmd;
-       union iwreq_data *              wrqu;
-       int                             hdr_len;
-       struct iw_request_info          info;
-       int                             extra_size = 0;
-       int                             i;
-       char *                          buffer = NULL;
-       int                             buffer_size = 0;
-       int                             ret = -EINVAL;
-
-       /* Get the description of the Request */
-       cmd = request->cmd;
-       for(i = 0; i < dev->wireless_handlers->num_private_args; i++)
-               if(cmd == dev->wireless_handlers->private_args[i].cmd) {
-                       descr = &(dev->wireless_handlers->private_args[i]);
-                       break;
-               }
-       if(descr == NULL)
-               return -EOPNOTSUPP;
-
-#ifdef WE_RTNETLINK_DEBUG
-       printk(KERN_DEBUG "%s (WE.r) : Found private handler for 0x%04X\n",
-              dev->name, cmd);
-       printk(KERN_DEBUG "%s (WE.r) : Name %s, set %X, get %X\n",
-              dev->name, descr->name, descr->set_args, descr->get_args);
-#endif /* WE_RTNETLINK_DEBUG */
-
-       /* Compute the max size of the get arguments */
-       extra_size = get_priv_size(descr->get_args);
-
-       /* Does it fits in wrqu ? */
-       if((descr->get_args & IW_PRIV_SIZE_FIXED) &&
-          (extra_size <= IFNAMSIZ)) {
-               hdr_len = extra_size;
-               extra_size = 0;
-       } else {
-               hdr_len = IW_EV_POINT_PK_LEN;
-       }
-
-       /* Check if wrqu is complete */
-       if(request_len < hdr_len) {
-#ifdef WE_RTNETLINK_DEBUG
-               printk(KERN_DEBUG
-                      "%s (WE.r) : Wireless request too short (%d)\n",
-                      dev->name, request_len);
-#endif /* WE_RTNETLINK_DEBUG */
-               return -EINVAL;
-       }
-
-       /* Prepare the call */
-       info.cmd = cmd;
-       info.flags = 0;
-
-       /* Check if we have a pointer to user space data or not. */
-       if(extra_size == 0) {
-
-               /* Create the kernel buffer that we will return.
-                * It's at an offset to match the TYPE_POINT case... */
-               buffer_size = request_len + IW_EV_POINT_OFF;
-               buffer = kmalloc(buffer_size, GFP_KERNEL);
-               if (buffer == NULL) {
-                       return -ENOMEM;
-               }
-               /* Copy event data */
-               memcpy(buffer + IW_EV_POINT_OFF, request, request_len);
-               /* Use our own copy of wrqu */
-               wrqu = (union iwreq_data *) (buffer + IW_EV_POINT_OFF
-                                            + IW_EV_LCP_PK_LEN);
-
-               /* No extra arguments. Trivial to handle */
-               ret = handler(dev, &info, wrqu, (char *) wrqu);
-
-       } else {
-               char *  extra;
-
-               /* Buffer for full reply */
-               buffer_size = extra_size + IW_EV_POINT_PK_LEN + IW_EV_POINT_OFF;
-
-#ifdef WE_RTNETLINK_DEBUG
-               printk(KERN_DEBUG "%s (WE.r) : Malloc %d bytes (%d bytes)\n",
-                      dev->name, extra_size, buffer_size);
-#endif /* WE_RTNETLINK_DEBUG */
-
-               /* Create the kernel buffer that we will return */
-               buffer = kmalloc(buffer_size, GFP_KERNEL);
-               if (buffer == NULL) {
-                       return -ENOMEM;
-               }
-
-               /* Put wrqu in the right place (just before extra).
-                * Leave space for IWE header and dummy pointer...
-                * Note that IW_EV_LCP_PK_LEN==4 bytes, so it's still aligned.
-                */
-               memcpy(buffer + IW_EV_LCP_PK_LEN + IW_EV_POINT_OFF,
-                      ((char *) request) + IW_EV_LCP_PK_LEN,
-                      IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
-               wrqu = (union iwreq_data *) (buffer + IW_EV_LCP_PK_LEN);
-
-               /* Extra comes logically after that. Offset +12 bytes. */
-               extra = buffer + IW_EV_POINT_OFF + IW_EV_POINT_PK_LEN;
-
-               /* Call the handler */
-               ret = handler(dev, &info, wrqu, extra);
-
-               /* Adjust for the actual length if it's variable,
-                * avoid leaking kernel bits outside. */
-               if (!(descr->get_args & IW_PRIV_SIZE_FIXED))
-                       extra_size = adjust_priv_size(descr->get_args, wrqu);
-               /* Re-adjust reply size */
-               request->len = extra_size + IW_EV_POINT_PK_LEN;
-
-               /* Put the iwe header where it should, i.e. scrap the
-                * dummy pointer. */
-               memcpy(buffer + IW_EV_POINT_OFF, request, IW_EV_LCP_PK_LEN);
-
-#ifdef WE_RTNETLINK_DEBUG
-               printk(KERN_DEBUG "%s (WE.r) : Reply 0x%04X, hdr_len %d, tokens %d, extra_size %d, buffer_size %d\n", dev->name, cmd, hdr_len, wrqu->data.length, extra_size, buffer_size);
-#endif /* WE_RTNETLINK_DEBUG */
-       }
-
-       /* Return the buffer to the caller */
-       if (!ret) {
-               *p_buf = buffer;
-               *p_len = request->len;
-       } else {
-               /* Cleanup */
-               if(buffer)
-                       kfree(buffer);
-       }
-
-       return ret;
-}
-
-/* ---------------------------------------------------------------- */
-/*
- * Wrapper to call a private Wireless Extension SET handler.
- * Same as above...
- * It's not as nice and slimline as the standard wrapper. The cause
- * is struct iw_priv_args, which was not really designed for the
- * job we are going here.
- *
- * IMPORTANT : This function prevent to set and get data on the same
- * IOCTL and enforce the SET/GET convention. Not doing it would be
- * far too hairy...
- * If you need to set and get data at the same time, please don't use
- * a iw_handler but process it in your ioctl handler (i.e. use the
- * old driver API).
- */
-static inline int rtnetlink_private_set(struct net_device *    dev,
-                                       struct iw_event *       request,
-                                       int                     request_len,
-                                       iw_handler              handler)
-{
-       const struct iw_priv_args *     descr = NULL;
-       unsigned int                    cmd;
-       union iwreq_data *              wrqu;
-       union iwreq_data                wrqu_point;
-       int                             hdr_len;
-       char *                          extra = NULL;
-       int                             extra_size = 0;
-       int                             offset = 0;     /* For sub-ioctls */
-       struct iw_request_info          info;
-       int                             i;
-       int                             ret = -EINVAL;
-
-       /* Get the description of the Request */
-       cmd = request->cmd;
-       for(i = 0; i < dev->wireless_handlers->num_private_args; i++)
-               if(cmd == dev->wireless_handlers->private_args[i].cmd) {
-                       descr = &(dev->wireless_handlers->private_args[i]);
-                       break;
-               }
-       if(descr == NULL)
-               return -EOPNOTSUPP;
-
-#ifdef WE_RTNETLINK_DEBUG
-       printk(KERN_DEBUG "%s (WE.r) : Found private handler for 0x%04X\n",
-              ifr->ifr_name, cmd);
-       printk(KERN_DEBUG "%s (WE.r) : Name %s, set %X, get %X\n",
-              dev->name, descr->name, descr->set_args, descr->get_args);
-#endif /* WE_RTNETLINK_DEBUG */
-
-       /* Compute the size of the set arguments */
-       /* Check for sub-ioctl handler */
-       if(descr->name[0] == '\0')
-               /* Reserve one int for sub-ioctl index */
-               offset = sizeof(__u32);
-
-       /* Size of set arguments */
-       extra_size = get_priv_size(descr->set_args);
-
-       /* Does it fits in wrqu ? */
-       if((descr->set_args & IW_PRIV_SIZE_FIXED) &&
-          (extra_size <= IFNAMSIZ)) {
-               hdr_len = IW_EV_LCP_PK_LEN + extra_size;
-               extra_size = 0;
-       } else {
-               hdr_len = IW_EV_POINT_PK_LEN;
-       }
-
-       /* Extract fixed header from request. This is properly aligned. */
-       wrqu = (union iwreq_data *) (((char *) request) + IW_EV_LCP_PK_LEN);
-
-       /* Check if wrqu is complete */
-       if(request_len < hdr_len) {
-#ifdef WE_RTNETLINK_DEBUG
-               printk(KERN_DEBUG
-                      "%s (WE.r) : Wireless request too short (%d)\n",
-                      dev->name, request_len);
-#endif /* WE_RTNETLINK_DEBUG */
-               return -EINVAL;
-       }
-
-       /* Prepare the call */
-       info.cmd = cmd;
-       info.flags = 0;
-
-       /* Check if we have a pointer to user space data or not. */
-       if(extra_size == 0) {
-
-               /* No extra arguments. Trivial to handle */
-               ret = handler(dev, &info, wrqu, (char *) wrqu);
-
-       } else {
-               int     extra_len;
-
-               /* Put wrqu in the right place (skip pointer) */
-               memcpy(((char *) &wrqu_point) + IW_EV_POINT_OFF,
-                      wrqu, IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
-
-               /* Does it fits within bounds ? */
-               if(wrqu_point.data.length > (descr->set_args &
-                                            IW_PRIV_SIZE_MASK))
-                       return -E2BIG;
-
-               /* Real length of payload */
-               extra_len = adjust_priv_size(descr->set_args, &wrqu_point);
-
-               /* Check if request is self consistent */
-               if((request_len - hdr_len) < extra_len) {
-#ifdef WE_RTNETLINK_DEBUG
-                       printk(KERN_DEBUG "%s (WE.r) : Wireless request data too short (%d)\n",
-                              dev->name, extra_size);
-#endif /* WE_RTNETLINK_DEBUG */
-                       return -EINVAL;
-               }
-
-#ifdef WE_RTNETLINK_DEBUG
-               printk(KERN_DEBUG "%s (WE.r) : Malloc %d bytes\n",
-                      dev->name, extra_size);
-#endif /* WE_RTNETLINK_DEBUG */
-
-               /* Always allocate for max space. Easier, and won't last
-                * long... */
-               extra = kmalloc(extra_size, GFP_KERNEL);
-               if (extra == NULL)
-                       return -ENOMEM;
-
-               /* Copy extra in aligned buffer */
-               memcpy(extra, ((char *) request) + hdr_len, extra_len);
-
-               /* Call the handler */
-               ret = handler(dev, &info, &wrqu_point, extra);
-
-               /* Cleanup - I told you it wasn't that long ;-) */
-               kfree(extra);
-       }
-
-       /* Call commit handler if needed and defined */
-       if(ret == -EIWCOMMIT)
-               ret = call_commit_handler(dev);
-
-       return ret;
-}
-
-/* ---------------------------------------------------------------- */
-/*
- * Main RtNetlink dispatcher. Called from the main networking code
- * (do_getlink() in net/core/rtnetlink.c).
- * Check the type of Request and call the appropriate wrapper...
- */
-int wireless_rtnetlink_get(struct net_device * dev,
-                          char *               data,
-                          int                  len,
-                          char **              p_buf,
-                          int *                p_len)
-{
-       struct iw_event *       request = (struct iw_event *) data;
-       iw_handler              handler;
-
-       /* Check length */
-       if(len < IW_EV_LCP_PK_LEN) {
-               printk(KERN_DEBUG "%s (WE.r) : RtNetlink request too short (%d)\n",
-                      dev->name, len);
-               return -EINVAL;
-       }
-
-       /* ReCheck length (len may have padding) */
-       if(request->len > len) {
-               printk(KERN_DEBUG "%s (WE.r) : RtNetlink request len invalid (%d-%d)\n",
-                      dev->name, request->len, len);
-               return -EINVAL;
-       }
-
-       /* Only accept GET requests in here */
-       if(!IW_IS_GET(request->cmd))
-               return -EOPNOTSUPP;
-
-       /* If command is `get the encoding parameters', check if
-        * the user has the right to do it */
-       if (request->cmd == SIOCGIWENCODE ||
-           request->cmd == SIOCGIWENCODEEXT) {
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-       }
-
-       /* Special cases */
-       if(request->cmd == SIOCGIWSTATS)
-               /* Get Wireless Stats */
-               return rtnetlink_standard_get(dev,
-                                             request,
-                                             request->len,
-                                             &iw_handler_get_iwstats,
-                                             p_buf, p_len);
-       if(request->cmd == SIOCGIWPRIV) {
-               /* Check if we have some wireless handlers defined */
-               if(dev->wireless_handlers == NULL)
-                       return -EOPNOTSUPP;
-               /* Get Wireless Stats */
-               return rtnetlink_standard_get(dev,
-                                             request,
-                                             request->len,
-                                             &iw_handler_get_private,
-                                             p_buf, p_len);
-       }
+       if (cmd == SIOCGIWPRIV && dev->wireless_handlers)
+               return ioctl_standard_call(dev, ifr, cmd,
+                                          &iw_handler_get_private);
 
        /* Basic check */
        if (!netif_device_present(dev))
                return -ENODEV;
 
-       /* Try to find the handler */
-       handler = get_handler(dev, request->cmd);
-       if(handler != NULL) {
+       /* New driver API : try to find the handler */
+       handler = get_handler(dev, cmd);
+       if (handler) {
                /* Standard and private are not the same */
-               if(request->cmd < SIOCIWFIRSTPRIV)
-                       return rtnetlink_standard_get(dev,
-                                                     request,
-                                                     request->len,
-                                                     handler,
-                                                     p_buf, p_len);
+               if (cmd < SIOCIWFIRSTPRIV)
+                       return ioctl_standard_call(dev, ifr, cmd, handler);
                else
-                       return rtnetlink_private_get(dev,
-                                                    request,
-                                                    request->len,
-                                                    handler,
-                                                    p_buf, p_len);
+                       return ioctl_private_call(dev, ifr, cmd, handler);
        }
-
+       /* Old driver API : call driver ioctl handler */
+       if (dev->do_ioctl)
+               return dev->do_ioctl(dev, ifr, cmd);
        return -EOPNOTSUPP;
 }
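The "New driver API" branch depends on get_handler(), defined earlier in this file, to map the ioctl number onto the driver's handler tables. Conceptually that is just a bounds-checked array lookup; a sketch of the idea (not the exact code), assuming the usual iw_handler_def fields:

#include <linux/netdevice.h>
#include <net/iw_handler.h>

/* Sketch: standard commands index ->standard from SIOCIWFIRST, private
 * commands index ->private from SIOCIWFIRSTPRIV. */
static iw_handler lookup_handler(struct net_device *dev, unsigned int cmd)
{
	const struct iw_handler_def *hd = dev->wireless_handlers;
	unsigned int idx;

	if (!hd)
		return NULL;

	idx = cmd - SIOCIWFIRST;
	if (idx < hd->num_standard && hd->standard[idx])
		return hd->standard[idx];

	idx = cmd - SIOCIWFIRSTPRIV;
	if (idx < hd->num_private && hd->private[idx])
		return hd->private[idx];

	return NULL;
}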
 
-/* ---------------------------------------------------------------- */
-/*
- * Main RtNetlink dispatcher. Called from the main networking code
- * (do_setlink() in net/core/rtnetlink.c).
- * Check the type of Request and call the appropriate wrapper...
- */
-int wireless_rtnetlink_set(struct net_device * dev,
-                          char *               data,
-                          int                  len)
+/* entry point from dev ioctl */
+int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd,
+                     void __user *arg)
 {
-       struct iw_event *       request = (struct iw_event *) data;
-       iw_handler              handler;
-
-       /* Check length */
-       if(len < IW_EV_LCP_PK_LEN) {
-               printk(KERN_DEBUG "%s (WE.r) : RtNetlink request too short (%d)\n",
-                      dev->name, len);
-               return -EINVAL;
-       }
-
-       /* ReCheck length (len may have padding) */
-       if(request->len > len) {
-               printk(KERN_DEBUG "%s (WE.r) : RtNetlink request len invalid (%d-%d)\n",
-                      dev->name, request->len, len);
-               return -EINVAL;
-       }
-
-       /* Only accept SET requests in here */
-       if(!IW_IS_SET(request->cmd))
-               return -EOPNOTSUPP;
-
-       /* Basic check */
-       if (!netif_device_present(dev))
-               return -ENODEV;
+       int ret;
 
-       /* New driver API : try to find the handler */
-       handler = get_handler(dev, request->cmd);
-       if(handler != NULL) {
-               /* Standard and private are not the same */
-               if(request->cmd < SIOCIWFIRSTPRIV)
-                       return rtnetlink_standard_set(dev,
-                                                     request,
-                                                     request->len,
-                                                     handler);
-               else
-                       return rtnetlink_private_set(dev,
-                                                    request,
-                                                    request->len,
-                                                    handler);
-       }
-
-       return -EOPNOTSUPP;
+       /* If command is `set a parameter', or
+        * `get the encoding parameters', check if
+        * the user has the right to do it */
+       if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT)
+           && !capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       dev_load(ifr->ifr_name);
+       rtnl_lock();
+       ret = wireless_process_ioctl(ifr, cmd);
+       rtnl_unlock();
+       if (IW_IS_GET(cmd) && copy_to_user(arg, ifr, sizeof(struct ifreq)))
+               return -EFAULT;
+       return ret;
 }
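wext_handle_ioctl() now takes care of the capability check, the rtnl locking and the GET-side copy back to user space, so a caller only has to bring the struct ifreq in from user space and route the SIOCIW* range here. A rough sketch of that hand-off (my_dev_ioctl is a hypothetical stand-in, not the actual dev_ioctl() code):

#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/uaccess.h>

extern int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd, void __user *arg);

static int my_dev_ioctl(unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;

	if (copy_from_user(&ifr, arg, sizeof(ifr)))
		return -EFAULT;
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';

	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
		return wext_handle_ioctl(&ifr, cmd, arg);

	return -EOPNOTSUPP;	/* everything else is handled elsewhere */
}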
-#endif /* CONFIG_NET_WIRELESS_RTNETLINK */
-
 
 /************************* EVENT PROCESSING *************************/
 /*
@@ -1888,7 +1080,6 @@ int wireless_rtnetlink_set(struct net_device *    dev,
  * Most often, the event will be propagated through rtnetlink
  */
 
-#ifdef WE_EVENT_RTNETLINK
 /* ---------------------------------------------------------------- */
 /*
  * Locking...
@@ -1933,15 +1124,12 @@ static DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0);
  * current wireless config. Dumping the wireless config is far too
  * expensive (for each parameter, the driver need to query the hardware).
  */
-static inline int rtnetlink_fill_iwinfo(struct sk_buff *       skb,
-                                       struct net_device *     dev,
-                                       int                     type,
-                                       char *                  event,
-                                       int                     event_len)
+static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev,
+                                int type, char *event, int event_len)
 {
        struct ifinfomsg *r;
        struct nlmsghdr  *nlh;
-       unsigned char    *b = skb->tail;
+       unsigned char    *b = skb_tail_pointer(skb);
 
        nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(*r));
        r = NLMSG_DATA(nlh);
@@ -1955,12 +1143,12 @@ static inline int rtnetlink_fill_iwinfo(struct sk_buff *        skb,
        /* Add the wireless events in the netlink packet */
        RTA_PUT(skb, IFLA_WIRELESS, event_len, event);
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1971,9 +1159,7 @@ rtattr_failure:
  * Andrzej Krzysztofowicz mandated that I used a IFLA_XXX field
  * within a RTM_NEWLINK event.
  */
-static inline void rtmsg_iwinfo(struct net_device *    dev,
-                               char *                  event,
-                               int                     event_len)
+static void rtmsg_iwinfo(struct net_device *dev, char *event, int event_len)
 {
        struct sk_buff *skb;
        int size = NLMSG_GOODSIZE;
@@ -1992,8 +1178,6 @@ static inline void rtmsg_iwinfo(struct net_device *       dev,
        tasklet_schedule(&wireless_nlevent_tasklet);
 }
 
-#endif /* WE_EVENT_RTNETLINK */
-
 /* ---------------------------------------------------------------- */
 /*
  * Main event dispatcher. Called from other parts and drivers.
@@ -2015,17 +1199,17 @@ void wireless_send_event(struct net_device *    dev,
        unsigned        cmd_index;              /* *MUST* be unsigned */
 
        /* Get the description of the Event */
-       if(cmd <= SIOCIWLAST) {
+       if (cmd <= SIOCIWLAST) {
                cmd_index = cmd - SIOCIWFIRST;
-               if(cmd_index < standard_ioctl_num)
+               if (cmd_index < standard_ioctl_num)
                        descr = &(standard_ioctl[cmd_index]);
        } else {
                cmd_index = cmd - IWEVFIRST;
-               if(cmd_index < standard_event_num)
+               if (cmd_index < standard_event_num)
                        descr = &(standard_event[cmd_index]);
        }
        /* Don't accept unknown events */
-       if(descr == NULL) {
+       if (descr == NULL) {
                /* Note : we don't return an error to the driver, because
                 * the driver would not know what to do about it. It can't
                 * return an error to the user, because the event is not
@@ -2037,63 +1221,50 @@ void wireless_send_event(struct net_device *    dev,
                       dev->name, cmd);
                return;
        }
-#ifdef WE_EVENT_DEBUG
-       printk(KERN_DEBUG "%s (WE) : Got event 0x%04X\n",
-              dev->name, cmd);
-       printk(KERN_DEBUG "%s (WE) : Header type : %d, Token type : %d, size : %d, token : %d\n", dev->name, descr->header_type, descr->token_type, descr->token_size, descr->max_tokens);
-#endif /* WE_EVENT_DEBUG */
 
        /* Check extra parameters and set extra_len */
-       if(descr->header_type == IW_HEADER_TYPE_POINT) {
+       if (descr->header_type == IW_HEADER_TYPE_POINT) {
                /* Check if number of token fits within bounds */
-               if(wrqu->data.length > descr->max_tokens) {
+               if (wrqu->data.length > descr->max_tokens) {
                        printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length);
                        return;
                }
-               if(wrqu->data.length < descr->min_tokens) {
+               if (wrqu->data.length < descr->min_tokens) {
                        printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length);
                        return;
                }
                /* Calculate extra_len - extra is NULL for restricted events */
-               if(extra != NULL)
+               if (extra != NULL)
                        extra_len = wrqu->data.length * descr->token_size;
                /* Always at an offset in wrqu */
                wrqu_off = IW_EV_POINT_OFF;
-#ifdef WE_EVENT_DEBUG
-               printk(KERN_DEBUG "%s (WE) : Event 0x%04X, tokens %d, extra_len %d\n", dev->name, cmd, wrqu->data.length, extra_len);
-#endif /* WE_EVENT_DEBUG */
        }
 
        /* Total length of the event */
        hdr_len = event_type_size[descr->header_type];
        event_len = hdr_len + extra_len;
 
-#ifdef WE_EVENT_DEBUG
-       printk(KERN_DEBUG "%s (WE) : Event 0x%04X, hdr_len %d, wrqu_off %d, event_len %d\n", dev->name, cmd, hdr_len, wrqu_off, event_len);
-#endif /* WE_EVENT_DEBUG */
-
        /* Create temporary buffer to hold the event */
        event = kmalloc(event_len, GFP_ATOMIC);
-       if(event == NULL)
+       if (event == NULL)
                return;
 
        /* Fill event */
        event->len = event_len;
        event->cmd = cmd;
        memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN);
-       if(extra != NULL)
+       if (extra)
                memcpy(((char *) event) + hdr_len, extra, extra_len);
 
-#ifdef WE_EVENT_RTNETLINK
        /* Send via the RtNetlink event channel */
        rtmsg_iwinfo(dev, (char *) event, event_len);
-#endif /* WE_EVENT_RTNETLINK */
 
        /* Cleanup */
        kfree(event);
 
        return;         /* Always success, I guess ;-) */
 }
+EXPORT_SYMBOL(wireless_send_event);
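With the export this stays the single entry point drivers use to push asynchronous wireless events; POINT-type events carry their payload in extra and are bounded by the same max_tokens check as above. An illustrative driver-side call reporting a new association (hypothetical function, standard SIOCGIWAP usage):

#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <net/iw_handler.h>

/* Sketch: tell user space "associated to <bssid>". */
static void my_report_assoc(struct net_device *dev, const u8 *bssid)
{
	union iwreq_data wrqu;

	memset(&wrqu, 0, sizeof(wrqu));
	memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;

	wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);	/* no extra payload */
}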
 
 /********************** ENHANCED IWSPY SUPPORT **********************/
 /*
@@ -2113,11 +1284,11 @@ void wireless_send_event(struct net_device *    dev,
  * Because this is called on the Rx path via wireless_spy_update(),
  * we want it to be efficient...
  */
-static inline struct iw_spy_data * get_spydata(struct net_device *dev)
+static inline struct iw_spy_data *get_spydata(struct net_device *dev)
 {
        /* This is the new way */
-       if(dev->wireless_data)
-               return(dev->wireless_data->spy_data);
+       if (dev->wireless_data)
+               return dev->wireless_data->spy_data;
        return NULL;
 }
 
@@ -2134,7 +1305,7 @@ int iw_handler_set_spy(struct net_device *        dev,
        struct sockaddr *       address = (struct sockaddr *) extra;
 
        /* Make sure driver is not buggy or using the old API */
-       if(!spydata)
+       if (!spydata)
                return -EOPNOTSUPP;
 
        /* Disable spy collection while we copy the addresses.
@@ -2151,29 +1322,16 @@ int iw_handler_set_spy(struct net_device *      dev,
        smp_wmb();
 
        /* Are there any addresses to copy? */
-       if(wrqu->data.length > 0) {
+       if (wrqu->data.length > 0) {
                int i;
 
                /* Copy addresses */
-               for(i = 0; i < wrqu->data.length; i++)
+               for (i = 0; i < wrqu->data.length; i++)
                        memcpy(spydata->spy_address[i], address[i].sa_data,
                               ETH_ALEN);
                /* Reset stats */
                memset(spydata->spy_stat, 0,
                       sizeof(struct iw_quality) * IW_MAX_SPY);
-
-#ifdef WE_SPY_DEBUG
-               printk(KERN_DEBUG "iw_handler_set_spy() :  wireless_data %p, spydata %p, num %d\n", dev->wireless_data, spydata, wrqu->data.length);
-               for (i = 0; i < wrqu->data.length; i++)
-                       printk(KERN_DEBUG
-                              "%02X:%02X:%02X:%02X:%02X:%02X \n",
-                              spydata->spy_address[i][0],
-                              spydata->spy_address[i][1],
-                              spydata->spy_address[i][2],
-                              spydata->spy_address[i][3],
-                              spydata->spy_address[i][4],
-                              spydata->spy_address[i][5]);
-#endif /* WE_SPY_DEBUG */
        }
 
        /* Make sure above is updated before re-enabling */
@@ -2184,6 +1342,7 @@ int iw_handler_set_spy(struct net_device *        dev,
 
        return 0;
 }
+EXPORT_SYMBOL(iw_handler_set_spy);
 
 /*------------------------------------------------------------------*/
 /*
@@ -2199,26 +1358,27 @@ int iw_handler_get_spy(struct net_device *      dev,
        int                     i;
 
        /* Make sure driver is not buggy or using the old API */
-       if(!spydata)
+       if (!spydata)
                return -EOPNOTSUPP;
 
        wrqu->data.length = spydata->spy_number;
 
        /* Copy addresses. */
-       for(i = 0; i < spydata->spy_number; i++)        {
+       for (i = 0; i < spydata->spy_number; i++)       {
                memcpy(address[i].sa_data, spydata->spy_address[i], ETH_ALEN);
                address[i].sa_family = AF_UNIX;
        }
        /* Copy stats to the user buffer (just after). */
-       if(spydata->spy_number > 0)
+       if (spydata->spy_number > 0)
                memcpy(extra  + (sizeof(struct sockaddr) *spydata->spy_number),
                       spydata->spy_stat,
                       sizeof(struct iw_quality) * spydata->spy_number);
        /* Reset updated flags. */
-       for(i = 0; i < spydata->spy_number; i++)
+       for (i = 0; i < spydata->spy_number; i++)
                spydata->spy_stat[i].updated &= ~IW_QUAL_ALL_UPDATED;
        return 0;
 }
+EXPORT_SYMBOL(iw_handler_get_spy);
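These handlers only do anything if get_spydata() can reach the driver's iw_spy_data through dev->wireless_data, and they drop straight into the standard handler table because their signatures match iw_handler. A sketch of that plumbing (hypothetical "my_" names; the usual iw_public_data layout is assumed):

#include <linux/netdevice.h>
#include <net/iw_handler.h>

struct my_priv {
	struct iw_spy_data	spy_data;	/* what get_spydata() will return */
	struct iw_public_data	wireless_data;
};

static const iw_handler my_std_handlers[] = {
	[SIOCSIWSPY    - SIOCIWFIRST] = iw_handler_set_spy,
	[SIOCGIWSPY    - SIOCIWFIRST] = iw_handler_get_spy,
	[SIOCSIWTHRSPY - SIOCIWFIRST] = iw_handler_set_thrspy,
	[SIOCGIWTHRSPY - SIOCIWFIRST] = iw_handler_get_thrspy,
	/* ... remaining standard handlers ... */
};

/* At probe time:
 *	priv->wireless_data.spy_data = &priv->spy_data;
 *	dev->wireless_data = &priv->wireless_data;
 * and on the receive path the driver calls
 *	wireless_spy_update(dev, src_addr, &wstats)
 * with a filled-in struct iw_quality.
 */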
 
 /*------------------------------------------------------------------*/
 /*
@@ -2233,7 +1393,7 @@ int iw_handler_set_thrspy(struct net_device *     dev,
        struct iw_thrspy *      threshold = (struct iw_thrspy *) extra;
 
        /* Make sure driver is not buggy or using the old API */
-       if(!spydata)
+       if (!spydata)
                return -EOPNOTSUPP;
 
        /* Just do it */
@@ -2243,12 +1403,9 @@ int iw_handler_set_thrspy(struct net_device *    dev,
        /* Clear flag */
        memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under));
 
-#ifdef WE_SPY_DEBUG
-       printk(KERN_DEBUG "iw_handler_set_thrspy() :  low %d ; high %d\n", spydata->spy_thr_low.level, spydata->spy_thr_high.level);
-#endif /* WE_SPY_DEBUG */
-
        return 0;
 }
+EXPORT_SYMBOL(iw_handler_set_thrspy);
 
 /*------------------------------------------------------------------*/
 /*
@@ -2263,7 +1420,7 @@ int iw_handler_get_thrspy(struct net_device *     dev,
        struct iw_thrspy *      threshold = (struct iw_thrspy *) extra;
 
        /* Make sure driver is not buggy or using the old API */
-       if(!spydata)
+       if (!spydata)
                return -EOPNOTSUPP;
 
        /* Just do it */
@@ -2272,6 +1429,7 @@ int iw_handler_get_thrspy(struct net_device *     dev,
 
        return 0;
 }
+EXPORT_SYMBOL(iw_handler_get_thrspy);
 
 /*------------------------------------------------------------------*/
 /*
@@ -2297,16 +1455,6 @@ static void iw_send_thrspy_event(struct net_device *     dev,
        memcpy(&(threshold.low), &(spydata->spy_thr_low),
               2 * sizeof(struct iw_quality));
 
-#ifdef WE_SPY_DEBUG
-       printk(KERN_DEBUG "iw_send_thrspy_event() : address %02X:%02X:%02X:%02X:%02X:%02X, level %d, up = %d\n",
-              threshold.addr.sa_data[0],
-              threshold.addr.sa_data[1],
-              threshold.addr.sa_data[2],
-              threshold.addr.sa_data[3],
-              threshold.addr.sa_data[4],
-              threshold.addr.sa_data[5], threshold.qual.level);
-#endif /* WE_SPY_DEBUG */
-
        /* Send event to user space */
        wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold);
 }
@@ -2327,16 +1475,12 @@ void wireless_spy_update(struct net_device *    dev,
        int                     match = -1;
 
        /* Make sure driver is not buggy or using the old API */
-       if(!spydata)
+       if (!spydata)
                return;
 
-#ifdef WE_SPY_DEBUG
-       printk(KERN_DEBUG "wireless_spy_update() :  wireless_data %p, spydata %p, address %02X:%02X:%02X:%02X:%02X:%02X\n", dev->wireless_data, spydata, address[0], address[1], address[2], address[3], address[4], address[5]);
-#endif /* WE_SPY_DEBUG */
-
        /* Update all records that match */
-       for(i = 0; i < spydata->spy_number; i++)
-               if(!compare_ether_addr(address, spydata->spy_address[i])) {
+       for (i = 0; i < spydata->spy_number; i++)
+               if (!compare_ether_addr(address, spydata->spy_address[i])) {
                        memcpy(&(spydata->spy_stat[i]), wstats,
                               sizeof(struct iw_quality));
                        match = i;
@@ -2346,15 +1490,15 @@ void wireless_spy_update(struct net_device *    dev,
         * To avoid event storms, we have a simple hysteresis : we generate
         * event only when we go under the low threshold or above the
         * high threshold. */
-       if(match >= 0) {
-               if(spydata->spy_thr_under[match]) {
-                       if(wstats->level > spydata->spy_thr_high.level) {
+       if (match >= 0) {
+               if (spydata->spy_thr_under[match]) {
+                       if (wstats->level > spydata->spy_thr_high.level) {
                                spydata->spy_thr_under[match] = 0;
                                iw_send_thrspy_event(dev, spydata,
                                                     address, wstats);
                        }
                } else {
-                       if(wstats->level < spydata->spy_thr_low.level) {
+                       if (wstats->level < spydata->spy_thr_low.level) {
                                spydata->spy_thr_under[match] = 1;
                                iw_send_thrspy_event(dev, spydata,
                                                     address, wstats);
@@ -2362,10 +1506,4 @@ void wireless_spy_update(struct net_device *     dev,
                }
        }
 }
-
-EXPORT_SYMBOL(iw_handler_get_spy);
-EXPORT_SYMBOL(iw_handler_get_thrspy);
-EXPORT_SYMBOL(iw_handler_set_spy);
-EXPORT_SYMBOL(iw_handler_set_thrspy);
-EXPORT_SYMBOL(wireless_send_event);
 EXPORT_SYMBOL(wireless_spy_update);
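
For readers following the wireless extensions rework: the spy state exported by these handlers is fed from driver receive paths through wireless_spy_update(). Below is a minimal sketch of such a call site; the mydrv_* names are hypothetical and the wiring of dev->wireless_data to the driver's iw_public_data is assumed to have been done at probe time, none of which is part of this patch.

#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>

/* Hypothetical driver-private state carrying the WE spy structures. */
struct mydrv_priv {
        struct iw_public_data   wireless_data;
        struct iw_spy_data      spy_data;
};

/* Hypothetical rx hook: report link quality for the sender so that
 * iw_handler_get_spy() and the SIOCGIWTHRSPY threshold events above
 * see fresh numbers. */
static void mydrv_report_quality(struct net_device *dev,
                                 unsigned char *src_addr,
                                 u8 level, u8 noise)
{
        struct iw_quality wstats;

        wstats.level   = level;
        wstats.noise   = noise;
        wstats.qual    = level > noise ? level - noise : 0;
        wstats.updated = IW_QUAL_ALL_UPDATED;

        /* Matches src_addr against the spy list and, when thresholds are
         * armed, generates the SIOCGIWTHRSPY events built above. */
        wireless_spy_update(dev, src_addr, &wstats);
}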
index e62ba41..0d6002f 100644 (file)
@@ -951,7 +951,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
         *      Incoming Call User Data.
         */
        if (skb->len >= 0) {
-               memcpy(makex25->calluserdata.cuddata, skb->data, skb->len);
+               skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
                makex25->calluserdata.cudlength = skb->len;
        }
 
@@ -1058,9 +1058,10 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
         */
        SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");
 
-       asmptr = skb->h.raw = skb_put(skb, len);
+       skb_reset_transport_header(skb);
+       skb_put(skb, len);
 
-       rc = memcpy_fromiovec(asmptr, msg->msg_iov, len);
+       rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
        if (rc)
                goto out_kfree_skb;
 
@@ -1210,8 +1211,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
                }
        }
 
-       skb->h.raw = skb->data;
-
+       skb_reset_transport_header(skb);
        copied = skb->len;
 
        if (copied > size) {
@@ -1280,6 +1280,12 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                                rc = sock_get_timestamp(sk,
                                                (struct timeval __user *)argp);
                        break;
+               case SIOCGSTAMPNS:
+                       rc = -EINVAL;
+                       if (sk)
+                               rc = sock_get_timestampns(sk,
+                                               (struct timespec __user *)argp);
+                       break;
                case SIOCGIFADDR:
                case SIOCSIFADDR:
                case SIOCGIFDSTADDR:
@@ -1521,6 +1527,12 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
                        rc = compat_sock_get_timestamp(sk,
                                        (struct timeval __user*)argp);
                break;
+       case SIOCGSTAMPNS:
+               rc = -EINVAL;
+               if (sk)
+                       rc = compat_sock_get_timestampns(sk,
+                                       (struct timespec __user*)argp);
+               break;
        case SIOCGIFADDR:
        case SIOCSIFADDR:
        case SIOCGIFDSTADDR:
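
The accessor conversion in this file (and repeated throughout the series) replaces direct skb->h.raw / skb->nh.raw manipulation with helper calls, so the code keeps working when those fields stop being plain pointers. A small consolidated sketch of the converted idioms, not tied to X.25:

#include <linux/skbuff.h>

/* Old form                         New form
 * skb->h.raw  = skb->data;     ->  skb_reset_transport_header(skb);
 * skb->nh.raw = skb->data;     ->  skb_reset_network_header(skb);
 * skb->h.raw + 2               ->  skb_transport_header(skb) + 2
 * skb->nh.raw - skb->data      ->  skb_network_offset(skb)
 */
static __be16 read_u16_after_transport_header(struct sk_buff *skb)
{
        skb_reset_transport_header(skb);        /* transport header = skb->data */

        /* caller is assumed to have done pskb_may_pull(skb, 4) first */
        return *(__be16 *)(skb_transport_header(skb) + 2);
}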
index c7221de..848a6b6 100644 (file)
@@ -48,7 +48,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
        if ((sk = x25_find_socket(lci, nb)) != NULL) {
                int queued = 1;
 
-               skb->h.raw = skb->data;
+               skb_reset_transport_header(skb);
                bh_lock_sock(sk);
                if (!sock_owned_by_user(sk)) {
                        queued = x25_process_rx_frame(sk, skb);
@@ -191,7 +191,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
 {
        unsigned char *dptr;
 
-       skb->nh.raw = skb->data;
+       skb_reset_network_header(skb);
 
        switch (nb->dev->type) {
                case ARPHRD_X25:
index c5239fc..1c88762 100644 (file)
@@ -53,17 +53,20 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
 
                skb_queue_tail(&x25->fragment_queue, skb);
 
-               skbn->h.raw = skbn->data;
+               skb_reset_transport_header(skbn);
 
                skbo = skb_dequeue(&x25->fragment_queue);
-               memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);
+               skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len),
+                                         skbo->len);
                kfree_skb(skbo);
 
                while ((skbo =
                        skb_dequeue(&x25->fragment_queue)) != NULL) {
                        skb_pull(skbo, (x25->neighbour->extended) ?
                                        X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
-                       memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);
+                       skb_copy_from_linear_data(skbo,
+                                                 skb_put(skbn, skbo->len),
+                                                 skbo->len);
                        kfree_skb(skbo);
                }
 
@@ -112,8 +115,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
                         *      Copy any Call User Data.
                         */
                        if (skb->len >= 0) {
-                               memcpy(x25->calluserdata.cuddata, skb->data,
-                                      skb->len);
+                               skb_copy_from_linear_data(skb,
+                                             x25->calluserdata.cuddata,
+                                             skb->len);
                                x25->calluserdata.cudlength = skb->len;
                        }
                        if (!sock_flag(sk, SOCK_DEAD))
index 6f57378..2b96b52 100644 (file)
@@ -61,7 +61,7 @@ int x25_output(struct sock *sk, struct sk_buff *skb)
 
        if (skb->len - header_len > max_len) {
                /* Save a copy of the Header */
-               memcpy(header, skb->data, header_len);
+               skb_copy_from_linear_data(skb, header, header_len);
                skb_pull(skb, header_len);
 
                frontlen = skb_headroom(skb);
@@ -84,12 +84,12 @@ int x25_output(struct sock *sk, struct sk_buff *skb)
                        len = max_len > skb->len ? skb->len : max_len;
 
                        /* Copy the user data */
-                       memcpy(skb_put(skbn, len), skb->data, len);
+                       skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                        skb_pull(skb, len);
 
                        /* Duplicate the Header */
                        skb_push(skbn, header_len);
-                       memcpy(skbn->data, header, header_len);
+                       skb_copy_to_linear_data(skbn, header, header_len);
 
                        if (skb->len > 0) {
                                if (x25->neighbour->extended)
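
skb_copy_from_linear_data() and skb_copy_to_linear_data() are typed wrappers around memcpy() on the skb's linear buffer, so the conversions above are mechanical. A brief sketch of the equivalence; like the memcpy() calls they replace, these are only valid for bytes within skb_headlen():

#include <linux/skbuff.h>

static void copy_header_example(struct sk_buff *skb, struct sk_buff *skbn,
                                unsigned char *header, int header_len)
{
        /* was: memcpy(header, skb->data, header_len); */
        skb_copy_from_linear_data(skb, header, header_len);

        /* was: memcpy(skbn->data, header, header_len); */
        skb_copy_to_linear_data(skbn, header, header_len);
}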
index f373a8a..be529c4 100644 (file)
@@ -532,8 +532,8 @@ EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
 int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
                 int offset, int len, icv_update_fn_t icv_update)
 {
-       int start = skb_headlen(skb);
-       int i, copy = start - offset;
+       int end = skb_headlen(skb);
+       int i, copy = end - offset;
        int err;
        struct scatterlist sg;
 
@@ -556,11 +556,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
+               BUG_TRAP(len >= 0);
 
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
+               end = offset + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -568,7 +566,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
                                copy = len;
 
                        sg.page = frag->page;
-                       sg.offset = frag->page_offset + offset-start;
+                       sg.offset = frag->page_offset;
                        sg.length = copy;
 
                        err = icv_update(desc, &sg, copy);
@@ -579,22 +577,19 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
                                return 0;
                        offset += copy;
                }
-               start = end;
        }
 
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
+                       BUG_TRAP(len >= 0);
 
-                       end = start + list->len;
+                       end = offset + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
-                               err = skb_icv_walk(list, desc, offset-start,
+                               err = skb_icv_walk(list, desc, 0,
                                                   copy, icv_update);
                                if (unlikely(err))
                                        return err;
@@ -602,7 +597,6 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
                                        return 0;
                                offset += copy;
                        }
-                       start = end;
                }
        }
        BUG_ON(len);
@@ -612,175 +606,6 @@ EXPORT_SYMBOL_GPL(skb_icv_walk);
 
 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
 
-/* Looking generic it is not used in another places. */
-
-int
-skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
-{
-       int start = skb_headlen(skb);
-       int i, copy = start - offset;
-       int elt = 0;
-
-       if (copy > 0) {
-               if (copy > len)
-                       copy = len;
-               sg[elt].page = virt_to_page(skb->data + offset);
-               sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
-               sg[elt].length = copy;
-               elt++;
-               if ((len -= copy) == 0)
-                       return elt;
-               offset += copy;
-       }
-
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               int end;
-
-               BUG_TRAP(start <= offset + len);
-
-               end = start + skb_shinfo(skb)->frags[i].size;
-               if ((copy = end - offset) > 0) {
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-                       if (copy > len)
-                               copy = len;
-                       sg[elt].page = frag->page;
-                       sg[elt].offset = frag->page_offset+offset-start;
-                       sg[elt].length = copy;
-                       elt++;
-                       if (!(len -= copy))
-                               return elt;
-                       offset += copy;
-               }
-               start = end;
-       }
-
-       if (skb_shinfo(skb)->frag_list) {
-               struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-               for (; list; list = list->next) {
-                       int end;
-
-                       BUG_TRAP(start <= offset + len);
-
-                       end = start + list->len;
-                       if ((copy = end - offset) > 0) {
-                               if (copy > len)
-                                       copy = len;
-                               elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
-                               if ((len -= copy) == 0)
-                                       return elt;
-                               offset += copy;
-                       }
-                       start = end;
-               }
-       }
-       BUG_ON(len);
-       return elt;
-}
-EXPORT_SYMBOL_GPL(skb_to_sgvec);
-
-/* Check that skb data bits are writable. If they are not, copy data
- * to newly created private area. If "tailbits" is given, make sure that
- * tailbits bytes beyond current end of skb are writable.
- *
- * Returns amount of elements of scatterlist to load for subsequent
- * transformations and pointer to writable trailer skb.
- */
-
-int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
-{
-       int copyflag;
-       int elt;
-       struct sk_buff *skb1, **skb_p;
-
-       /* If skb is cloned or its head is paged, reallocate
-        * head pulling out all the pages (pages are considered not writable
-        * at the moment even if they are anonymous).
-        */
-       if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
-           __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
-               return -ENOMEM;
-
-       /* Easy case. Most of packets will go this way. */
-       if (!skb_shinfo(skb)->frag_list) {
-               /* A little of trouble, not enough of space for trailer.
-                * This should not happen, when stack is tuned to generate
-                * good frames. OK, on miss we reallocate and reserve even more
-                * space, 128 bytes is fair. */
-
-               if (skb_tailroom(skb) < tailbits &&
-                   pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
-                       return -ENOMEM;
-
-               /* Voila! */
-               *trailer = skb;
-               return 1;
-       }
-
-       /* Misery. We are in troubles, going to mincer fragments... */
-
-       elt = 1;
-       skb_p = &skb_shinfo(skb)->frag_list;
-       copyflag = 0;
-
-       while ((skb1 = *skb_p) != NULL) {
-               int ntail = 0;
-
-               /* The fragment is partially pulled by someone,
-                * this can happen on input. Copy it and everything
-                * after it. */
-
-               if (skb_shared(skb1))
-                       copyflag = 1;
-
-               /* If the skb is the last, worry about trailer. */
-
-               if (skb1->next == NULL && tailbits) {
-                       if (skb_shinfo(skb1)->nr_frags ||
-                           skb_shinfo(skb1)->frag_list ||
-                           skb_tailroom(skb1) < tailbits)
-                               ntail = tailbits + 128;
-               }
-
-               if (copyflag ||
-                   skb_cloned(skb1) ||
-                   ntail ||
-                   skb_shinfo(skb1)->nr_frags ||
-                   skb_shinfo(skb1)->frag_list) {
-                       struct sk_buff *skb2;
-
-                       /* Fuck, we are miserable poor guys... */
-                       if (ntail == 0)
-                               skb2 = skb_copy(skb1, GFP_ATOMIC);
-                       else
-                               skb2 = skb_copy_expand(skb1,
-                                                      skb_headroom(skb1),
-                                                      ntail,
-                                                      GFP_ATOMIC);
-                       if (unlikely(skb2 == NULL))
-                               return -ENOMEM;
-
-                       if (skb1->sk)
-                               skb_set_owner_w(skb2, skb1->sk);
-
-                       /* Looking around. Are we still alive?
-                        * OK, link new skb, drop old one */
-
-                       skb2->next = skb1->next;
-                       *skb_p = skb2;
-                       kfree_skb(skb1);
-                       skb1 = skb2;
-               }
-               elt++;
-               *trailer = skb1;
-               skb_p = &skb1->next;
-       }
-
-       return elt;
-}
-EXPORT_SYMBOL_GPL(skb_cow_data);
-
 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
 {
        if (tail != skb) {
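
Only the definitions of skb_to_sgvec() and skb_cow_data() disappear here; their signatures are unchanged from what the removed lines show. A caller-side sketch of how ESP-style code uses the pair, assuming the helpers remain available from the core sk_buff code; prepare_sg() itself is illustrative only:

#include <linux/skbuff.h>
#include <linux/scatterlist.h>

static int prepare_sg(struct sk_buff *skb, struct scatterlist *sg, int nsg_max)
{
        struct sk_buff *trailer;
        int nfrags;

        /* Make the payload writable and obtain a writable trailer skb;
         * a real transform would pass its trailer length instead of 0.
         * The return value also bounds the scatterlist entries needed. */
        nfrags = skb_cow_data(skb, 0, &trailer);
        if (nfrags < 0)
                return nfrags;
        if (nfrags > nsg_max)
                return -ENOSPC;

        /* Map the whole payload into the scatterlist. */
        skb_to_sgvec(skb, sg, 0, skb->len);
        return nfrags;
}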
index ee15bda..5c46958 100644 (file)
@@ -62,7 +62,7 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
        case IPPROTO_COMP:
                if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
                        return -EINVAL;
-               *spi = htonl(ntohs(*(__be16*)(skb->h.raw + 2)));
+               *spi = htonl(ntohs(*(__be16*)(skb_transport_header(skb) + 2)));
                *seq = 0;
                return 0;
        default:
@@ -72,8 +72,8 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
        if (!pskb_may_pull(skb, 16))
                return -EINVAL;
 
-       *spi = *(__be32*)(skb->h.raw + offset);
-       *seq = *(__be32*)(skb->h.raw + offset_seq);
+       *spi = *(__be32*)(skb_transport_header(skb) + offset);
+       *seq = *(__be32*)(skb_transport_header(skb) + offset_seq);
        return 0;
 }
 EXPORT_SYMBOL(xfrm_parse_spi);
index 785c3e3..7629260 100644 (file)
@@ -268,7 +268,7 @@ static inline unsigned long make_jiffies(long secs)
 static void xfrm_policy_timer(unsigned long data)
 {
        struct xfrm_policy *xp = (struct xfrm_policy*)data;
-       unsigned long now = (unsigned long)xtime.tv_sec;
+       unsigned long now = get_seconds();
        long next = LONG_MAX;
        int warn = 0;
        int dir;
@@ -690,7 +690,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
        }
        policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
        hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
-       policy->curlft.add_time = (unsigned long)xtime.tv_sec;
+       policy->curlft.add_time = get_seconds();
        policy->curlft.use_time = 0;
        if (!mod_timer(&policy->timer, jiffies + HZ))
                xfrm_pol_hold(policy);
@@ -1049,7 +1049,7 @@ static inline int policy_to_flow_dir(int dir)
                return FLOW_DIR_OUT;
        case XFRM_POLICY_FWD:
                return FLOW_DIR_FWD;
-       };
+       }
 }
 
 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
@@ -1133,7 +1133,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
        old_pol = sk->sk_policy[dir];
        sk->sk_policy[dir] = pol;
        if (pol) {
-               pol->curlft.add_time = (unsigned long)xtime.tv_sec;
+               pol->curlft.add_time = get_seconds();
                pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
                __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
        }
@@ -1386,7 +1386,7 @@ restart:
                return 0;
 
        family = dst_orig->ops->family;
-       policy->curlft.use_time = (unsigned long)xtime.tv_sec;
+       policy->curlft.use_time = get_seconds();
        pols[0] = policy;
        npols ++;
        xfrm_nr += pols[0]->xfrm_nr;
@@ -1682,7 +1682,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                return 1;
        }
 
-       pol->curlft.use_time = (unsigned long)xtime.tv_sec;
+       pol->curlft.use_time = get_seconds();
 
        pols[0] = pol;
        npols ++;
@@ -1694,7 +1694,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                if (pols[1]) {
                        if (IS_ERR(pols[1]))
                                return 0;
-                       pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec;
+                       pols[1]->curlft.use_time = get_seconds();
                        npols ++;
                }
        }
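
get_seconds() returns the same wall-clock seconds value as the old (unsigned long)xtime.tv_sec expression without touching xtime directly, so the lifetime handling above is unchanged in behaviour. A tiny illustrative helper in the same spirit; it is not part of the patch, though the field names follow struct xfrm_lifetime_cur:

#include <linux/time.h>
#include <net/xfrm.h>

/* Illustrative only: has this policy gone unused for at least 'idle' seconds? */
static int policy_idle_for(const struct xfrm_policy *xp, unsigned long idle)
{
        unsigned long now = get_seconds();      /* was (unsigned long)xtime.tv_sec */

        return xp->curlft.use_time && now - xp->curlft.use_time >= idle;
}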
index e3a0bcf..f3a61eb 100644 (file)
@@ -233,7 +233,7 @@ static inline unsigned long make_jiffies(long secs)
 static void xfrm_timer_handler(unsigned long data)
 {
        struct xfrm_state *x = (struct xfrm_state*)data;
-       unsigned long now = (unsigned long)xtime.tv_sec;
+       unsigned long now = get_seconds();
        long next = LONG_MAX;
        int warn = 0;
        int err = 0;
@@ -326,7 +326,7 @@ struct xfrm_state *xfrm_state_alloc(void)
                init_timer(&x->rtimer);
                x->rtimer.function = xfrm_replay_timer_handler;
                x->rtimer.data     = (unsigned long)x;
-               x->curlft.add_time = (unsigned long)xtime.tv_sec;
+               x->curlft.add_time = get_seconds();
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
                x->lft.hard_byte_limit = XFRM_INF;
@@ -421,6 +421,16 @@ restart:
 }
 EXPORT_SYMBOL(xfrm_state_flush);
 
+void xfrm_sad_getinfo(struct xfrm_sadinfo *si)
+{
+       spin_lock_bh(&xfrm_state_lock);
+       si->sadcnt = xfrm_state_num;
+       si->sadhcnt = xfrm_state_hmask;
+       si->sadhmcnt = xfrm_state_hashmax;
+       spin_unlock_bh(&xfrm_state_lock);
+}
+EXPORT_SYMBOL(xfrm_sad_getinfo);
+
 static int
 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
                  struct xfrm_tmpl *tmpl,
@@ -458,7 +468,7 @@ static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi,
                                             x->id.daddr.a6))
                                continue;
                        break;
-               };
+               }
 
                xfrm_state_hold(x);
                return x;
@@ -493,7 +503,7 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm
                                             x->props.saddr.a6))
                                continue;
                        break;
-               };
+               }
 
                xfrm_state_hold(x);
                return x;
@@ -722,7 +732,7 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re
                                             (struct in6_addr *)saddr))
                                continue;
                        break;
-               };
+               }
 
                xfrm_state_hold(x);
                return x;
@@ -755,7 +765,7 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re
                        ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
                                       (struct in6_addr *)daddr);
                        break;
-               };
+               }
 
                x->km.state = XFRM_STATE_ACQ;
                x->id.proto = proto;
@@ -1051,7 +1061,7 @@ EXPORT_SYMBOL(xfrm_state_update);
 int xfrm_state_check_expire(struct xfrm_state *x)
 {
        if (!x->curlft.use_time)
-               x->curlft.use_time = (unsigned long)xtime.tv_sec;
+               x->curlft.use_time = get_seconds();
 
        if (x->km.state != XFRM_STATE_VALID)
                return -EINVAL;
@@ -1667,37 +1677,17 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
 }
 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
 
-/*
- * This function is NOT optimal.  For example, with ESP it will give an
- * MTU that's usually two bytes short of being optimal.  However, it will
- * usually give an answer that's a multiple of 4 provided the input is
- * also a multiple of 4.
- */
 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
 {
-       int res = mtu;
-
-       res -= x->props.header_len;
-
-       for (;;) {
-               int m = res;
-
-               if (m < 68)
-                       return 68;
-
-               spin_lock_bh(&x->lock);
-               if (x->km.state == XFRM_STATE_VALID &&
-                   x->type && x->type->get_max_size)
-                       m = x->type->get_max_size(x, m);
-               else
-                       m += x->props.header_len;
-               spin_unlock_bh(&x->lock);
-
-               if (m <= mtu)
-                       break;
-               res -= (m - mtu);
-       }
+       int res;
 
+       spin_lock_bh(&x->lock);
+       if (x->km.state == XFRM_STATE_VALID &&
+           x->type && x->type->get_mtu)
+               res = x->type->get_mtu(x, mtu);
+       else
+               res = mtu;
+       spin_unlock_bh(&x->lock);
        return res;
 }
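
The rewritten xfrm_state_mtu() simply delegates to a per-type ->get_mtu() hook instead of iterating over ->get_max_size(). The shape such a callback takes might look like the sketch below; the myxfrm_* names and overhead constants are invented for illustration and are not taken from esp or ipcomp:

#include <net/xfrm.h>

#define MYXFRM_BLOCK_SIZE       16      /* illustrative cipher block size */
#define MYXFRM_TRAILER_LEN      2       /* illustrative pad-len + next-header */

/* Hypothetical ->get_mtu(): largest payload that still fits in 'mtu' once
 * this transform's header, padding and trailer are added (assumes mtu
 * comfortably exceeds the fixed overhead). */
static u32 myxfrm_get_mtu(struct xfrm_state *x, int mtu)
{
        u32 payload = mtu - x->props.header_len - MYXFRM_TRAILER_LEN;

        return payload - payload % MYXFRM_BLOCK_SIZE;
}

static struct xfrm_type myxfrm_type = {
        /* .description, .owner, .proto, .input, .output etc. omitted */
        .get_mtu        = myxfrm_get_mtu,
};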
 
index 816e369..69110fe 100644 (file)
@@ -71,7 +71,7 @@ static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
 
        default:
                return -EINVAL;
-       };
+       }
 
        algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
        return 0;
@@ -152,7 +152,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
 
        default:
                goto out;
-       };
+       }
 
        err = -EINVAL;
        switch (p->id.proto) {
@@ -192,7 +192,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
 
        default:
                goto out;
-       };
+       }
 
        if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH)))
                goto out;
@@ -217,7 +217,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
 
        default:
                goto out;
-       };
+       }
 
        err = 0;
 
@@ -576,7 +576,7 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
        struct sk_buff *skb = sp->out_skb;
        struct xfrm_usersa_info *p;
        struct nlmsghdr *nlh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        if (sp->this_idx < sp->start_idx)
                goto out;
@@ -621,14 +621,14 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
        if (x->lastused)
                RTA_PUT(skb, XFRMA_LASTUSED, sizeof(x->lastused), &x->lastused);
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 out:
        sp->this_idx++;
        return 0;
 
 nlmsg_failure:
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -672,6 +672,61 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
        return skb;
 }
 
+static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
+{
+       struct xfrm_sadinfo si;
+       struct nlmsghdr *nlh;
+       u32 *f;
+
+       nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
+       if (nlh == NULL) /* shouldn't really happen ... */
+               return -EMSGSIZE;
+
+       f = nlmsg_data(nlh);
+       *f = flags;
+       xfrm_sad_getinfo(&si);
+
+       if (flags & XFRM_SAD_HMASK)
+               NLA_PUT_U32(skb, XFRMA_SADHMASK, si.sadhcnt);
+       if (flags & XFRM_SAD_HMAX)
+               NLA_PUT_U32(skb, XFRMA_SADHMAX, si.sadhmcnt);
+       if (flags & XFRM_SAD_CNT)
+               NLA_PUT_U32(skb, XFRMA_SADCNT, si.sadcnt);
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
+{
+       struct sk_buff *r_skb;
+       u32 *flags = NLMSG_DATA(nlh);
+       u32 spid = NETLINK_CB(skb).pid;
+       u32 seq = nlh->nlmsg_seq;
+       int len = NLMSG_LENGTH(sizeof(u32));
+
+       if (*flags & XFRM_SAD_HMASK)
+               len += RTA_SPACE(sizeof(u32));
+       if (*flags & XFRM_SAD_HMAX)
+               len += RTA_SPACE(sizeof(u32));
+       if (*flags & XFRM_SAD_CNT)
+               len += RTA_SPACE(sizeof(u32));
+
+       r_skb = alloc_skb(len, GFP_ATOMIC);
+
+       if (r_skb == NULL)
+               return -ENOMEM;
+
+       if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
+               BUG();
+
+       return nlmsg_unicast(xfrm_nl, r_skb, spid);
+}
+
 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
                struct rtattr **xfrma)
 {
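
For completeness, XFRM_MSG_GETSADINFO can be exercised from user space over a plain NETLINK_XFRM socket: the request body is just the u32 flags word handled above, and the XFRM_MSG_NEWSADINFO reply carries XFRMA_SADCNT / XFRMA_SADHMASK / XFRMA_SADHMAX attributes after an echoed flags word. The program below is a rough sketch, assuming userspace headers from a kernel with this change and CAP_NET_ADMIN, with most error handling trimmed; the kernel emits nlattr headers, which struct rtattr parses identically:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/xfrm.h>

int main(void)
{
        struct {
                struct nlmsghdr n;
                __u32 flags;
        } req;
        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
        char buf[1024];
        struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
        struct rtattr *rta;
        int fd, rem;

        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
        if (fd < 0)
                return 1;

        memset(&req, 0, sizeof(req));
        req.n.nlmsg_len   = NLMSG_LENGTH(sizeof(__u32));
        req.n.nlmsg_type  = XFRM_MSG_GETSADINFO;
        req.n.nlmsg_flags = NLM_F_REQUEST;
        req.flags = XFRM_SAD_CNT | XFRM_SAD_HMASK | XFRM_SAD_HMAX;

        if (sendto(fd, &req, req.n.nlmsg_len, 0,
                   (struct sockaddr *)&sa, sizeof(sa)) < 0)
                return 1;
        if (recv(fd, buf, sizeof(buf), 0) <= 0)
                return 1;
        if (nlh->nlmsg_type != XFRM_MSG_NEWSADINFO)
                return 1;       /* likely NLMSG_ERROR, e.g. missing capability */

        /* attributes start after the echoed u32 flags word */
        rta = (struct rtattr *)((char *)NLMSG_DATA(nlh) +
                                NLMSG_ALIGN(sizeof(__u32)));
        rem = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(__u32));

        for (; RTA_OK(rta, rem); rta = RTA_NEXT(rta, rem)) {
                __u32 val = *(__u32 *)RTA_DATA(rta);

                if (rta->rta_type == XFRMA_SADCNT)
                        printf("SAs in SAD: %u\n", val);
                else if (rta->rta_type == XFRMA_SADHMASK)
                        printf("hash mask:  %u\n", val);
                else if (rta->rta_type == XFRMA_SADHMAX)
                        printf("hash max:   %u\n", val);
        }

        close(fd);
        return 0;
}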
@@ -711,7 +766,7 @@ static int verify_userspi_info(struct xfrm_userspi_info *p)
 
        default:
                return -EINVAL;
-       };
+       }
 
        if (p->min > p->max)
                return -EINVAL;
@@ -789,7 +844,7 @@ static int verify_policy_dir(u8 dir)
 
        default:
                return -EINVAL;
-       };
+       }
 
        return 0;
 }
@@ -805,7 +860,7 @@ static int verify_policy_type(u8 type)
 
        default:
                return -EINVAL;
-       };
+       }
 
        return 0;
 }
@@ -821,7 +876,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 
        default:
                return -EINVAL;
-       };
+       }
 
        switch (p->action) {
        case XFRM_POLICY_ALLOW:
@@ -830,7 +885,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 
        default:
                return -EINVAL;
-       };
+       }
 
        switch (p->sel.family) {
        case AF_INET:
@@ -845,7 +900,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 
        default:
                return -EINVAL;
-       };
+       }
 
        return verify_policy_dir(p->dir);
 }
@@ -912,7 +967,7 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 #endif
                default:
                        return -EINVAL;
-               };
+               }
        }
 
        return 0;
@@ -1157,7 +1212,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
        struct sk_buff *in_skb = sp->in_skb;
        struct sk_buff *skb = sp->out_skb;
        struct nlmsghdr *nlh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        if (sp->this_idx < sp->start_idx)
                goto out;
@@ -1176,13 +1231,13 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
        if (copy_to_user_policy_type(xp->type, skb) < 0)
                goto nlmsg_failure;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 out:
        sp->this_idx++;
        return 0;
 
 nlmsg_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1330,7 +1385,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
        struct xfrm_aevent_id *id;
        struct nlmsghdr *nlh;
        struct xfrm_lifetime_cur ltime;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id));
        id = NLMSG_DATA(nlh);
@@ -1362,12 +1417,12 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
                RTA_PUT(skb,XFRMA_ETIMER_THRESH,sizeof(u32),&etimer);
        }
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 rtattr_failure:
 nlmsg_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1744,7 +1799,7 @@ static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
        struct xfrm_migrate *mp;
        struct xfrm_userpolicy_id *pol_id;
        struct nlmsghdr *nlh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        int i;
 
        nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id));
@@ -1764,10 +1819,10 @@ static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
                        goto nlmsg_failure;
        }
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 nlmsg_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1823,6 +1878,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
        [XFRM_MSG_GETAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
        [XFRM_MSG_REPORT      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
        [XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
+       [XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = NLMSG_LENGTH(sizeof(u32)),
 };
 
 #undef XMSGSIZE
@@ -1850,55 +1906,39 @@ static struct xfrm_link {
        [XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = { .doit = xfrm_new_ae  },
        [XFRM_MSG_GETAE       - XFRM_MSG_BASE] = { .doit = xfrm_get_ae  },
        [XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate    },
+       [XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo   },
 };
 
-static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
+static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct rtattr *xfrma[XFRMA_MAX];
        struct xfrm_link *link;
        int type, min_len;
 
-       if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
-               return 0;
-
        type = nlh->nlmsg_type;
-
-       /* A control message: ignore them */
-       if (type < XFRM_MSG_BASE)
-               return 0;
-
-       /* Unknown message: reply with EINVAL */
        if (type > XFRM_MSG_MAX)
-               goto err_einval;
+               return -EINVAL;
 
        type -= XFRM_MSG_BASE;
        link = &xfrm_dispatch[type];
 
        /* All operations require privileges, even GET */
-       if (security_netlink_recv(skb, CAP_NET_ADMIN)) {
-               *errp = -EPERM;
-               return -1;
-       }
+       if (security_netlink_recv(skb, CAP_NET_ADMIN))
+               return -EPERM;
 
        if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
             type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
            (nlh->nlmsg_flags & NLM_F_DUMP)) {
                if (link->dump == NULL)
-                       goto err_einval;
-
-               if ((*errp = netlink_dump_start(xfrm_nl, skb, nlh,
-                                               link->dump, NULL)) != 0) {
-                       return -1;
-               }
+                       return -EINVAL;
 
-               netlink_queue_skip(nlh, skb);
-               return -1;
+               return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL);
        }
 
        memset(xfrma, 0, sizeof(xfrma));
 
        if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type]))
-               goto err_einval;
+               return -EINVAL;
 
        if (nlh->nlmsg_len > min_len) {
                int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
@@ -1908,7 +1948,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err
                        unsigned short flavor = attr->rta_type;
                        if (flavor) {
                                if (flavor > XFRMA_MAX)
-                                       goto err_einval;
+                                       return -EINVAL;
                                xfrma[flavor - 1] = attr;
                        }
                        attr = RTA_NEXT(attr, attrlen);
@@ -1916,14 +1956,9 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err
        }
 
        if (link->doit == NULL)
-               goto err_einval;
-       *errp = link->doit(skb, nlh, xfrma);
-
-       return *errp;
+               return -EINVAL;
 
-err_einval:
-       *errp = -EINVAL;
-       return -1;
+       return link->doit(skb, nlh, xfrma);
 }
 
 static void xfrm_netlink_rcv(struct sock *sk, int len)
@@ -1942,7 +1977,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
 {
        struct xfrm_user_expire *ue;
        struct nlmsghdr *nlh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_EXPIRE,
                        sizeof(*ue));
@@ -1952,11 +1987,11 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
        copy_to_user_state(x, &ue->state);
        ue->hard = (c->data.hard != 0) ? 1 : 0;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -1999,7 +2034,7 @@ static int xfrm_notify_sa_flush(struct km_event *c)
        struct xfrm_usersa_flush *p;
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
-       unsigned char *b;
+       sk_buff_data_t b;
        int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
 
        skb = alloc_skb(len, GFP_ATOMIC);
@@ -2045,7 +2080,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
        struct xfrm_usersa_id *id;
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
-       unsigned char *b;
+       sk_buff_data_t b;
        int len = xfrm_sa_len(x);
        int headlen;
 
@@ -2129,7 +2164,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
 {
        struct xfrm_user_acquire *ua;
        struct nlmsghdr *nlh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
        __u32 seq = xfrm_get_acqseq();
 
        nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE,
@@ -2153,11 +2188,11 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
        if (copy_to_user_policy_type(xp->type, skb) < 0)
                goto nlmsg_failure;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -2249,7 +2284,7 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
        struct xfrm_user_polexpire *upe;
        struct nlmsghdr *nlh;
        int hard = c->data.hard;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe));
        upe = NLMSG_DATA(nlh);
@@ -2264,11 +2299,11 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
                goto nlmsg_failure;
        upe->hard = !!hard;
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -2300,7 +2335,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *
        struct xfrm_userpolicy_id *id;
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
-       unsigned char *b;
+       sk_buff_data_t b;
        int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
        int headlen;
 
@@ -2357,7 +2392,7 @@ static int xfrm_notify_policy_flush(struct km_event *c)
 {
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
-       unsigned char *b;
+       sk_buff_data_t b;
        int len = 0;
 #ifdef CONFIG_XFRM_SUB_POLICY
        len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
@@ -2410,7 +2445,7 @@ static int build_report(struct sk_buff *skb, u8 proto,
 {
        struct xfrm_user_report *ur;
        struct nlmsghdr *nlh;
-       unsigned char *b = skb->tail;
+       unsigned char *b = skb_tail_pointer(skb);
 
        nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur));
        ur = NLMSG_DATA(nlh);
@@ -2422,12 +2457,12 @@ static int build_report(struct sk_buff *skb, u8 proto,
        if (addr)
                RTA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
 
-       nlh->nlmsg_len = skb->tail - b;
+       nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
 nlmsg_failure:
 rtattr_failure:
-       skb_trim(skb, b - skb->data);
+       nlmsg_trim(skb, b);
        return -1;
 }
 
@@ -2466,7 +2501,7 @@ static int __init xfrm_user_init(void)
        printk(KERN_INFO "Initializing XFRM netlink socket\n");
 
        nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
-                                    xfrm_netlink_rcv, THIS_MODULE);
+                                    xfrm_netlink_rcv, NULL, THIS_MODULE);
        if (nlsk == NULL)
                return -ENOMEM;
        rcu_assign_pointer(xfrm_nl, nlsk);
index ad45ce7..88292e3 100644 (file)
@@ -66,6 +66,8 @@ struct key_type key_type_keyring = {
        .read           = keyring_read,
 };
 
+EXPORT_SYMBOL(key_type_keyring);
+
 /*
  * semaphore to serialise link/link calls to prevent two link calls in parallel
  * introducing a cycle
index d41e24d..5f02b4b 100644 (file)
@@ -2944,7 +2944,7 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
        int offset, ihlen, ret = -EINVAL;
        struct iphdr _iph, *ih;
 
-       offset = skb->nh.raw - skb->data;
+       offset = skb_network_offset(skb);
        ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
        if (ih == NULL)
                goto out;
@@ -3026,7 +3026,7 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
        int ret = -EINVAL, offset;
        struct ipv6hdr _ipv6h, *ip6;
 
-       offset = skb->nh.raw - skb->data;
+       offset = skb_network_offset(skb);
        ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
        if (ip6 == NULL)
                goto out;
@@ -3786,7 +3786,7 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
                err = -EINVAL;
                goto out;
        }
-       nlh = (struct nlmsghdr *)skb->data;
+       nlh = nlmsg_hdr(skb);
        
        err = selinux_nlmsg_lookup(isec->sclass, nlh->nlmsg_type, &perm);
        if (err) {
index e203883..f49046d 100644 (file)
@@ -66,7 +66,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *
 static void selnl_notify(int msgtype, void *data)
 {
        int len;
-       unsigned char *tmp;
+       sk_buff_data_t tmp;
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        
@@ -104,7 +104,7 @@ void selnl_notify_policyload(u32 seqno)
 
 static int __init selnl_init(void)
 {
-       selnl = netlink_kernel_create(NETLINK_SELINUX, SELNLGRP_MAX, NULL,
+       selnl = netlink_kernel_create(NETLINK_SELINUX, SELNLGRP_MAX, NULL, NULL,
                                      THIS_MODULE);
        if (selnl == NULL)
                panic("SELinux:  Cannot create netlink socket.");
index c899786..07962a3 100644 (file)
@@ -1067,8 +1067,8 @@ out_err:
 
 static int __devinit amd7930_obio_attach(struct device_node *dp)
 {
-       struct linux_prom_registers *regs;
-       struct linux_prom_irqs *irqp;
+       const struct linux_prom_registers *regs;
+       const struct linux_prom_irqs *irqp;
        struct resource res, *rp;
        int len;
 
index f5956d5..900a00d 100644 (file)
@@ -2284,7 +2284,7 @@ static int __init cs4231_init(void)
                        if (!strcmp(edev->prom_node->name, "SUNW,CS4231")) {
                                match = 1;
                        } else if (!strcmp(edev->prom_node->name, "audio")) {
-                               char *compat;
+                               const char *compat;
 
                                compat = of_get_property(edev->prom_node,
                                                         "compatible", NULL);